query
stringlengths
7
3.85k
document
stringlengths
11
430k
metadata
dict
negatives
sequencelengths
0
101
negative_scores
sequencelengths
0
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
QueryWithdrawQuotas returns the users cryptocurrency withdraw quotas
func (h *HUOBI) QueryWithdrawQuotas(ctx context.Context, cryptocurrency string) (WithdrawQuota, error) { resp := struct { WithdrawQuota WithdrawQuota `json:"data"` }{} vals := url.Values{} vals.Set("currency", cryptocurrency) err := h.SendAuthenticatedHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, huobiAccountWithdrawQuota, vals, nil, &resp, true) if err != nil { return WithdrawQuota{}, err } return resp.WithdrawQuota, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func QueryWithdraws(rpcAddr string) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\trequest := common.GetInterxRequest(r)\n\t\tresponse := common.GetResponseFormat(request, rpcAddr)\n\t\tstatusCode := http.StatusOK\n\n\t\tcommon.GetLogger().Info(\"[query-withdraws] Entering withdraws query\")\n\n\t\tif !common.RPCMethods[\"GET\"][config.QueryWithdraws].Enabled {\n\t\t\tresponse.Response, response.Error, statusCode = common.ServeError(0, \"\", \"API disabled\", http.StatusForbidden)\n\t\t} else {\n\t\t\tif common.RPCMethods[\"GET\"][config.QueryWithdraws].CachingEnabled {\n\t\t\t\tfound, cacheResponse, cacheError, cacheStatus := common.SearchCache(request, response)\n\t\t\t\tif found {\n\t\t\t\t\tresponse.Response, response.Error, statusCode = cacheResponse, cacheError, cacheStatus\n\t\t\t\t\tcommon.WrapResponse(w, request, *response, statusCode, false)\n\n\t\t\t\t\tcommon.GetLogger().Info(\"[query-withdraws] Returning from the cache\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tresponse.Response, response.Error, statusCode = QueryBlockTransactionsHandler(rpcAddr, r, true)\n\t\t}\n\n\t\tcommon.WrapResponse(w, request, *response, statusCode, common.RPCMethods[\"GET\"][config.QueryStatus].CachingEnabled)\n\t}\n}", "func GetWithdraws(queryRoute string, cdc *codec.Codec) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"withdraws\",\n\t\tShort: \"Get withdraw list by page and limit\",\n\t\tExample: \"withdraws --page=1 --limit=10\",\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tcliCtx := context.NewCLIContext().WithCodec(cdc)\n\n\t\t\t// parse inputs\n\t\t\tpage, limit, err := helpers.ParsePaginationParams(viper.GetString(flags.FlagPage), viper.GetString(flags.FlagLimit), helpers.ParamTypeCliFlag)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// prepare request\n\t\t\treq := types.WithdrawsReq{\n\t\t\t\tPage: page,\n\t\t\t\tLimit: limit,\n\t\t\t}\n\n\t\t\tbz, err := 
cliCtx.Codec.MarshalJSON(req)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// query and parse the result\n\t\t\tres, _, err := cliCtx.QueryWithData(fmt.Sprintf(\"custom/%s/%s\", queryRoute, types.QueryWithdraws), bz)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tvar out types.Withdraws\n\t\t\tcdc.MustUnmarshalJSON(res, &out)\n\n\t\t\treturn cliCtx.PrintOutput(out)\n\t\t},\n\t}\n\thelpers.AddPaginationCmdFlags(cmd)\n\n\treturn cmd\n}", "func (s *Service) GetWithdraw(c context.Context, dateVersion string, from, limit int) (count int, withdrawVos []*model.WithdrawVo, err error) {\n\tcount, upAccounts, err := s.UpWithdraw(c, dateVersion, from, limit)\n\tif err != nil {\n\t\tlog.Error(\"s.UpWithdraw error(%v)\", err)\n\t\treturn\n\t}\n\n\tmids := make([]int64, len(upAccounts))\n\tfor i, up := range upAccounts {\n\t\tmids[i] = up.MID\n\t}\n\n\twithdrawVos = make([]*model.WithdrawVo, 0)\n\tif len(mids) == 0 {\n\t\treturn\n\t}\n\n\tupIncomeWithdrawMap, err := s.dao.QueryUpWithdrawByMids(c, mids, dateVersion)\n\tif err != nil {\n\t\tlog.Error(\"s.dao.QueryUpWithdrawByMids error(%v)\", err)\n\t\treturn\n\t}\n\n\tfor _, up := range upAccounts {\n\t\tif upIncomeWithdraw, ok := upIncomeWithdrawMap[up.MID]; ok && upIncomeWithdraw.State == _withdrawing {\n\t\t\tvo := &model.WithdrawVo{\n\t\t\t\tMID: up.MID,\n\t\t\t\tThirdCoin: float64(up.TotalUnwithdrawIncome) * float64(0.01),\n\t\t\t\tThirdOrderNo: strconv.FormatInt(upIncomeWithdraw.ID, 10),\n\t\t\t\tCTime: time.Unix(int64(upIncomeWithdraw.CTime), 0).Format(\"2006-01-02 15:04:05\"),\n\t\t\t\tNotifyURL: \"http://up-profit.bilibili.co/allowance/api/x/internal/growup/up/withdraw/success\",\n\t\t\t}\n\n\t\t\twithdrawVos = append(withdrawVos, vo)\n\t\t}\n\t}\n\n\treturn\n}", "func (_IWETH *IWETHSession) Withdraw(arg0 *big.Int) (*types.Transaction, error) {\r\n\treturn _IWETH.Contract.Withdraw(&_IWETH.TransactOpts, arg0)\r\n}", "func AllWithdraw() []Withdraw {\n\torm := get_DBFront()\n\tvar with 
[]Withdraw\n\terr := orm.SetTable(\"withdraw\").FindAll(&with)\n\tif !check_err(err) {\n\t\tLog(Log_Struct{\"error\", \"DB_Error_Line_699\", err})\n\t}\n\tSliceReverse(with)\n\treturn with\n}", "func (_IWETH *IWETHTransactorSession) Withdraw(arg0 *big.Int) (*types.Transaction, error) {\r\n\treturn _IWETH.Contract.Withdraw(&_IWETH.TransactOpts, arg0)\r\n}", "func (_Contract *ContractSession) Withdraw(value *big.Int) (*types.Transaction, error) {\n\treturn _Contract.Contract.Withdraw(&_Contract.TransactOpts, value)\n}", "func (_BREMICO *BREMICOSession) Withdraw(_value *big.Int) (*types.Transaction, error) {\n\treturn _BREMICO.Contract.Withdraw(&_BREMICO.TransactOpts, _value)\n}", "func (_Contract *ContractTransactorSession) Withdraw(value *big.Int) (*types.Transaction, error) {\n\treturn _Contract.Contract.Withdraw(&_Contract.TransactOpts, value)\n}", "func (_BREMICO *BREMICOSession) ConfirmWithdraw() (*types.Transaction, error) {\n\treturn _BREMICO.Contract.ConfirmWithdraw(&_BREMICO.TransactOpts)\n}", "func (_BREMICO *BREMICOTransactorSession) Withdraw(_value *big.Int) (*types.Transaction, error) {\n\treturn _BREMICO.Contract.Withdraw(&_BREMICO.TransactOpts, _value)\n}", "func (_Vault *VaultTransactor) RequestWithdraw(opts *bind.TransactOpts, incognitoAddress string, token common.Address, amount *big.Int, signData []byte, timestamp []byte) (*types.Transaction, error) {\n\treturn _Vault.contract.Transact(opts, \"requestWithdraw\", incognitoAddress, token, amount, signData, timestamp)\n}", "func withdraw(ctx iscp.Sandbox) (dict.Dict, error) {\n\tstate := ctx.State()\n\tmustCheckLedger(state, \"accounts.withdraw.begin\")\n\n\tif ctx.Caller().Address().Equals(ctx.ChainID().AsAddress()) {\n\t\t// if the caller is on the same chain, do nothing\n\t\treturn nil, nil\n\t}\n\ttokensToWithdraw, ok := GetAccountBalances(state, ctx.Caller())\n\tif !ok {\n\t\t// empty balance, nothing to withdraw\n\t\treturn nil, nil\n\t}\n\t// will be sending back to default entry point\n\ta := 
assert.NewAssert(ctx.Log())\n\t// bring balances to the current account (owner's account). It is needed for subsequent Send call\n\ta.Require(MoveBetweenAccounts(state, ctx.Caller(), commonaccount.Get(ctx.ChainID()), tokensToWithdraw),\n\t\t\"accounts.withdraw.inconsistency. failed to move tokens to owner's account\")\n\n\t// add incoming tokens (after fees) to the balances to be withdrawn. Otherwise they would end up in the common account\n\ttokensToWithdraw.AddAll(ctx.IncomingTransfer())\n\t// Send call assumes tokens are in the current account\n\ta.Require(ctx.Send(ctx.Caller().Address(), tokensToWithdraw, &iscp.SendMetadata{\n\t\tTargetContract: ctx.Caller().Hname(),\n\t}), \"accounts.withdraw.inconsistency: failed sending tokens \")\n\n\tctx.Log().Debugf(\"accounts.withdraw.success. Sent to address %s\", tokensToWithdraw.String())\n\n\tmustCheckLedger(state, \"accounts.withdraw.exit\")\n\treturn nil, nil\n}", "func (_DogsOfRome *DogsOfRomeSession) Withdraw() (*types.Transaction, error) {\n\treturn _DogsOfRome.Contract.Withdraw(&_DogsOfRome.TransactOpts)\n}", "func (_BREMICO *BREMICOTransactor) Withdraw(opts *bind.TransactOpts, _value *big.Int) (*types.Transaction, error) {\n\treturn _BREMICO.contract.Transact(opts, \"withdraw\", _value)\n}", "func (_Cakevault *CakevaultTransactorSession) Withdraw(_shares *big.Int) (*types.Transaction, error) {\n\treturn _Cakevault.Contract.Withdraw(&_Cakevault.TransactOpts, _shares)\n}", "func (hs *HistoryService) Withdraw() (*CurrencyHistory, error) {\n\treturn hs.currency(\"withdraw\")\n}", "func (_Wmatic *WmaticSession) Withdraw(wad *big.Int) (*types.Transaction, error) {\n\treturn _Wmatic.Contract.Withdraw(&_Wmatic.TransactOpts, wad)\n}", "func (_Contract *ContractTransactor) Withdraw(opts *bind.TransactOpts, value *big.Int) (*types.Transaction, error) {\n\treturn _Contract.contract.Transact(opts, \"withdraw\", value)\n}", "func (_Vault *VaultSession) RequestWithdraw(incognitoAddress string, token common.Address, amount 
*big.Int, signData []byte, timestamp []byte) (*types.Transaction, error) {\n\treturn _Vault.Contract.RequestWithdraw(&_Vault.TransactOpts, incognitoAddress, token, amount, signData, timestamp)\n}", "func (_BREMICO *BREMICOTransactorSession) ConfirmWithdraw() (*types.Transaction, error) {\n\treturn _BREMICO.Contract.ConfirmWithdraw(&_BREMICO.TransactOpts)\n}", "func (_IWETH *IWETHTransactor) Withdraw(opts *bind.TransactOpts, arg0 *big.Int) (*types.Transaction, error) {\r\n\treturn _IWETH.contract.Transact(opts, \"withdraw\", arg0)\r\n}", "func (_DogsOfRome *DogsOfRomeTransactorSession) Withdraw() (*types.Transaction, error) {\n\treturn _DogsOfRome.Contract.Withdraw(&_DogsOfRome.TransactOpts)\n}", "func (_TokenStakingEscrow *TokenStakingEscrowTransactor) Withdraw(opts *bind.TransactOpts, operator common.Address) (*types.Transaction, error) {\n\treturn _TokenStakingEscrow.contract.Transact(opts, \"withdraw\", operator)\n}", "func (_Cakevault *CakevaultSession) Withdraw(_shares *big.Int) (*types.Transaction, error) {\n\treturn _Cakevault.Contract.Withdraw(&_Cakevault.TransactOpts, _shares)\n}", "func (_XStaking *XStakingSession) Withdraw(amount *big.Int) (*types.Transaction, error) {\n\treturn _XStaking.Contract.Withdraw(&_XStaking.TransactOpts, amount)\n}", "func WithdrawalCryptos(mods ...qm.QueryMod) withdrawalCryptoQuery {\n\tmods = append(mods, qm.From(\"\\\"withdrawal_crypto\\\"\"))\n\treturn withdrawalCryptoQuery{NewQuery(mods...)}\n}", "func (_Vault *VaultTransactorSession) RequestWithdraw(incognitoAddress string, token common.Address, amount *big.Int, signData []byte, timestamp []byte) (*types.Transaction, error) {\n\treturn _Vault.Contract.RequestWithdraw(&_Vault.TransactOpts, incognitoAddress, token, amount, signData, timestamp)\n}", "func (_TokenStakingEscrow *TokenStakingEscrowFilterer) FilterDepositWithdrawn(opts *bind.FilterOpts, operator []common.Address, grantee []common.Address) (*TokenStakingEscrowDepositWithdrawnIterator, error) {\n\n\tvar 
operatorRule []interface{}\n\tfor _, operatorItem := range operator {\n\t\toperatorRule = append(operatorRule, operatorItem)\n\t}\n\tvar granteeRule []interface{}\n\tfor _, granteeItem := range grantee {\n\t\tgranteeRule = append(granteeRule, granteeItem)\n\t}\n\n\tlogs, sub, err := _TokenStakingEscrow.contract.FilterLogs(opts, \"DepositWithdrawn\", operatorRule, granteeRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &TokenStakingEscrowDepositWithdrawnIterator{contract: _TokenStakingEscrow.contract, event: \"DepositWithdrawn\", logs: logs, sub: sub}, nil\n}", "func (_ElvTradable *ElvTradableTransactor) Withdraw(opts *bind.TransactOpts, _amount *big.Int) (*types.Transaction, error) {\n\treturn _ElvTradable.contract.Transact(opts, \"withdraw\", _amount)\n}", "func (_Wmatic *WmaticTransactorSession) Withdraw(wad *big.Int) (*types.Transaction, error) {\n\treturn _Wmatic.Contract.Withdraw(&_Wmatic.TransactOpts, wad)\n}", "func (_Cakevault *CakevaultTransactor) Withdraw(opts *bind.TransactOpts, _shares *big.Int) (*types.Transaction, error) {\n\treturn _Cakevault.contract.Transact(opts, \"withdraw\", _shares)\n}", "func (_TokenStakingEscrow *TokenStakingEscrowSession) Withdraw(operator common.Address) (*types.Transaction, error) {\n\treturn _TokenStakingEscrow.Contract.Withdraw(&_TokenStakingEscrow.TransactOpts, operator)\n}", "func (_BREMICO *BREMICOTransactor) ConfirmWithdraw(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _BREMICO.contract.Transact(opts, \"confirmWithdraw\")\n}", "func (_Vault *VaultSession) WithdrawRequests(arg0 common.Address, arg1 common.Address) (*big.Int, error) {\n\treturn _Vault.Contract.WithdrawRequests(&_Vault.CallOpts, arg0, arg1)\n}", "func (_IStakingRewards *IStakingRewardsSession) Withdraw(amount *big.Int) (*types.Transaction, error) {\n\treturn _IStakingRewards.Contract.Withdraw(&_IStakingRewards.TransactOpts, amount)\n}", "func (_XStaking *XStakingTransactorSession) Withdraw(amount *big.Int) 
(*types.Transaction, error) {\n\treturn _XStaking.Contract.Withdraw(&_XStaking.TransactOpts, amount)\n}", "func (_WandappETH *WandappETHSession) Withdraw(proof []byte) (*types.Transaction, error) {\n\treturn _WandappETH.Contract.Withdraw(&_WandappETH.TransactOpts, proof)\n}", "func (_Smartchef *SmartchefTransactor) Withdraw(opts *bind.TransactOpts, _amount *big.Int) (*types.Transaction, error) {\n\treturn _Smartchef.contract.Transact(opts, \"withdraw\", _amount)\n}", "func (_XStaking *XStakingFilterer) FilterWithdrawn(opts *bind.FilterOpts, user []common.Address) (*XStakingWithdrawnIterator, error) {\n\n\tvar userRule []interface{}\n\tfor _, userItem := range user {\n\t\tuserRule = append(userRule, userItem)\n\t}\n\n\tlogs, sub, err := _XStaking.contract.FilterLogs(opts, \"Withdrawn\", userRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &XStakingWithdrawnIterator{contract: _XStaking.contract, event: \"Withdrawn\", logs: logs, sub: sub}, nil\n}", "func (_Cakevault *CakevaultSession) WithdrawAll() (*types.Transaction, error) {\n\treturn _Cakevault.Contract.WithdrawAll(&_Cakevault.TransactOpts)\n}", "func (_Wmatic *WmaticTransactor) Withdraw(opts *bind.TransactOpts, wad *big.Int) (*types.Transaction, error) {\n\treturn _Wmatic.contract.Transact(opts, \"withdraw\", wad)\n}", "func (_Cakevault *CakevaultTransactorSession) WithdrawAll() (*types.Transaction, error) {\n\treturn _Cakevault.Contract.WithdrawAll(&_Cakevault.TransactOpts)\n}", "func (_Smartchef *SmartchefSession) Withdraw(_amount *big.Int) (*types.Transaction, error) {\n\treturn _Smartchef.Contract.Withdraw(&_Smartchef.TransactOpts, _amount)\n}", "func (_WandappETH *WandappETHTransactorSession) Withdraw(proof []byte) (*types.Transaction, error) {\n\treturn _WandappETH.Contract.Withdraw(&_WandappETH.TransactOpts, proof)\n}", "func (_Lmc *LmcFilterer) FilterWithdrawn(opts *bind.FilterOpts, user []common.Address) (*LmcWithdrawnIterator, error) {\n\n\tvar userRule []interface{}\n\tfor _, userItem := 
range user {\n\t\tuserRule = append(userRule, userItem)\n\t}\n\n\tlogs, sub, err := _Lmc.contract.FilterLogs(opts, \"Withdrawn\", userRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &LmcWithdrawnIterator{contract: _Lmc.contract, event: \"Withdrawn\", logs: logs, sub: sub}, nil\n}", "func GetWithdraw(queryRoute string, cdc *codec.Codec) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"withdraw [withdrawID]\",\n\t\tShort: \"Get withdraw by ID\",\n\t\tExample: \"withdraw 0\",\n\t\tArgs: cobra.ExactArgs(1),\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tcliCtx := context.NewCLIContext().WithCodec(cdc)\n\n\t\t\t// parse inputs\n\t\t\tid, err := helpers.ParseDnIDParam(\"withdrawID\", args[0], helpers.ParamTypeCliArg)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// prepare request\n\t\t\treq := types.WithdrawReq{\n\t\t\t\tID: id,\n\t\t\t}\n\n\t\t\tbz, err := cliCtx.Codec.MarshalJSON(req)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// query and parse the result\n\t\t\tres, _, err := cliCtx.QueryWithData(fmt.Sprintf(\"custom/%s/%s\", queryRoute, types.QueryWithdraw), bz)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tvar out types.Withdraw\n\t\t\tcdc.MustUnmarshalJSON(res, &out)\n\n\t\t\treturn cliCtx.PrintOutput(out)\n\t\t},\n\t}\n\thelpers.BuildCmdHelp(cmd, []string{\n\t\t\"withdraw unique ID\",\n\t})\n\n\treturn cmd\n}", "func (_TokenStakingEscrow *TokenStakingEscrowTransactorSession) Withdraw(operator common.Address) (*types.Transaction, error) {\n\treturn _TokenStakingEscrow.Contract.Withdraw(&_TokenStakingEscrow.TransactOpts, operator)\n}", "func (_Smartchef *SmartchefTransactorSession) Withdraw(_amount *big.Int) (*types.Transaction, error) {\n\treturn _Smartchef.Contract.Withdraw(&_Smartchef.TransactOpts, _amount)\n}", "func (c *Client) AccountWithdraw(currency string, quantity *big.Float, address string, paymentID string) (TransactionID, error) {\r\n\tdefer 
c.clearError()\r\n\r\n\tparams := map[string]string{\r\n\t\t\"apikey\": c.apiKey,\r\n\t\t\"currency\": currency,\r\n\t\t\"quantity\": quantity.String(),\r\n\t\t\"address\": address,\r\n\t}\r\n\r\n\tif paymentID != \"\" {\r\n\t\tparams[\"paymentid\"] = paymentID\r\n\t}\r\n\r\n\tvar parsedResponse *baseResponse\r\n\r\n\tparsedResponse = c.sendRequest(\"account/withdraw\", params)\r\n\r\n\tif c.err != nil {\r\n\t\treturn TransactionID{}, c.err\r\n\t}\r\n\r\n\tif parsedResponse.Success != true {\r\n\t\tc.setError(\"api error - account/withdraw\", parsedResponse.Message)\r\n\t\treturn TransactionID{}, c.err\r\n\t}\r\n\r\n\tvar response TransactionID\r\n\tdefaultVal := TransactionID{}\r\n\r\n\tif err := json.Unmarshal(parsedResponse.Result, &response); err != nil {\r\n\t\tc.setError(\"api error - account/withdraw\", err.Error())\r\n\t\treturn defaultVal, c.err\r\n\t}\r\n\r\n\tif response == defaultVal {\r\n\t\tc.setError(\"validate response\", \"nil vals in withdraw response\")\r\n\t}\r\n\r\n\treturn response, nil\r\n}", "func ListQuotas(query ...*models.QuotaQuery) ([]*Quota, error) {\n\tcondition, params := quotaQueryConditions(query...)\n\n\tsql := fmt.Sprintf(`\nSELECT\n a.id,\n a.reference,\n a.reference_id,\n a.hard,\n b.used,\n b.creation_time,\n b.update_time\nFROM\n quota AS a\n JOIN quota_usage AS b ON a.id = b.id %s`, condition)\n\n\torderBy := quotaOrderBy(query...)\n\tif orderBy != \"\" {\n\t\tsql += ` order by ` + orderBy\n\t}\n\n\tif len(query) > 0 && query[0] != nil {\n\t\tpage, size := query[0].Page, query[0].Size\n\t\tif size > 0 {\n\t\t\tsql += ` limit ?`\n\t\t\tparams = append(params, size)\n\t\t\tif page > 0 {\n\t\t\t\tsql += ` offset ?`\n\t\t\t\tparams = append(params, size*(page-1))\n\t\t\t}\n\t\t}\n\t}\n\n\tvar quotas []*Quota\n\tif _, err := GetOrmer().Raw(sql, params).QueryRows(&quotas); err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, quota := range quotas {\n\t\td, ok := driver.Get(quota.Reference)\n\t\tif !ok 
{\n\t\t\tcontinue\n\t\t}\n\n\t\tref, err := d.Load(quota.ReferenceID)\n\t\tif err != nil {\n\t\t\tlog.Warning(fmt.Sprintf(\"Load quota reference object (%s, %s) failed: %v\", quota.Reference, quota.ReferenceID, err))\n\t\t\tcontinue\n\t\t}\n\n\t\tquota.Ref = ref\n\t}\n\n\treturn quotas, nil\n}", "func (_IStakingRewards *IStakingRewardsTransactorSession) Withdraw(amount *big.Int) (*types.Transaction, error) {\n\treturn _IStakingRewards.Contract.Withdraw(&_IStakingRewards.TransactOpts, amount)\n}", "func (_WELV9 *WELV9Transactor) Withdraw(opts *bind.TransactOpts, wad *big.Int) (*types.Transaction, error) {\n\treturn _WELV9.contract.Transact(opts, \"withdraw\", wad)\n}", "func (_XStaking *XStakingTransactor) Withdraw(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) {\n\treturn _XStaking.contract.Transact(opts, \"withdraw\", amount)\n}", "func (_DogsOfRome *DogsOfRomeTransactor) Withdraw(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _DogsOfRome.contract.Transact(opts, \"withdraw\")\n}", "func (_Vault *VaultFilterer) FilterWithdraw(opts *bind.FilterOpts) (*VaultWithdrawIterator, error) {\n\n\tlogs, sub, err := _Vault.contract.FilterLogs(opts, \"Withdraw\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &VaultWithdrawIterator{contract: _Vault.contract, event: \"Withdraw\", logs: logs, sub: sub}, nil\n}", "func (_Vault *VaultCallerSession) WithdrawRequests(arg0 common.Address, arg1 common.Address) (*big.Int, error) {\n\treturn _Vault.Contract.WithdrawRequests(&_Vault.CallOpts, arg0, arg1)\n}", "func (_SingleAuto *SingleAutoFilterer) FilterWithdraw(opts *bind.FilterOpts, user []common.Address, pid []*big.Int) (*SingleAutoWithdrawIterator, error) {\n\n\tvar userRule []interface{}\n\tfor _, userItem := range user {\n\t\tuserRule = append(userRule, userItem)\n\t}\n\tvar pidRule []interface{}\n\tfor _, pidItem := range pid {\n\t\tpidRule = append(pidRule, pidItem)\n\t}\n\n\tlogs, sub, err := _SingleAuto.contract.FilterLogs(opts, 
\"Withdraw\", userRule, pidRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &SingleAutoWithdrawIterator{contract: _SingleAuto.contract, event: \"Withdraw\", logs: logs, sub: sub}, nil\n}", "func (_Smartchef *SmartchefFilterer) FilterWithdraw(opts *bind.FilterOpts, user []common.Address) (*SmartchefWithdrawIterator, error) {\n\n\tvar userRule []interface{}\n\tfor _, userItem := range user {\n\t\tuserRule = append(userRule, userItem)\n\t}\n\n\tlogs, sub, err := _Smartchef.contract.FilterLogs(opts, \"Withdraw\", userRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &SmartchefWithdrawIterator{contract: _Smartchef.contract, event: \"Withdraw\", logs: logs, sub: sub}, nil\n}", "func GetTotalOfQuotas(query ...*models.QuotaQuery) (int64, error) {\n\tcondition, params := quotaQueryConditions(query...)\n\tsql := fmt.Sprintf(\"SELECT COUNT(1) FROM quota AS a JOIN quota_usage AS b ON a.id = b.id %s\", condition)\n\n\tvar count int64\n\tif err := GetOrmer().Raw(sql, params).QueryRow(&count); err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn count, nil\n}", "func (_Lmc *LmcTransactor) Withdraw(opts *bind.TransactOpts, _tokenAmount *big.Int) (*types.Transaction, error) {\n\treturn _Lmc.contract.Transact(opts, \"withdraw\", _tokenAmount)\n}", "func (_DelegateProfile *DelegateProfileTransactorSession) Withdraw() (*types.Transaction, error) {\n\treturn _DelegateProfile.Contract.Withdraw(&_DelegateProfile.TransactOpts)\n}", "func (_Lmc *LmcSession) Withdraw(_tokenAmount *big.Int) (*types.Transaction, error) {\n\treturn _Lmc.Contract.Withdraw(&_Lmc.TransactOpts, _tokenAmount)\n}", "func (_SingleAuto *SingleAutoSession) Withdraw(_pid *big.Int, _wantAmt *big.Int) (*types.Transaction, error) {\n\treturn _SingleAuto.Contract.Withdraw(&_SingleAuto.TransactOpts, _pid, _wantAmt)\n}", "func (t *SupplyChaincode) queryTrade(stub shim.ChaincodeStubInterface, args []string) pb.Response {\n\tvar TransactionId, Sku, TradeDate, TraceInfo, Counter string\t// Fileds of a 
trade\n\tvar err error\n\t\n\tfmt.Println(\"---inside queryTrade---\")\n\tfmt.Printf(\"queryTrade received %d args\\n \", len(args))\n\t\n\tTransactionId = args[0]\n\tSku = args[1]\n\tTradeDate = args[2]\n\tTraceInfo = args[3]\n\tCounter = \"Counter\"\n\n\tTransactionIdVal, err := stub.GetState(TransactionId)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\t\n\tSkuVal, err := stub.GetState(Sku)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\t\n\tTradeDateVal, err := stub.GetState(TradeDate)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\t\n\tTraceInfoVal, err := stub.GetState(TraceInfo)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\t\n\tCounterValbytes, err := stub.GetState(Counter)\n\tfmt.Printf(\"CounterVal:%d\\n\", CounterValbytes)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\t\n/*\n\tfmt.Printf(\"1.Query results: %x\\n\", TransactionIdVal)\n\tfmt.Printf(\"2.Query results: %x\\n\", SkuVal)\n\tfmt.Printf(\"3.Query results: %x\\n\", TradeDateVal)\n\tfmt.Printf(\"4.Query results: %x\\n\", TraceInfoVal)\n\n\tfmt.Printf(\"1.Query results: %x\\n\", string(TransactionIdVal))\n\tfmt.Printf(\"2.Query results: %x\\n\", string(SkuVal))\n\tfmt.Printf(\"3.Query results: %x\\n\", string(TradeDateVal))\n\tfmt.Printf(\"4.Query results: %x\\n\", string(TraceInfoVal))\n*/\n\tQueryResults := []byte(string(TransactionIdVal) + \",\" + \n\t\t\t\t\t\t\tstring(SkuVal) + \",\" + \n\t\t\t\t\t\t\tstring(TradeDateVal) + \",\" + \n\t\t\t\t\t\t\tstring(TraceInfoVal) + \",\" + \n\t\t\t\t\t\t\tstring(string(CounterValbytes)))\n\treturn shim.Success(QueryResults)\n}", "func (_Token *TokenSession) ExecuteWithdrawal() (*types.Transaction, error) {\n\treturn _Token.Contract.ExecuteWithdrawal(&_Token.TransactOpts)\n}", "func (_PBridge *PBridgeSession) ExecuteWithdrawTx(txKey string, to common.Address, amount *big.Int, isERC20 bool, ERC20 common.Address, signatures []byte) (*types.Transaction, error) {\n\treturn 
_PBridge.Contract.ExecuteWithdrawTx(&_PBridge.TransactOpts, txKey, to, amount, isERC20, ERC20, signatures)\n}", "func (_SingleAuto *SingleAutoTransactorSession) Withdraw(_pid *big.Int, _wantAmt *big.Int) (*types.Transaction, error) {\n\treturn _SingleAuto.Contract.Withdraw(&_SingleAuto.TransactOpts, _pid, _wantAmt)\n}", "func (_WandappETH *WandappETHTransactor) Withdraw(opts *bind.TransactOpts, proof []byte) (*types.Transaction, error) {\n\treturn _WandappETH.contract.Transact(opts, \"withdraw\", proof)\n}", "func (_SingleAuto *SingleAutoTransactor) Withdraw(opts *bind.TransactOpts, _pid *big.Int, _wantAmt *big.Int) (*types.Transaction, error) {\n\treturn _SingleAuto.contract.Transact(opts, \"withdraw\", _pid, _wantAmt)\n}", "func (self *NovaV2) GetQuotas(tenantId string) (nova.QuotaSet, error) {\n\tvar out QuotaRespV2\n\terr := nova.Get(self.Client, \"os-quota-sets/\"+url.QueryEscape(tenantId), &out)\n\tif err != nil {\n\t\treturn nova.QuotaSet{}, err\n\t}\n\n\treturn out.QuotaSet.ToQuotaSet(), nil\n}", "func (sc Funcs) Withdraw(ctx wasmlib.ScFuncClientContext) *WithdrawCall {\n\treturn &WithdrawCall{Func: wasmlib.NewScFunc(ctx, HScName, HFuncWithdraw)}\n}", "func (_PBridge *PBridgeFilterer) FilterTxWithdrawCompleted(opts *bind.FilterOpts) (*PBridgeTxWithdrawCompletedIterator, error) {\n\n\tlogs, sub, err := _PBridge.contract.FilterLogs(opts, \"TxWithdrawCompleted\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &PBridgeTxWithdrawCompletedIterator{contract: _PBridge.contract, event: \"TxWithdrawCompleted\", logs: logs, sub: sub}, nil\n}", "func (_IStakingRewards *IStakingRewardsTransactor) Withdraw(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) {\n\treturn _IStakingRewards.contract.Transact(opts, \"withdraw\", amount)\n}", "func (_Vault *VaultSession) Withdraw(inst []byte, heights *big.Int, instPaths [][32]byte, instPathIsLefts []bool, instRoots [32]byte, blkData [32]byte, sigIdxs []*big.Int, sigVs []uint8, sigRs [][32]byte, sigSs 
[][32]byte) (*types.Transaction, error) {\n\treturn _Vault.Contract.Withdraw(&_Vault.TransactOpts, inst, heights, instPaths, instPathIsLefts, instRoots, blkData, sigIdxs, sigVs, sigRs, sigSs)\n}", "func (_PBridge *PBridgeTransactorSession) ExecuteWithdrawTx(txKey string, to common.Address, amount *big.Int, isERC20 bool, ERC20 common.Address, signatures []byte) (*types.Transaction, error) {\n\treturn _PBridge.Contract.ExecuteWithdrawTx(&_PBridge.TransactOpts, txKey, to, amount, isERC20, ERC20, signatures)\n}", "func (_DelegateProfile *DelegateProfileSession) Withdraw() (*types.Transaction, error) {\n\treturn _DelegateProfile.Contract.Withdraw(&_DelegateProfile.TransactOpts)\n}", "func (_PBridge *PBridgeTransactor) ExecuteWithdrawTx(opts *bind.TransactOpts, txKey string, to common.Address, amount *big.Int, isERC20 bool, ERC20 common.Address, signatures []byte) (*types.Transaction, error) {\n\treturn _PBridge.contract.Transact(opts, \"executeWithdrawTx\", txKey, to, amount, isERC20, ERC20, signatures)\n}", "func (sc stakingClient) Withdraw(fromInfo keys.Info, passWd, coinsStr, memo string, accNum, seqNum uint64) (\n\tresp sdk.TxResponse, err error) {\n\tif err = params.CheckKeyParams(fromInfo, passWd); err != nil {\n\t\treturn\n\t}\n\n\tcoin, err := sdk.ParseDecCoin(coinsStr)\n\tif err != nil {\n\t\treturn resp, fmt.Errorf(\"failed : parse Coins [%s] error: %s\", coinsStr, err)\n\t}\n\n\tmsg := types.NewMsgWithdraw(fromInfo.GetAddress(), coin)\n\n\treturn sc.BuildAndBroadcast(fromInfo.GetName(), passWd, memo, []sdk.Msg{msg}, accNum, seqNum)\n\n}", "func (_Lmc *LmcTransactorSession) Withdraw(_tokenAmount *big.Int) (*types.Transaction, error) {\n\treturn _Lmc.Contract.Withdraw(&_Lmc.TransactOpts, _tokenAmount)\n}", "func (_Vault *VaultTransactorSession) Withdraw(inst []byte, heights *big.Int, instPaths [][32]byte, instPathIsLefts []bool, instRoots [32]byte, blkData [32]byte, sigIdxs []*big.Int, sigVs []uint8, sigRs [][32]byte, sigSs [][32]byte) (*types.Transaction, 
error) {\n\treturn _Vault.Contract.Withdraw(&_Vault.TransactOpts, inst, heights, instPaths, instPathIsLefts, instRoots, blkData, sigIdxs, sigVs, sigRs, sigSs)\n}", "func (_Token *TokenTransactorSession) ExecuteWithdrawal() (*types.Transaction, error) {\n\treturn _Token.Contract.ExecuteWithdrawal(&_Token.TransactOpts)\n}", "func (a *QuotasApiService) QuotasGet(ctx _context.Context) (QuotaDto, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue QuotaDto\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/quotas\"\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := 
_ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 204 {\n\t\t\tvar v QuotaDto\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (_Cakevault *CakevaultTransactor) WithdrawAll(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _Cakevault.contract.Transact(opts, \"withdrawAll\")\n}", "func (_Vault *VaultCaller) WithdrawRequests(opts *bind.CallOpts, arg0 common.Address, arg1 common.Address) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _Vault.contract.Call(opts, out, \"withdrawRequests\", arg0, arg1)\n\treturn *ret0, err\n}", "func (k *Kraken) WithdrawCryptocurrencyFunds(withdrawRequest *exchange.WithdrawRequest) (string, error) {\n\treturn k.Withdraw(withdrawRequest.Currency.String(), withdrawRequest.TradePassword, withdrawRequest.Amount)\n}", "func (_Token *TokenTransactor) ExecuteWithdrawal(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _Token.contract.Transact(opts, 
\"executeWithdrawal\")\n}", "func (_ElvTradableLocal *ElvTradableLocalTransactor) Withdraw(opts *bind.TransactOpts, _amount *big.Int) (*types.Transaction, error) {\n\treturn _ElvTradableLocal.contract.Transact(opts, \"withdraw\", _amount)\n}", "func Withdraw(accID string, amount int64) error {\n\tif amount <= 0 {\n\t\treturn fmt.Errorf(\"invalid amount; %d\", amount)\n\t}\n\n\tvar accs []*share.Account\n\terr := client.GetByNames(ctx, share.KindAccount, []string{accID, \"Cash\"}, &accs)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"get accounts error; %v\", err)\n\t}\n\n\tif accs[0].Balance < amount {\n\t\treturn fmt.Errorf(\"balance of account %s is %d, not enough for withdraw %d\", accID, accs[0].Balance, amount)\n\t}\n\n\taccs[0].Balance -= amount\n\taccs[1].Balance += amount\n\ttrans := []*share.Transaction{\n\t\t{Type: share.TransactionTypeWithdraw, AccountID: accID, Amount: -amount},\n\t\t{Type: share.TransactionTypeDeposit, AccountID: \"Cash\", Amount: amount},\n\t}\n\tfor _, tran := range trans {\n\t\ttran.NewKey(share.KindTransaction)\n\t}\n\terr = client.SaveModels(ctx, \"\", []interface{}{accs[0], accs[1], trans[0], trans[1]})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"save models error; %v\", err)\n\t}\n\treturn nil\n}", "func TxWithdrawAllRewards(f *cli.Fixtures, from string, flags ...string) (bool, string, string) {\n\tcmd := fmt.Sprintf(\"%s tx distribution withdraw-all-rewards %v --keyring-backend=test --from=%s\", f.SimcliBinary, f.Flags(), from)\n\treturn cli.ExecuteWriteRetStdStreams(f.T, cli.AddFlags(cmd, flags), clientkeys.DefaultKeyPass)\n}", "func WithdrawByUid(uid int) []Withdraw {\n\tvar with []Withdraw\n\tfor _, v := range AllWithdraw() {\n\t\tif v.Uid == uid {\n\t\t\twith = append(with, v)\n\t\t}\n\t}\n\tSliceReverse(with)\n\treturn with\n}", "func (_DelegateProfile *DelegateProfileTransactor) Withdraw(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _DelegateProfile.contract.Transact(opts, \"withdraw\")\n}", "func (client 
*Client) ListQuotas(tenantID string) ([]types.QuotaDetails, error) {\n\tvar result types.QuotaListResponse\n\n\turl, err := client.getCiaoQuotasResource()\n\tif err != nil {\n\t\treturn result.Quotas, errors.Wrap(err, \"Error getting quotas resource\")\n\t}\n\n\tif tenantID != \"\" {\n\t\turl = fmt.Sprintf(\"%s/%s/quotas\", url, tenantID)\n\t} else {\n\t\turl = fmt.Sprintf(\"%s/quotas\", url)\n\t}\n\n\terr = client.getResource(url, api.TenantsV1, nil, &result)\n\n\treturn result.Quotas, err\n}", "func QueryAccount() {\n\n\taccountInfo, err := binanceSrv.Account(binance.AccountRequest{\n\t\tRecvWindow: 5 * time.Second,\n\t\tTimestamp: time.Now(),\n\t})\n\tif err != nil {\n\t\tlevel.Error(logger).Log(\"QueryAccount - fail! Err=\", err)\n\t\treturn\n\t}\n\n\tlookForNew:\n\tfor _, balance := range accountInfo.Balances {\n\n\t\tif balance.Asset == \"BTC\" || balance.Asset == \"ETH\" || balance.Free+balance.Locked == 0{\n\t\t\tcontinue\n\t\t}\n\n\t\tfmt.Printf(\"QueryAccount - %s balance=%f. Free=%f, Locked=%f\\n\", balance.Asset,\n\t\t\tbalance.Free+balance.Locked, balance.Free, balance.Locked)\n\n\t\tasset := balance.Asset + \"BTC\"\n\n\t\t// get latest price\n\t\thighestBid := getHighestBid(asset)\n\t\tif highestBid.Time.Add(time.Second * 60).Before(time.Now()) {\n\t\t\tfmt.Println(\"Warning! QueryAccount - getHighestBid got old data. fail to manage its project\", asset)\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, knownProject := range ActiveProjectList {\n\n\t\t\t// Existing Known Project?\n\t\t\tif knownProject.Symbol == asset {\n\n\t\t\t\tif !FloatEquals(knownProject.AccBalanceBase, balance.Free+balance.Locked) {\n\n\t\t\t\t\tfmt.Printf(\"QueryAccount - Info: found new balance for %s. 
new=%f, old=%f\\n\",\n\t\t\t\t\t\tknownProject.Symbol, balance.Free+balance.Locked,\n\t\t\t\t\t\tknownProject.AccBalanceBase)\n\n\t\t\t\t\tknownProject.AccBalanceBase = balance.Free+balance.Locked\n\t\t\t\t\tknownProject.AccBalanceLocked = balance.Locked\n\n\t\t\t\t\tif !UpdateProjectAccBalanceBase(knownProject){\n\t\t\t\t\t\tfmt.Printf(\"QueryAccount - Update Project %s AccBalanceBase Fail!\\n\",\n\t\t\t\t\t\t\tknownProject.Symbol)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tcontinue lookForNew\n\t\t\t}\n\t\t}\n\n\n\t\thistoryRemain := GetHistoryRemain(asset)\n\n\t\t// ignore trivial balance\n\t\tif highestBid.Price * (balance.Free+balance.Locked) < MinOrderTotal {\n\n\t\t\t// update trivial balance into history_remain table\n\t\t\tif !FloatEquals(historyRemain.Amount, balance.Free+balance.Locked){\n\t\t\t\tUpdateHistoryRemain(asset, balance.Free+balance.Locked)\n\t\t\t}\n\n\t\t\tcontinue\n\t\t}\n\n\t\t// Must Be a New Project!\n\t\tProjectImport(balance, historyRemain)\n\t}\n}", "func (e *Event) GetWithdrawals() *Withdrawals {\n\te.RLock()\n\tdefer e.RUnlock()\n\treturn e.withdrawals\n}", "func (c *Client) Withdraw(ctx context.Context, foreignID string, amount float64, currency, address string) (Withdrawal, error) {\n\treqBody := map[string]interface{}{\n\t\t\"foreign_id\": foreignID,\n\t\t\"amount\": amount,\n\t\t\"currency\": currency,\n\t\t\"address\": address,\n\t}\n\n\treqJSON, err := json.Marshal(reqBody)\n\tif err != nil {\n\t\treturn Withdrawal{}, fmt.Errorf(\"request body marshaling error: %w\", err)\n\t}\n\n\twithdrawalURL, err := joinURL(c.api, withdrawalEndpoint)\n\tif err != nil {\n\t\treturn Withdrawal{}, fmt.Errorf(\"request url creating error: %w\", err)\n\t}\n\n\treq, err := http.NewRequestWithContext(ctx, http.MethodPost, withdrawalURL.String(), bytes.NewBuffer(reqJSON))\n\tif err != nil {\n\t\treturn Withdrawal{}, fmt.Errorf(\"request creating error: %w\", err)\n\t}\n\n\tsig, err := createHmac(c.secret, reqJSON)\n\tif err != nil {\n\t\treturn 
Withdrawal{}, fmt.Errorf(\"hmac signature creationg error: %w\", err)\n\t}\n\n\treq.Header.Set(contentTypeHeader, jsonContentType)\n\treq.Header.Set(keyHeader, c.apiKey)\n\treq.Header.Set(signatureHeader, sig)\n\n\tresp, err := c.client.Do(req)\n\tif err != nil {\n\t\treturn Withdrawal{}, fmt.Errorf(\"request error: %w\", err)\n\t}\n\tdefer resp.Body.Close()\n\n\terr = ensureSuccessResponse(resp)\n\tif err != nil {\n\t\treturn Withdrawal{}, fmt.Errorf(\"request failed: %w\", err)\n\t}\n\n\trespBody := struct {\n\t\tData Withdrawal `json:\"data\"`\n\t}{}\n\n\terr = json.NewDecoder(resp.Body).Decode(&respBody)\n\tif err != nil {\n\t\treturn Withdrawal{}, fmt.Errorf(\"response unmarshaling error: %w\", err)\n\t}\n\n\treturn respBody.Data, nil\n}", "func (_Constants *ConstantsSession) WithdrawLockPeriod() (*big.Int, error) {\n\treturn _Constants.Contract.WithdrawLockPeriod(&_Constants.CallOpts)\n}", "func (c *Client) Withdraw(ctx context.Context, p *WithdrawRequestBody) (err error) {\n\t_, err = c.WithdrawEndpoint(ctx, p)\n\treturn\n}", "func (_EtherDelta *EtherDeltaSession) Withdraw(amount *big.Int) (*types.Transaction, error) {\n\treturn _EtherDelta.Contract.Withdraw(&_EtherDelta.TransactOpts, amount)\n}", "func (del Delegation) WithdrawRequests(args struct {\n\tCursor *Cursor\n\tCount int32\n\tActiveOnly bool\n}) ([]WithdrawRequest, error) {\n\t// limit query size; the count can be either positive or negative\n\t// this controls the loading direction\n\targs.Count = listLimitCount(args.Count, listMaxEdgesPerRequest)\n\n\t// pull list of withdrawals\n\twr, err := repository.R().WithdrawRequests(&del.Address, del.Delegation.ToStakerId, (*string)(args.Cursor), args.Count, args.ActiveOnly)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// create withdrawals list from the collection\n\tlist := make([]WithdrawRequest, len(wr.Collection))\n\tfor i, req := range wr.Collection {\n\t\tlist[i] = NewWithdrawRequest(req)\n\t}\n\n\t// return the final resolvable list\n\treturn 
list, nil\n}" ]
[ "0.6501903", "0.62330467", "0.5987337", "0.5910363", "0.5897883", "0.5802158", "0.5735387", "0.5701625", "0.56914574", "0.56089395", "0.5605273", "0.559851", "0.5584993", "0.55758", "0.5575501", "0.55489874", "0.5545602", "0.5539576", "0.55338687", "0.5519495", "0.5516642", "0.5515322", "0.54920465", "0.5483645", "0.5482263", "0.5468699", "0.54684585", "0.54673135", "0.5465175", "0.5463982", "0.5451448", "0.54489297", "0.5441532", "0.54385316", "0.5428942", "0.54235864", "0.541805", "0.5415095", "0.54024065", "0.5394145", "0.53917867", "0.5370003", "0.5368091", "0.5365567", "0.53447074", "0.5341754", "0.5324609", "0.5300175", "0.5297417", "0.529694", "0.5296158", "0.52910537", "0.52836597", "0.52804947", "0.5273985", "0.5247686", "0.52297795", "0.5225637", "0.52242655", "0.5207218", "0.5207121", "0.5181311", "0.5179515", "0.5173665", "0.5171848", "0.51688033", "0.5157058", "0.515704", "0.51532966", "0.51513714", "0.5151145", "0.5150728", "0.51442164", "0.51371586", "0.5136285", "0.513375", "0.5131464", "0.5122958", "0.5120164", "0.5111464", "0.50980884", "0.50955653", "0.50937617", "0.5092649", "0.5083794", "0.50715154", "0.50584877", "0.50461054", "0.5044692", "0.5008336", "0.5006832", "0.4984504", "0.4983672", "0.4971261", "0.49656686", "0.49618942", "0.49573147", "0.49537656", "0.49521348", "0.4951926" ]
0.8049243
0
SearchForExistedWithdrawsAndDeposits returns withdrawal and deposit data
func (h *HUOBI) SearchForExistedWithdrawsAndDeposits(ctx context.Context, c currency.Code, transferType, direction string, fromID, limit int64) (WithdrawalHistory, error) { var resp WithdrawalHistory vals := url.Values{} vals.Set("type", transferType) if !c.IsEmpty() { vals.Set("currency", c.Lower().String()) } if direction != "" { vals.Set("direction", direction) } if fromID > 0 { vals.Set("from", strconv.FormatInt(fromID, 10)) } if limit > 0 { vals.Set("size", strconv.FormatInt(limit, 10)) } return resp, h.SendAuthenticatedHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, huobiWithdrawHistory, vals, nil, &resp, false) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *Service) GetWithdraw(c context.Context, dateVersion string, from, limit int) (count int, withdrawVos []*model.WithdrawVo, err error) {\n\tcount, upAccounts, err := s.UpWithdraw(c, dateVersion, from, limit)\n\tif err != nil {\n\t\tlog.Error(\"s.UpWithdraw error(%v)\", err)\n\t\treturn\n\t}\n\n\tmids := make([]int64, len(upAccounts))\n\tfor i, up := range upAccounts {\n\t\tmids[i] = up.MID\n\t}\n\n\twithdrawVos = make([]*model.WithdrawVo, 0)\n\tif len(mids) == 0 {\n\t\treturn\n\t}\n\n\tupIncomeWithdrawMap, err := s.dao.QueryUpWithdrawByMids(c, mids, dateVersion)\n\tif err != nil {\n\t\tlog.Error(\"s.dao.QueryUpWithdrawByMids error(%v)\", err)\n\t\treturn\n\t}\n\n\tfor _, up := range upAccounts {\n\t\tif upIncomeWithdraw, ok := upIncomeWithdrawMap[up.MID]; ok && upIncomeWithdraw.State == _withdrawing {\n\t\t\tvo := &model.WithdrawVo{\n\t\t\t\tMID: up.MID,\n\t\t\t\tThirdCoin: float64(up.TotalUnwithdrawIncome) * float64(0.01),\n\t\t\t\tThirdOrderNo: strconv.FormatInt(upIncomeWithdraw.ID, 10),\n\t\t\t\tCTime: time.Unix(int64(upIncomeWithdraw.CTime), 0).Format(\"2006-01-02 15:04:05\"),\n\t\t\t\tNotifyURL: \"http://up-profit.bilibili.co/allowance/api/x/internal/growup/up/withdraw/success\",\n\t\t\t}\n\n\t\t\twithdrawVos = append(withdrawVos, vo)\n\t\t}\n\t}\n\n\treturn\n}", "func (_TokenStakingEscrow *TokenStakingEscrowFilterer) FilterDepositWithdrawn(opts *bind.FilterOpts, operator []common.Address, grantee []common.Address) (*TokenStakingEscrowDepositWithdrawnIterator, error) {\n\n\tvar operatorRule []interface{}\n\tfor _, operatorItem := range operator {\n\t\toperatorRule = append(operatorRule, operatorItem)\n\t}\n\tvar granteeRule []interface{}\n\tfor _, granteeItem := range grantee {\n\t\tgranteeRule = append(granteeRule, granteeItem)\n\t}\n\n\tlogs, sub, err := _TokenStakingEscrow.contract.FilterLogs(opts, \"DepositWithdrawn\", operatorRule, granteeRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn 
&TokenStakingEscrowDepositWithdrawnIterator{contract: _TokenStakingEscrow.contract, event: \"DepositWithdrawn\", logs: logs, sub: sub}, nil\n}", "func (p *Poloniex) GetDepositsWithdrawals(ctx context.Context, start, end string) (DepositsWithdrawals, error) {\n\tresp := DepositsWithdrawals{}\n\tvalues := url.Values{}\n\n\tif start != \"\" {\n\t\tvalues.Set(\"start\", start)\n\t} else {\n\t\tvalues.Set(\"start\", \"0\")\n\t}\n\n\tif end != \"\" {\n\t\tvalues.Set(\"end\", end)\n\t} else {\n\t\tvalues.Set(\"end\", strconv.FormatInt(time.Now().Unix(), 10))\n\t}\n\n\treturn resp, p.SendAuthenticatedHTTPRequest(ctx, exchange.RestSpot, http.MethodPost, poloniexDepositsWithdrawals, values, &resp)\n}", "func (s *Store) getDepositInfo(Txid string) (DepositInfo, error) {\n\tvar di DepositInfo\n\n\terr := s.db.View(func(tx *bolt.Tx) error {\n\t\tvar err error\n\t\tdi, err = s.getDepositInfoTx(tx, Txid)\n\t\treturn err\n\t})\n\n\treturn di, err\n}", "func AllWithdraw() []Withdraw {\n\torm := get_DBFront()\n\tvar with []Withdraw\n\terr := orm.SetTable(\"withdraw\").FindAll(&with)\n\tif !check_err(err) {\n\t\tLog(Log_Struct{\"error\", \"DB_Error_Line_699\", err})\n\t}\n\tSliceReverse(with)\n\treturn with\n}", "func (s *Store) GetOrCreateDepositInfo(dv scanner.Deposit) (DepositInfo, error) {\n\tlog := s.log.WithField(\"deposit\", dv)\n\n\tvar finalDepositInfo DepositInfo\n\tif err := s.db.Update(func(tx *bolt.Tx) error {\n\t\tdi, err := s.getDepositInfoTx(tx, dv.ID())\n\t\tswitch err.(type) {\n\t\tcase nil:\n\t\t\tfinalDepositInfo = di\n\t\t\treturn nil\n\n\t\tcase dbutil.ObjectNotExistErr:\n\t\t\tlog.Info(\"DepositInfo not found in DB, inserting\")\n\t\t\tboundAddr, err := s.getBindAddressTx(tx, dv.Address, dv.CoinType)\n\t\t\tif err != nil {\n\t\t\t\terr = fmt.Errorf(\"GetBindAddress failed: %v\", err)\n\t\t\t\tlog.WithError(err).Error(err)\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif boundAddr == nil {\n\t\t\t\terr = 
ErrNoBoundAddress\n\t\t\t\tlog.WithError(err).Error(err)\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\terr = s.createDepositTrackTx(tx, boundAddr)\n\t\t\tif err != nil {\n\t\t\t\terr = fmt.Errorf(\"CreateDepositTrack failed: %v\", err)\n\t\t\t\tlog.WithError(err).Error(err)\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tlog = log.WithField(\"boundAddr\", boundAddr)\n\n\t\t\t// Integrity check of the boundAddr data against the deposit value data\n\t\t\tif boundAddr.CoinType != dv.CoinType {\n\t\t\t\terr := fmt.Errorf(\"boundAddr.CoinType != dv.CoinType\")\n\t\t\t\tlog.WithError(err).Error()\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t//TODO (therealssj): add owner address?\n\t\t\tdi := DepositInfo{\n\t\t\t\tCoinType: dv.CoinType,\n\t\t\t\tDepositAddress: dv.Address,\n\t\t\t\tKittyID: boundAddr.KittyID,\n\t\t\t\tDepositID: dv.ID(),\n\t\t\t\tStatus: StatusWaitDecide,\n\t\t\t\tDepositValue: dv.Value,\n\t\t\t\tDeposit: dv,\n\t\t\t}\n\n\t\t\tlog = log.WithField(\"depositInfo\", di)\n\n\t\t\tupdatedDi, err := s.addDepositInfoTx(tx, di)\n\t\t\tif err != nil {\n\t\t\t\terr = fmt.Errorf(\"addDepositInfoTx failed: %v\", err)\n\t\t\t\tlog.WithError(err).Error(err)\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tfinalDepositInfo = updatedDi\n\n\t\t\treturn nil\n\n\t\tdefault:\n\t\t\terr = fmt.Errorf(\"getDepositInfo failed: %v\", err)\n\t\t\tlog.WithError(err).Error(err)\n\t\t\treturn err\n\t\t}\n\t}); err != nil {\n\t\treturn DepositInfo{}, err\n\t}\n\n\treturn finalDepositInfo, nil\n\n}", "func AllDeposit() []Deposit {\n\torm := get_DBFront()\n\tvar depo []Deposit\n\terr := orm.SetTable(\"deposit\").FindAll(&depo)\n\tif !check_err(err) {\n\t\tLog(Log_Struct{\"error\", \"DB_Error_Line_671\", err})\n\t}\n\tSliceReverse(depo)\n\treturn depo\n}", "func (p *TDepositWithdrawServiceClient) AuditDepositWithdraw(ctx context.Context, traceId string, status string, mark string, withdrawId int32) (r bool, err error) {\n var _args5 TDepositWithdrawServiceAuditDepositWithdrawArgs\n _args5.TraceId = traceId\n _args5.Status 
= status\n _args5.Mark = mark\n _args5.WithdrawId = withdrawId\n var _result6 TDepositWithdrawServiceAuditDepositWithdrawResult\n if err = p.c.Call(ctx, \"auditDepositWithdraw\", &_args5, &_result6); err != nil {\n return\n }\n return _result6.GetSuccess(), nil\n}", "func (_IWETH *IWETHSession) Deposit() (*types.Transaction, error) {\r\n\treturn _IWETH.Contract.Deposit(&_IWETH.TransactOpts)\r\n}", "func (_TokenStakingEscrow *TokenStakingEscrowFilterer) WatchDepositWithdrawn(opts *bind.WatchOpts, sink chan<- *TokenStakingEscrowDepositWithdrawn, operator []common.Address, grantee []common.Address) (event.Subscription, error) {\n\n\tvar operatorRule []interface{}\n\tfor _, operatorItem := range operator {\n\t\toperatorRule = append(operatorRule, operatorItem)\n\t}\n\tvar granteeRule []interface{}\n\tfor _, granteeItem := range grantee {\n\t\tgranteeRule = append(granteeRule, granteeItem)\n\t}\n\n\tlogs, sub, err := _TokenStakingEscrow.contract.WatchLogs(opts, \"DepositWithdrawn\", operatorRule, granteeRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(TokenStakingEscrowDepositWithdrawn)\n\t\t\t\tif err := _TokenStakingEscrow.contract.UnpackLog(event, \"DepositWithdrawn\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}", "func (_IWETH *IWETHTransactorSession) Deposit() (*types.Transaction, error) {\r\n\treturn _IWETH.Contract.Deposit(&_IWETH.TransactOpts)\r\n}", "func (_OracleMgr *OracleMgrSession) GetDepositAtIndex(index 
*big.Int) (struct {\n\tAmount *big.Int\n\tAvailableAt *big.Int\n}, error) {\n\treturn _OracleMgr.Contract.GetDepositAtIndex(&_OracleMgr.CallOpts, index)\n}", "func QueryWithdraws(rpcAddr string) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\trequest := common.GetInterxRequest(r)\n\t\tresponse := common.GetResponseFormat(request, rpcAddr)\n\t\tstatusCode := http.StatusOK\n\n\t\tcommon.GetLogger().Info(\"[query-withdraws] Entering withdraws query\")\n\n\t\tif !common.RPCMethods[\"GET\"][config.QueryWithdraws].Enabled {\n\t\t\tresponse.Response, response.Error, statusCode = common.ServeError(0, \"\", \"API disabled\", http.StatusForbidden)\n\t\t} else {\n\t\t\tif common.RPCMethods[\"GET\"][config.QueryWithdraws].CachingEnabled {\n\t\t\t\tfound, cacheResponse, cacheError, cacheStatus := common.SearchCache(request, response)\n\t\t\t\tif found {\n\t\t\t\t\tresponse.Response, response.Error, statusCode = cacheResponse, cacheError, cacheStatus\n\t\t\t\t\tcommon.WrapResponse(w, request, *response, statusCode, false)\n\n\t\t\t\t\tcommon.GetLogger().Info(\"[query-withdraws] Returning from the cache\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tresponse.Response, response.Error, statusCode = QueryBlockTransactionsHandler(rpcAddr, r, true)\n\t\t}\n\n\t\tcommon.WrapResponse(w, request, *response, statusCode, common.RPCMethods[\"GET\"][config.QueryStatus].CachingEnabled)\n\t}\n}", "func (keeper Keeper) GetDeposit(ctx sdk.Context, symbol string, address sdk.CUAddress, hash string, index uint64) sdk.DepositItem {\n\tstore := ctx.KVStore(keeper.key)\n\tbz := store.Get(types.DepositStoreKey(symbol, address, hash, index))\n\tif bz == nil {\n\t\treturn sdk.DepositNil\n\t}\n\tvar item sdk.DepositItem\n\tkeeper.cdc.MustUnmarshalBinaryBare(bz, &item)\n\n\treturn item\n}", "func deposit(ctx iscp.Sandbox) (dict.Dict, error) {\n\tctx.Log().Debugf(\"accounts.deposit.begin -- %s\", ctx.IncomingTransfer())\n\n\tmustCheckLedger(ctx.State(), 
\"accounts.deposit.begin\")\n\n\tcaller := ctx.Caller()\n\tparams := kvdecoder.New(ctx.Params(), ctx.Log())\n\ttargetAccount := params.MustGetAgentID(ParamAgentID, caller)\n\ttargetAccount = commonaccount.AdjustIfNeeded(targetAccount, ctx.ChainID())\n\n\t// funds currently are in the common account (because call is to 'accounts'), they must be moved to the target\n\tsucc := MoveBetweenAccounts(ctx.State(), commonaccount.Get(ctx.ChainID()), targetAccount, ctx.IncomingTransfer())\n\tassert.NewAssert(ctx.Log()).Require(succ, \"internal error: failed to deposit to %s\", targetAccount.String())\n\n\tctx.Log().Debugf(\"accounts.deposit.success: target: %s\\n%s\",\n\t\ttargetAccount, ctx.IncomingTransfer().String())\n\n\tmustCheckLedger(ctx.State(), \"accounts.deposit.exit\")\n\treturn nil, nil\n}", "func (_OracleMgr *OracleMgrCallerSession) GetDepositAtIndex(index *big.Int) (struct {\n\tAmount *big.Int\n\tAvailableAt *big.Int\n}, error) {\n\treturn _OracleMgr.Contract.GetDepositAtIndex(&_OracleMgr.CallOpts, index)\n}", "func (s *Store) GetDepositStats() (int64, int64, int64, error) {\n\tvar totalBTCReceived int64\n\tvar totalSKYReceived int64\n\tvar totalBoxesSent int64\n\n\tif err := s.db.View(func(tx *bolt.Tx) error {\n\t\treturn dbutil.ForEach(tx, DepositInfoBkt, func(k, v []byte) error {\n\t\t\tvar dpi DepositInfo\n\t\t\tif err := json.Unmarshal(v, &dpi); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif dpi.CoinType == scanner.CoinTypeBTC {\n\t\t\t\ttotalBTCReceived += dpi.DepositValue\n\t\t\t}\n\n\t\t\tif dpi.CoinType == scanner.CoinTypeSKY {\n\t\t\t\ttotalSKYReceived += dpi.DepositValue\n\t\t\t}\n\n\t\t\t// TotalBoxesSent = no. 
of deposits with status == done\n\t\t\tif dpi.Status == StatusDone {\n\t\t\t\ttotalBoxesSent++\n\t\t\t}\n\n\t\t\treturn nil\n\t\t})\n\t}); err != nil {\n\t\treturn -1, -1, -1, err\n\t}\n\n\treturn totalBTCReceived, totalSKYReceived, totalBoxesSent, nil\n}", "func (_Wmatic *WmaticSession) Deposit() (*types.Transaction, error) {\n\treturn _Wmatic.Contract.Deposit(&_Wmatic.TransactOpts)\n}", "func (h *Reports) Withdrawals(ctx context.Context, w http.ResponseWriter, r *http.Request, params map[string]string) error {\n\n\tvar data = make(map[string]interface{})\n\n\tclaims, err := auth.ClaimsFromContext(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfields := []datatable.DisplayField{\n\t\t{Field: \"id\", Title: \"ID\", Visible: false, Searchable: true, Orderable: true, Filterable: false},\n\t\t{Field: \"amount\", Title: \"Amount\", Visible: true, Searchable: false, Orderable: true, Filterable: true, FilterPlaceholder: \"filter Quantity\"},\n\t\t{Field: \"created_at\", Title: \"Date\", Visible: true, Searchable: true, Orderable: true, Filterable: true, FilterPlaceholder: \"filter Date\"},\n\t\t{Field: \"narration\", Title: \"Narration\", Visible: true, Searchable: true, Orderable: true, Filterable: true, FilterPlaceholder: \"filter Account\"},\n\t\t{Field: \"customer_name\", Title: \"Customer\", Visible: true, Searchable: true, Orderable: true, Filterable: true, FilterPlaceholder: \"filter Account\"},\n\t\t{Field: \"account\", Title: \"Account Number\", Visible: true, Searchable: true, Orderable: true, Filterable: true, FilterPlaceholder: \"filter Account\"},\n\t\t{Field: \"sales_rep_id\", Title: \"Recorded By\", Visible: true, Searchable: true, Orderable: false, Filterable: true, FilterPlaceholder: \"filter Recorder\"},\n\t}\n\n\tmapFunc := func(q transaction.TxReportResponse, cols []datatable.DisplayField) (resp []datatable.ColumnValue, err error) {\n\t\tfor i := 0; i < len(cols); i++ {\n\t\t\tcol := cols[i]\n\t\t\tvar v datatable.ColumnValue\n\t\t\tswitch 
col.Field {\n\t\t\tcase \"id\":\n\t\t\t\tv.Value = fmt.Sprintf(\"%s\", q.ID)\n\t\t\tcase \"amount\":\n\t\t\t\tv.Value = fmt.Sprintf(\"%f\", q.Amount)\n\t\t\t\tp := message.NewPrinter(language.English)\n\t\t\t\tv.Formatted = p.Sprintf(\"<a href='%s'>%.2f</a>\", urlCustomersTransactionsView(q.CustomerID, q.AccountID, q.ID), q.Amount)\n\t\t\tcase \"created_at\":\n\t\t\t\tdate := web.NewTimeResponse(ctx, time.Unix(q.CreatedAt, 0))\n\t\t\t\tv.Value = date.LocalDate\n\t\t\t\tv.Formatted = date.LocalDate\n\t\t\tcase \"narration\":\n\t\t\t\tvalues := strings.Split(q.Narration, \":\")\n\t\t\t\tif len(values) > 1 {\n\t\t\t\t\tif values[0] == \"sale\" {\n\t\t\t\t\t\tv.Value = values[1]\n\t\t\t\t\t\tv.Formatted = fmt.Sprintf(\"<a href='%s'>%s</a>\", urlSalesView(values[2]), v.Value)\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tv.Value = q.Narration\n\t\t\t\t\tv.Formatted = q.Narration\n\t\t\t\t}\n\t\t\tcase \"payment_method\":\n\t\t\t\tv.Value = q.PaymentMethod\n\t\t\t\tv.Formatted = q.PaymentMethod\n\t\t\tcase \"customer_name\":\n\t\t\t\tv.Value = q.CustomerName\n\t\t\t\tv.Formatted = fmt.Sprintf(\"<a href='%s'>%s</a>\", urlCustomersView(q.CustomerID), v.Value)\n\t\t\tcase \"account\":\n\t\t\t\tv.Value = q.AccountNumber\n\t\t\t\tv.Formatted = fmt.Sprintf(\"<a href='%s'>%s</a>\", urlCustomersAccountsView(q.CustomerID, q.AccountID), v.Value)\n\t\t\tcase \"sales_rep_id\":\n\t\t\t\tv.Value = q.SalesRepID\n\t\t\t\tv.Formatted = fmt.Sprintf(\"<a href='%s'>%s</a>\", urlUsersView(q.SalesRepID), q.SalesRep)\n\t\t\tdefault:\n\t\t\t\treturn resp, errors.Errorf(\"Failed to map value for %s.\", col.Field)\n\t\t\t}\n\t\t\tresp = append(resp, v)\n\t\t}\n\n\t\treturn resp, nil\n\t}\n\n\tvar txWhere = []string{\"tx_type = 'withdrawal'\"}\n\tvar txArgs []interface{}\n\n\tvar date = time.Now()\n\tif v := r.URL.Query().Get(\"start_date\"); v != \"\" {\n\t\tdate, err = time.Parse(\"01/02/2006\", v)\n\t\tif err != nil {\n\t\t\tdate = time.Now()\n\t\t\treturn err\n\t\t}\n\t}\n\tdate = 
date.Truncate(time.Millisecond)\n\tdate = now.New(date).BeginningOfDay().Add(-1 * time.Hour)\n\ttxWhere = append(txWhere, fmt.Sprintf(\"created_at >= $%d\", len(txArgs)+1))\n\ttxArgs = append(txArgs, date.UTC().Unix())\n\tdata[\"startDate\"] = date.Format(\"01/02/2006\")\n\n\tdate = time.Now()\n\tif v := r.URL.Query().Get(\"end_date\"); v != \"\" {\n\t\tdate, err = time.Parse(\"01/02/2006\", v)\n\t\tif err != nil {\n\t\t\tdate = time.Now()\n\t\t\treturn err\n\t\t}\n\n\t}\n\tdate = date.Truncate(time.Millisecond)\n\tdate = now.New(date).EndOfDay().Add(-1 * time.Hour)\n\ttxWhere = append(txWhere, fmt.Sprintf(\"created_at <= $%d\", len(txArgs)+1))\n\ttxArgs = append(txArgs, date.Unix())\n\tdata[\"endDate\"] = date.Format(\"01/02/2006\")\n\n\tloadFunc := func(ctx context.Context, sorting string, fields []datatable.DisplayField) (resp [][]datatable.ColumnValue, err error) {\n\n\t\tvar order []string\n\t\tif len(sorting) > 0 {\n\t\t\torder = strings.Split(sorting, \",\")\n\t\t}\n\n\t\tfor i := range txWhere {\n\t\t\ttxWhere[i] = \"tx.\" + txWhere[i]\n\t\t}\n\t\tres, err := h.TransactionRepo.TxReport(ctx, claims, transaction.FindRequest{\n\t\t\tOrder: order, Where: strings.Join(txWhere, \" AND \"), Args: txArgs,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn resp, err\n\t\t}\n\n\t\tfor _, a := range res {\n\t\t\tl, err := mapFunc(a, fields)\n\t\t\tif err != nil {\n\t\t\t\treturn resp, errors.Wrapf(err, \"Failed to map brand for display.\")\n\t\t\t}\n\n\t\t\tresp = append(resp, l)\n\t\t}\n\n\t\treturn resp, nil\n\t}\n\n\tdt, err := datatable.New(ctx, w, r, h.Redis, fields, loadFunc)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif dt.HasCache() {\n\t\treturn nil\n\t}\n\n\tif ok, err := dt.Render(); ok {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\n\tdata[\"datatable\"] = dt.Response()\n\n\treturn h.Renderer.Render(ctx, w, r, TmplLayoutBase, \"report-withdrawals.gohtml\", web.MIMETextHTMLCharsetUTF8, http.StatusOK, data)\n}", "func ReadDeposits(rows 
*sql.Rows, a *Deposit) error {\n\treturn rows.Scan(&a.DID, &a.BID, &a.DEPID, &a.DPMID, &a.Dt, &a.Amount, &a.ClearedAmount, &a.FLAGS, &a.CreateTS, &a.CreateBy, &a.LastModTime, &a.LastModBy)\n}", "func (_Wmatic *WmaticTransactorSession) Deposit() (*types.Transaction, error) {\n\treturn _Wmatic.Contract.Deposit(&_Wmatic.TransactOpts)\n}", "func withdraw(ctx iscp.Sandbox) (dict.Dict, error) {\n\tstate := ctx.State()\n\tmustCheckLedger(state, \"accounts.withdraw.begin\")\n\n\tif ctx.Caller().Address().Equals(ctx.ChainID().AsAddress()) {\n\t\t// if the caller is on the same chain, do nothing\n\t\treturn nil, nil\n\t}\n\ttokensToWithdraw, ok := GetAccountBalances(state, ctx.Caller())\n\tif !ok {\n\t\t// empty balance, nothing to withdraw\n\t\treturn nil, nil\n\t}\n\t// will be sending back to default entry point\n\ta := assert.NewAssert(ctx.Log())\n\t// bring balances to the current account (owner's account). It is needed for subsequent Send call\n\ta.Require(MoveBetweenAccounts(state, ctx.Caller(), commonaccount.Get(ctx.ChainID()), tokensToWithdraw),\n\t\t\"accounts.withdraw.inconsistency. failed to move tokens to owner's account\")\n\n\t// add incoming tokens (after fees) to the balances to be withdrawn. Otherwise they would end up in the common account\n\ttokensToWithdraw.AddAll(ctx.IncomingTransfer())\n\t// Send call assumes tokens are in the current account\n\ta.Require(ctx.Send(ctx.Caller().Address(), tokensToWithdraw, &iscp.SendMetadata{\n\t\tTargetContract: ctx.Caller().Hname(),\n\t}), \"accounts.withdraw.inconsistency: failed sending tokens \")\n\n\tctx.Log().Debugf(\"accounts.withdraw.success. 
Sent to address %s\", tokensToWithdraw.String())\n\n\tmustCheckLedger(state, \"accounts.withdraw.exit\")\n\treturn nil, nil\n}", "func (c *DepositClient) Get(ctx context.Context, id int) (*Deposit, error) {\n\treturn c.Query().Where(deposit.ID(id)).Only(ctx)\n}", "func (k *Keeper) GetDeposit(ctx sdk.Context, address sdk.AccAddress) (deposit types.Deposit, found bool) {\n\tstore := k.Store(ctx)\n\n\tkey := types.DepositKey(address)\n\tvalue := store.Get(key)\n\tif value == nil {\n\t\treturn deposit, false\n\t}\n\n\tk.cdc.MustUnmarshalBinaryBare(value, &deposit)\n\treturn deposit, true\n}", "func (mapper GovMapper) GetDeposit(proposalID uint64, depositorAddr btypes.Address) (deposit gtypes.Deposit, exists bool) {\n\texists = mapper.Get(KeyDeposit(proposalID, depositorAddr), &deposit)\n\tif !exists {\n\t\treturn gtypes.Deposit{}, false\n\t}\n\n\treturn deposit, true\n}", "func (hs *HistoryService) Withdraw() (*CurrencyHistory, error) {\n\treturn hs.currency(\"withdraw\")\n}", "func (k Keeper) GetAllDeposit(ctx sdk.Context) (list []types.Deposit) {\n\tstore := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.DepositKey))\n\titerator := sdk.KVStorePrefixIterator(store, []byte{})\n\n\tdefer iterator.Close()\n\n\tfor ; iterator.Valid(); iterator.Next() {\n\t\tvar val types.Deposit\n\t\tk.cdc.MustUnmarshalBinaryBare(iterator.Value(), &val)\n\t\tlist = append(list, val)\n\t}\n\n\treturn\n}", "func (k Keeper) GetDeposit(ctx sdk.Context, id uint64) types.Deposit {\n\tstore := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.DepositKey))\n\tvar deposit types.Deposit\n\tk.cdc.MustUnmarshalBinaryBare(store.Get(GetDepositIDBytes(id)), &deposit)\n\treturn deposit\n}", "func QueryDeposits(rpcAddr string) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\trequest := common.GetInterxRequest(r)\n\t\tresponse := common.GetResponseFormat(request, rpcAddr)\n\t\tstatusCode := 
http.StatusOK\n\n\t\tcommon.GetLogger().Error(\"[query-deposits] Entering withdraws query\")\n\n\t\tif !common.RPCMethods[\"GET\"][config.QueryDeposits].Enabled {\n\t\t\tresponse.Response, response.Error, statusCode = common.ServeError(0, \"\", \"API disabled\", http.StatusForbidden)\n\t\t} else {\n\t\t\tif common.RPCMethods[\"GET\"][config.QueryDeposits].CachingEnabled {\n\t\t\t\tfound, cacheResponse, cacheError, cacheStatus := common.SearchCache(request, response)\n\t\t\t\tif found {\n\t\t\t\t\tresponse.Response, response.Error, statusCode = cacheResponse, cacheError, cacheStatus\n\t\t\t\t\tcommon.WrapResponse(w, request, *response, statusCode, false)\n\n\t\t\t\t\tcommon.GetLogger().Info(\"[query-deposits] Returning from the cache\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tresponse.Response, response.Error, statusCode = QueryBlockTransactionsHandler(rpcAddr, r, false)\n\t\t}\n\n\t\tcommon.WrapResponse(w, request, *response, statusCode, common.RPCMethods[\"GET\"][config.QueryStatus].CachingEnabled)\n\t}\n}", "func (plasma *Plasma) GetDeposit(plasmaBlockHeight *big.Int, nonce *big.Int) (plasmaTypes.Deposit, *big.Int, bool) {\n\tdeposit, err := plasma.Deposits(nil, nonce)\n\tif err != nil {\n\t\tlogger.Error(fmt.Sprintf(\"failed deposit retrieval: %s\", err))\n\t\treturn plasmaTypes.Deposit{}, nil, false\n\t}\n\n\tif deposit.CreatedAt.Sign() == 0 {\n\t\treturn plasmaTypes.Deposit{}, nil, false\n\t}\n\n\t// check the finality bound based off pegged ETH block\n\tethBlockNum, err := plasma.ethBlockPeg(plasmaBlockHeight)\n\tif err != nil {\n\t\tlogger.Error(fmt.Sprintf(\"could not get pegged ETH Block for sidechain block %s: %s\", plasmaBlockHeight, err))\n\t\treturn plasmaTypes.Deposit{}, nil, false\n\t}\n\n\t// how many blocks have occurred since deposit.\n\t// Note: Since pegged ETH block num could be before deposit's EthBlockNum, interval may be negative\n\tinterval := new(big.Int).Sub(ethBlockNum, deposit.EthBlockNum)\n\t// how many more blocks need to get 
added for deposit to be considered final\n\t// Note: If deposit is finalized, threshold can be 0 or negative\n\tthreshold := new(big.Int).Sub(big.NewInt(int64(plasma.finalityBound)), interval)\n\tif threshold.Sign() > 0 {\n\t\treturn plasmaTypes.Deposit{}, threshold, false\n\t}\n\n\treturn plasmaTypes.Deposit{\n\t\tOwner: deposit.Owner,\n\t\tAmount: deposit.Amount,\n\t\tEthBlockNum: deposit.EthBlockNum,\n\t}, threshold, true\n}", "func (s *Service) WithdrawDetail(c context.Context, mid int64) (upWithdraws []*model.UpIncomeWithdraw, err error) {\n\treturn s.dao.QueryUpWithdrawByMID(c, mid)\n}", "func (q queryServer) Deposit(ctx context.Context, req *v1.QueryDepositRequest) (*v1.QueryDepositResponse, error) {\n\tif req == nil {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"invalid request\")\n\t}\n\n\tif req.ProposalId == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"proposal id can not be 0\")\n\t}\n\n\tif req.Depositor == \"\" {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"empty depositor address\")\n\t}\n\n\tdepositor, err := q.k.authKeeper.AddressCodec().StringToBytes(req.Depositor)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdeposit, err := q.k.Deposits.Get(ctx, collections.Join(req.ProposalId, sdk.AccAddress(depositor)))\n\tif err != nil {\n\t\treturn nil, status.Error(codes.NotFound, err.Error())\n\t}\n\n\treturn &v1.QueryDepositResponse{Deposit: &deposit}, nil\n}", "func (hs *HistoryService) Deposit() (*CurrencyHistory, error) {\n\treturn hs.currency(\"deposit\")\n}", "func (_XStaking *XStakingFilterer) FilterWithdrawn(opts *bind.FilterOpts, user []common.Address) (*XStakingWithdrawnIterator, error) {\n\n\tvar userRule []interface{}\n\tfor _, userItem := range user {\n\t\tuserRule = append(userRule, userItem)\n\t}\n\n\tlogs, sub, err := _XStaking.contract.FilterLogs(opts, \"Withdrawn\", userRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &XStakingWithdrawnIterator{contract: _XStaking.contract, event: 
\"Withdrawn\", logs: logs, sub: sub}, nil\n}", "func (_TokenStakingEscrow *TokenStakingEscrowFilterer) FilterRevokedDepositWithdrawn(opts *bind.FilterOpts, operator []common.Address, grantManager []common.Address) (*TokenStakingEscrowRevokedDepositWithdrawnIterator, error) {\n\n\tvar operatorRule []interface{}\n\tfor _, operatorItem := range operator {\n\t\toperatorRule = append(operatorRule, operatorItem)\n\t}\n\tvar grantManagerRule []interface{}\n\tfor _, grantManagerItem := range grantManager {\n\t\tgrantManagerRule = append(grantManagerRule, grantManagerItem)\n\t}\n\n\tlogs, sub, err := _TokenStakingEscrow.contract.FilterLogs(opts, \"RevokedDepositWithdrawn\", operatorRule, grantManagerRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &TokenStakingEscrowRevokedDepositWithdrawnIterator{contract: _TokenStakingEscrow.contract, event: \"RevokedDepositWithdrawn\", logs: logs, sub: sub}, nil\n}", "func (s *server) Deposit(ctx context.Context, request *event.DepositParam) (*event.Response, error) {\n\treturn &event.Response{Status: int32(200), Message: string(\"Deposit\"), Data: []*event.Deposit{}}, nil\n}", "func (e Exchange) WithdrawalFiatFunds(exch, bankaccountid string, request *withdraw.FiatRequest) (string, error) {\n\tex, err := e.GetExchange(exch)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tv, err := engine.Bot.Config.GetBankAccountByID(bankaccountid)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\totp, err := engine.GetExchangeoOTPByName(exch)\n\tif err == nil {\n\t\totpValue, errParse := strconv.ParseInt(otp, 10, 64)\n\t\tif errParse != nil {\n\t\t\treturn \"\", errors.New(\"failed to generate OTP unable to continue\")\n\t\t}\n\t\trequest.GenericInfo.OneTimePassword = otpValue\n\t}\n\trequest.BankAccountName = v.AccountName\n\trequest.BankAccountNumber = v.AccountNumber\n\trequest.BankName = v.BankName\n\trequest.BankAddress = v.BankAddress\n\trequest.BankCity = v.BankPostalCity\n\trequest.BankCountry = 
v.BankCountry\n\trequest.BankPostalCode = v.BankPostalCode\n\trequest.BSB = v.BSBNumber\n\trequest.SwiftCode = v.SWIFTCode\n\trequest.IBAN = v.IBAN\n\n\terr = withdraw.Valid(request)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn ex.WithdrawFiatFunds(request)\n}", "func WithdrawByUid(uid int) []Withdraw {\n\tvar with []Withdraw\n\tfor _, v := range AllWithdraw() {\n\t\tif v.Uid == uid {\n\t\t\twith = append(with, v)\n\t\t}\n\t}\n\tSliceReverse(with)\n\treturn with\n}", "func (_Lmc *LmcFilterer) FilterWithdrawn(opts *bind.FilterOpts, user []common.Address) (*LmcWithdrawnIterator, error) {\n\n\tvar userRule []interface{}\n\tfor _, userItem := range user {\n\t\tuserRule = append(userRule, userItem)\n\t}\n\n\tlogs, sub, err := _Lmc.contract.FilterLogs(opts, \"Withdrawn\", userRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &LmcWithdrawnIterator{contract: _Lmc.contract, event: \"Withdrawn\", logs: logs, sub: sub}, nil\n}", "func deposit(res http.ResponseWriter, req *http.Request){\n\tvar result Account\n\n\tcollection := client.Database(Database).Collection(Collection)\n\tparams := url_parser(req.URL.String())\n\tfilter := bson.D{{\"identifier\", clean_string(params[\"account\"])}}\n\terr := collection.FindOne(context.TODO(), filter).Decode(&result)\n\t\n\tchange, err := strconv.ParseFloat(clean_string(params[\"deposit\"]), 64)\n\t\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tinitial, err := strconv.ParseFloat(result.Balance, 64)\n\tupdated := strconv.FormatFloat((initial + change), 'f', -1, 64)\n\tresult.Balance = updated\n\tif err != nil{\n\t\tfmt.Println(err)\n\t}\n\tentry, err := bson.Marshal(result)\n\t_ , err = collection.ReplaceOne(context.TODO(), filter, entry)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tgenerate_record(clean_string(params[\"account\"]), updated, \"+\"+clean_string(params[\"deposit\"]), \"deposit\")\n}", "func (sc stakingClient) Deposit(fromInfo keys.Info, passWd, coinsStr, memo string, accNum, seqNum uint64) 
(\n\tresp sdk.TxResponse, err error) {\n\tif err = params.CheckKeyParams(fromInfo, passWd); err != nil {\n\t\treturn\n\t}\n\n\tcoin, err := sdk.ParseDecCoin(coinsStr)\n\tif err != nil {\n\t\treturn resp, fmt.Errorf(\"failed : parse Coins [%s] error: %s\", coinsStr, err)\n\t}\n\n\tmsg := types.NewMsgDeposit(fromInfo.GetAddress(), coin)\n\n\treturn sc.BuildAndBroadcast(fromInfo.GetName(), passWd, memo, []sdk.Msg{msg}, accNum, seqNum)\n}", "func (mapper GovMapper) RefundDeposits(ctx context.Context, proposalID uint64, burnDeposit bool) {\n\n\tlog := ctx.Logger()\n\tparams := mapper.GetParams(ctx)\n\taccountMapper := ctx.Mapper(account.AccountMapperName).(*account.AccountMapper)\n\tdepositsIterator := mapper.GetDeposits(proposalID)\n\tdefer depositsIterator.Close()\n\tfor ; depositsIterator.Valid(); depositsIterator.Next() {\n\t\tdeposit := &gtypes.Deposit{}\n\t\tmapper.GetCodec().MustUnmarshalBinaryBare(depositsIterator.Value(), deposit)\n\n\t\tdepositAmount := int64(deposit.Amount)\n\n\t\t//需要扣除部分押金时\n\t\tburnAmount := int64(0)\n\t\tif burnDeposit {\n\t\t\tburnAmount = params.BurnRate.Mul(types.NewDec(depositAmount)).TruncateInt64()\n\t\t}\n\n\t\trefundAmount := depositAmount - burnAmount\n\n\t\t// refund deposit\n\t\tdepositor := accountMapper.GetAccount(deposit.Depositor).(*types.QOSAccount)\n\t\tdepositor.PlusQOS(btypes.NewInt(refundAmount))\n\t\taccountMapper.SetAccount(depositor)\n\n\t\t// burn deposit\n\t\tif burnDeposit {\n\t\t\tecomapper.GetDistributionMapper(ctx).AddToCommunityFeePool(btypes.NewInt(burnAmount))\n\t\t}\n\n\t\tlog.Debug(\"RefundDeposits\", \"depositAmount\", depositAmount, \"refundAmount\", refundAmount, \"burnAmount\", burnAmount)\n\n\t\tmapper.Del(depositsIterator.Key())\n\t}\n}", "func (h *Handle) Deposit() {\n\tvar result types.HexNumber\n\tvalue, _ := new(big.Int).SetString(\"20123456789000000000000000000\", 0)\n\taccount := types.Str2Address(\"0x1b978a1d302335a6f2ebe4b8823b5e17c3c84135\")\n\terr := tokenA.Deposit.SendTransaction(&result, 
account, value)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tlog.Println(result)\n}", "func (_IWETH *IWETHTransactor) Deposit(opts *bind.TransactOpts) (*types.Transaction, error) {\r\n\treturn _IWETH.contract.Transact(opts, \"deposit\")\r\n}", "func (h *HUOBIHADAX) GetDepositWithdrawalHistory(associatedID, currency string, isDeposit bool, size int64) ([]History, error) {\n\tvar resp = struct {\n\t\tResponse\n\t\tData []History `json:\"data\"`\n\t}{}\n\n\tvals := url.Values{}\n\n\tif isDeposit {\n\t\tvals.Set(\"type\", \"deposit\")\n\t} else {\n\t\tvals.Set(\"type\", \"withdraw\")\n\t}\n\n\tvals.Set(\"from\", associatedID)\n\tvals.Set(\"size\", strconv.FormatInt(size, 10))\n\tvals.Set(\"currency\", common.StringToLower(currency))\n\n\terr := h.SendAuthenticatedHTTPRequest(http.MethodGet,\n\t\thuobiHadaxDepositAddress,\n\t\tvals,\n\t\t&resp)\n\n\tif resp.ErrorMessage != \"\" {\n\t\treturn resp.Data, errors.New(resp.ErrorMessage)\n\t}\n\treturn resp.Data, err\n}", "func (s *Service) UpWithdraw(c context.Context, dateVersion string, from, limit int) (count int, upAccounts []*model.UpAccount, err error) {\n\tcount, err = s.dao.GetUpAccountCount(c, dateVersion)\n\tif err != nil {\n\t\tlog.Error(\"s.dao.GetUpAccountCount error(%v)\", err)\n\t\treturn\n\t}\n\tif count <= 0 {\n\t\treturn\n\t}\n\n\tupAccounts, err = s.dao.QueryUpAccountByDate(c, dateVersion, from, limit)\n\tif err != nil {\n\t\tlog.Error(\"s.dao.QueryUpAccountByDate error(%v)\", err)\n\t\treturn\n\t}\n\tif len(upAccounts) == 0 {\n\t\treturn\n\t}\n\n\tmids := make([]int64, len(upAccounts))\n\tfor i, up := range upAccounts {\n\t\tmids[i] = up.MID\n\t}\n\n\t// get up_income_withdraw by mids and date\n\tupIncomeWithdrawMap, err := s.dao.QueryUpWithdrawByMids(c, mids, dateVersion)\n\tif err != nil {\n\t\tlog.Error(\"s.dao.QueryUpWithdrawByMids error(%v)\", err)\n\t\treturn\n\t}\n\n\tfor _, up := range upAccounts {\n\t\tif _, ok := upIncomeWithdrawMap[up.MID]; !ok {\n\t\t\tupIncomeWithdraw := 
&model.UpIncomeWithdraw{\n\t\t\t\tMID: up.MID,\n\t\t\t\tWithdrawIncome: up.TotalUnwithdrawIncome,\n\t\t\t\tDateVersion: dateVersion,\n\t\t\t\tState: _withdrawing,\n\t\t\t}\n\n\t\t\terr = s.InsertUpWithdrawRecord(c, upIncomeWithdraw)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"s.InsertUpWithdrawRecord error(%v)\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}", "func (_IWETH *IWETHSession) Withdraw(arg0 *big.Int) (*types.Transaction, error) {\r\n\treturn _IWETH.Contract.Withdraw(&_IWETH.TransactOpts, arg0)\r\n}", "func (k Keeper) Withdraw(ctx sdk.Context, depositor sdk.AccAddress, amount sdk.Coin) error {\n\tdeposit, found := k.GetDeposit(ctx, depositor, amount.Denom)\n\tif !found {\n\t\treturn sdkerrors.Wrapf(types.ErrDepositNotFound, \"no %s deposit found for %s\", amount.Denom, depositor)\n\t}\n\tif !deposit.Amount.IsGTE(amount) {\n\t\treturn sdkerrors.Wrapf(types.ErrInvalidWithdrawAmount, \"%s>%s\", amount, deposit.Amount)\n\t}\n\n\terr := k.supplyKeeper.SendCoinsFromModuleToAccount(ctx, types.ModuleAccountName, depositor, sdk.NewCoins(amount))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tctx.EventManager().EmitEvent(\n\t\tsdk.NewEvent(\n\t\t\ttypes.EventTypeHarvestWithdrawal,\n\t\t\tsdk.NewAttribute(sdk.AttributeKeyAmount, amount.String()),\n\t\t\tsdk.NewAttribute(types.AttributeKeyDepositor, depositor.String()),\n\t\t\tsdk.NewAttribute(types.AttributeKeyDepositDenom, amount.Denom),\n\t\t),\n\t)\n\n\tif deposit.Amount.IsEqual(amount) {\n\t\tctx.EventManager().EmitEvent(\n\t\t\tsdk.NewEvent(\n\t\t\t\ttypes.EventTypeDeleteHarvestDeposit,\n\t\t\t\tsdk.NewAttribute(types.AttributeKeyDepositor, depositor.String()),\n\t\t\t\tsdk.NewAttribute(types.AttributeKeyDepositDenom, amount.Denom),\n\t\t\t),\n\t\t)\n\t\tk.DeleteDeposit(ctx, deposit)\n\t\treturn nil\n\t}\n\n\tdeposit.Amount = deposit.Amount.Sub(amount)\n\tk.SetDeposit(ctx, deposit)\n\n\treturn nil\n}", "func (_SingleAuto *SingleAutoFilterer) FilterWithdraw(opts *bind.FilterOpts, user []common.Address, 
pid []*big.Int) (*SingleAutoWithdrawIterator, error) {\n\n\tvar userRule []interface{}\n\tfor _, userItem := range user {\n\t\tuserRule = append(userRule, userItem)\n\t}\n\tvar pidRule []interface{}\n\tfor _, pidItem := range pid {\n\t\tpidRule = append(pidRule, pidItem)\n\t}\n\n\tlogs, sub, err := _SingleAuto.contract.FilterLogs(opts, \"Withdraw\", userRule, pidRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &SingleAutoWithdrawIterator{contract: _SingleAuto.contract, event: \"Withdraw\", logs: logs, sub: sub}, nil\n}", "func (o *Account) GetAllowDepositsOk() (*bool, bool) {\n\tif o == nil || o.AllowDeposits == nil {\n\t\treturn nil, false\n\t}\n\treturn o.AllowDeposits, true\n}", "func (s *Service) WithdrawSuccess(c context.Context, orderNo int64, tradeStatus int) (err error) {\n\tupWithdraw, err := s.dao.QueryUpWithdrawByID(c, orderNo)\n\tif err != nil {\n\t\tlog.Error(\"s.dao.QueryUpWithdrawByID error(%v)\", err)\n\t\treturn\n\t}\n\n\tif tradeStatus != _withdrawSuccess {\n\t\tlog.Info(\"param tradeStatus(%d) != withdraw success(2)\", tradeStatus)\n\t\treturn\n\t}\n\n\tif upWithdraw.State == _withdrawSuccess {\n\t\tlog.Info(\"withdraw has successed already\")\n\t\treturn\n\t}\n\n\ttx, err := s.dao.BeginTran(c)\n\tif err != nil {\n\t\tlog.Error(\"s.dao.BeginTran error(%v)\", err)\n\t\treturn\n\t}\n\n\t// update up_income_withdraw state\n\trows, err := s.dao.TxUpdateUpWithdrawState(tx, orderNo, _withdrawSuccess)\n\tif err != nil {\n\t\ttx.Rollback()\n\t\tlog.Error(\"s.dao.UpdateUpWithdrawState error(%v)\", err)\n\t\treturn\n\t}\n\tif rows != 1 {\n\t\ttx.Rollback()\n\t\tlog.Error(\"s.dao.UpdateUpWithdrawState Update withdraw record error id(%d)\", orderNo)\n\t\treturn\n\t}\n\n\t// update up_account withdraw\n\trows, err = s.dao.TxUpdateUpAccountWithdraw(tx, upWithdraw.MID, upWithdraw.WithdrawIncome)\n\tif err != nil {\n\t\ttx.Rollback()\n\t\tlog.Error(\"s.dao.UpdateUpAccountWithdraw error(%v)\", err)\n\t\treturn\n\t}\n\tif rows != 1 
{\n\t\ttx.Rollback()\n\t\tlog.Error(\"s.dao.UpdateUpAccountWithdraw Update up account record error id(%d)\", orderNo)\n\t\treturn\n\t}\n\n\tmaxUpWithdrawDateVersion, err := s.dao.TxQueryMaxUpWithdrawDateVersion(tx, upWithdraw.MID)\n\tif err != nil {\n\t\ttx.Rollback()\n\t\tlog.Error(\"s.dao.QueryMaxUpWithdrawDateVersion error(%v)\", err)\n\t\treturn\n\t}\n\n\ttime := 0\n\tvar version int64\n\tfor {\n\t\tversion, err = s.dao.TxQueryUpAccountVersion(tx, upWithdraw.MID)\n\t\tif err != nil {\n\t\t\ttx.Rollback()\n\t\t\tlog.Error(\"s.dao.QueryUpAccountVersion error(%v)\", err)\n\t\t\treturn\n\t\t}\n\t\tif maxUpWithdrawDateVersion == \"\" {\n\t\t\tmaxUpWithdrawDateVersion = upWithdraw.DateVersion\n\t\t}\n\n\t\trows, err = s.dao.TxUpdateUpAccountUnwithdrawIncome(tx, upWithdraw.MID, maxUpWithdrawDateVersion, version)\n\t\tif err != nil {\n\t\t\ttx.Rollback()\n\t\t\tlog.Error(\"s.dao.UpdateUpAccountUnwithdrawIncome error(%v)\", err)\n\t\t\treturn\n\t\t}\n\t\tif rows == 1 {\n\t\t\tif err = tx.Commit(); err != nil {\n\t\t\t\tlog.Error(\"tx.Commit error\")\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\n\t\ttime++\n\t\tif time >= 10 {\n\t\t\ttx.Rollback()\n\t\t\tlog.Info(\"try to synchronize unwithdraw income 10 times error mid(%d)\", upWithdraw.MID)\n\t\t\terr = fmt.Errorf(\"try to synchronize unwithdraw income 10 times error mid(%d)\", upWithdraw.MID)\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn\n}", "func (_Wmatic *WmaticTransactor) Deposit(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _Wmatic.contract.Transact(opts, \"deposit\")\n}", "func withdraw(res http.ResponseWriter, req *http.Request){\n\tvar result Account\n\n\tcollection := client.Database(Database).Collection(Collection)\n\tparams := url_parser(req.URL.String())\n\tfilter := bson.D{{\"identifier\", clean_string(params[\"account\"])}}\n\terr := collection.FindOne(context.TODO(), filter).Decode(&result)\n\t\n\tchange, err := strconv.ParseFloat(clean_string(params[\"withdrawl\"]), 64)\n\t\n\tif err != 
nil {\n\t\tfmt.Println(err)\n\t}\n\n\tinitial, err := strconv.ParseFloat(result.Balance, 64)\n\tupdated := strconv.FormatFloat((initial - change), 'f', -1, 64)\n\tresult.Balance = updated\n\n\tif err != nil{\n\t\tfmt.Println(err)\n\t}\n\tentry, err := bson.Marshal(result)\n\t_ , err = collection.ReplaceOne(context.TODO(), filter, entry)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tgenerate_record(clean_string(params[\"account\"]), updated, \"-\"+clean_string(params[\"withdrawl\"]), \"withdrawl\")\n}", "func (b *BanknoteDataService) Withdrawal(value int) (*[]models.BanknoteData, error) {\n\tbanknoteDatas := make([]models.BanknoteData, 0)\n\tvalueLeftOver := value\n\tavailableBankNotesValues := b.WithdrawalRepository.GetAvailableBanknotesValues()\n\tlog.Debugf(\"Retrieved available bank notes: %v\", availableBankNotesValues)\n\thelpers.Slice{}.SortDesc(availableBankNotesValues)\n\tfor _, availableBanknote := range availableBankNotesValues {\n\t\tresidual := valueLeftOver % availableBanknote\n\t\tif residual != valueLeftOver {\n\t\t\tquantity := valueLeftOver / availableBanknote\n\t\t\tbanknoteDatas = append(banknoteDatas, models.BanknoteData{Value: int16(availableBanknote), Quantity: int16(quantity)})\n\t\t\tvalueLeftOver = residual\n\t\t}\n\t}\n\n\tif valueLeftOver > 0 {\n\t\tlog.Errorf(\"Value not supported. 
Value: %v | Left Over: %v\", value, valueLeftOver)\n\t\treturn nil, &exceptions.UnsupportedValueError{\n\t\t\tReason: fmt.Sprintf(\"Withdrawal not supported for value, left over: %v\", value),\n\t\t}\n\t}\n\n\treturn &banknoteDatas, nil\n}", "func (s *Store) GetDepositInfoOfKittyID(kittyID string) ([]DepositInfo, error) {\n\tvar dpis []DepositInfo\n\n\tif err := s.db.View(func(tx *bolt.Tx) error {\n\t\tboundAddr, err := s.GetKittyBindAddress(kittyID)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tvar txns []string\n\t\tif err := dbutil.GetBucketObject(tx, TxsBkt, boundAddr.Address, &txns); err != nil {\n\t\t\tswitch err.(type) {\n\t\t\tcase dbutil.ObjectNotExistErr:\n\t\t\tdefault:\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tif len(txns) == 0 {\n\t\t\tdpis = append(dpis, DepositInfo{\n\t\t\t\tStatus: StatusWaitDeposit,\n\t\t\t\tDepositAddress: boundAddr.Address,\n\t\t\t\tKittyID: kittyID,\n\t\t\t\tUpdatedAt: time.Now().UTC().Unix(),\n\t\t\t\tCoinType: boundAddr.CoinType,\n\t\t\t})\n\t\t}\n\n\t\tfor _, txn := range txns {\n\t\t\tvar dpi DepositInfo\n\t\t\tif err := dbutil.GetBucketObject(tx, DepositInfoBkt, txn, &dpi); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tdpis = append(dpis, dpi)\n\t\t}\n\n\t\treturn nil\n\t}); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// sort the dpis by update time\n\tsort.Slice(dpis, func(i, j int) bool {\n\t\treturn dpis[i].UpdatedAt < dpis[j].UpdatedAt\n\t})\n\n\t// renumber the seqs in the dpis\n\tfor i := range dpis {\n\t\tdpis[i].Seq = uint64(i)\n\t}\n\n\treturn dpis, nil\n}", "func (_TokenStakingEscrow *TokenStakingEscrowSession) DepositWithdrawnAmount(operator common.Address) (*big.Int, error) {\n\treturn _TokenStakingEscrow.Contract.DepositWithdrawnAmount(&_TokenStakingEscrow.CallOpts, operator)\n}", "func (_OracleMgr *OracleMgrCaller) GetDepositAtIndex(opts *bind.CallOpts, index *big.Int) (struct {\n\tAmount *big.Int\n\tAvailableAt *big.Int\n}, error) {\n\tret := new(struct {\n\t\tAmount *big.Int\n\t\tAvailableAt 
*big.Int\n\t})\n\tout := ret\n\terr := _OracleMgr.contract.Call(opts, out, \"getDepositAtIndex\", index)\n\treturn *ret, err\n}", "func (c *Client) Withdraw(ctx context.Context, foreignID string, amount float64, currency, address string) (Withdrawal, error) {\n\treqBody := map[string]interface{}{\n\t\t\"foreign_id\": foreignID,\n\t\t\"amount\": amount,\n\t\t\"currency\": currency,\n\t\t\"address\": address,\n\t}\n\n\treqJSON, err := json.Marshal(reqBody)\n\tif err != nil {\n\t\treturn Withdrawal{}, fmt.Errorf(\"request body marshaling error: %w\", err)\n\t}\n\n\twithdrawalURL, err := joinURL(c.api, withdrawalEndpoint)\n\tif err != nil {\n\t\treturn Withdrawal{}, fmt.Errorf(\"request url creating error: %w\", err)\n\t}\n\n\treq, err := http.NewRequestWithContext(ctx, http.MethodPost, withdrawalURL.String(), bytes.NewBuffer(reqJSON))\n\tif err != nil {\n\t\treturn Withdrawal{}, fmt.Errorf(\"request creating error: %w\", err)\n\t}\n\n\tsig, err := createHmac(c.secret, reqJSON)\n\tif err != nil {\n\t\treturn Withdrawal{}, fmt.Errorf(\"hmac signature creationg error: %w\", err)\n\t}\n\n\treq.Header.Set(contentTypeHeader, jsonContentType)\n\treq.Header.Set(keyHeader, c.apiKey)\n\treq.Header.Set(signatureHeader, sig)\n\n\tresp, err := c.client.Do(req)\n\tif err != nil {\n\t\treturn Withdrawal{}, fmt.Errorf(\"request error: %w\", err)\n\t}\n\tdefer resp.Body.Close()\n\n\terr = ensureSuccessResponse(resp)\n\tif err != nil {\n\t\treturn Withdrawal{}, fmt.Errorf(\"request failed: %w\", err)\n\t}\n\n\trespBody := struct {\n\t\tData Withdrawal `json:\"data\"`\n\t}{}\n\n\terr = json.NewDecoder(resp.Body).Decode(&respBody)\n\tif err != nil {\n\t\treturn Withdrawal{}, fmt.Errorf(\"response unmarshaling error: %w\", err)\n\t}\n\n\treturn respBody.Data, nil\n}", "func createDepositAndHotWallet(peatioClient *peatio.Client, currencies []string, opendaxAddr string) *mngapi.APIError {\n\t// Shared params\n\tparams := peatio.CreateWalletParams{\n\t\tBlockchainKey: 
\"opendax-cloud\",\n\t\tGateway: \"opendax_cloud\",\n\t\tAddress: \"address\",\n\t\tCurrencies: currencies,\n\t\tStatus: \"active\",\n\t\tSettings: peatio.Settings{\n\t\t\tURI: fmt.Sprintf(\"%v/api/v2/opx/peatio\", opendaxAddr),\n\t\t},\n\t}\n\t// Create Deposit Wallet\n\tdepositWalletParams := params\n\tdepositWalletParams.Kind = \"deposit\"\n\tdepositWalletParams.Name = fmt.Sprintf(\"%s Deposit Wallet\", strings.ToUpper(currencies[0]))\n\n\t_, depositApiError := peatioClient.CreateWallet(depositWalletParams)\n\tif depositApiError != nil {\n\t\tlog.Printf(\"ERROR: createWallets: Can't create deposit wallet. Error: %v. Errors: %v\", depositApiError.Error, depositApiError.Errors)\n\t\treturn depositApiError\n\t}\n\n\t// Create Hot Wallet\n\thotWalletParams := params\n\thotWalletParams.Kind = \"hot\"\n\thotWalletParams.Name = fmt.Sprintf(\"%s Hot Wallet\", strings.ToUpper(currencies[0]))\n\n\t_, hotApiError := peatioClient.CreateWallet(hotWalletParams)\n\tif hotApiError != nil {\n\t\tlog.Printf(\"ERROR: createWallets: Can't create deposit wallet. Error: %v. 
Errors: %v\", hotApiError.Error, hotApiError.Errors)\n\t\treturn hotApiError\n\t}\n\n\treturn nil\n}", "func (as *ApiService) Deposits(currency, status string, startAt, endAt int64, pagination *PaginationParam) (*ApiResponse, error) {\n\tp := map[string]string{}\n\tif currency != \"\" {\n\t\tp[\"currency\"] = currency\n\t}\n\tif status != \"\" {\n\t\tp[\"status\"] = status\n\t}\n\tif startAt > 0 {\n\t\tp[\"startAt\"] = IntToString(startAt)\n\t}\n\tif endAt > 0 {\n\t\tp[\"endAt\"] = IntToString(endAt)\n\t}\n\tpagination.ReadParam(p)\n\treq := NewRequest(http.MethodGet, \"/api/v1/deposits\", p)\n\treturn as.Call(req)\n}", "func (_WELV9 *WELV9Transactor) Deposit(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _WELV9.contract.Transact(opts, \"deposit\")\n}", "func (_Depositmanager *DepositmanagerCaller) PendingDeposits(opts *bind.CallOpts, arg0 *big.Int) ([32]byte, error) {\n\tvar (\n\t\tret0 = new([32]byte)\n\t)\n\tout := ret0\n\terr := _Depositmanager.contract.Call(opts, out, \"pendingDeposits\", arg0)\n\treturn *ret0, err\n}", "func (_Depositmanager *DepositmanagerSession) PendingDeposits(arg0 *big.Int) ([32]byte, error) {\n\treturn _Depositmanager.Contract.PendingDeposits(&_Depositmanager.CallOpts, arg0)\n}", "func (_IWETH *IWETHTransactorSession) Withdraw(arg0 *big.Int) (*types.Transaction, error) {\r\n\treturn _IWETH.Contract.Withdraw(&_IWETH.TransactOpts, arg0)\r\n}", "func (_TokenStakingEscrow *TokenStakingEscrowCaller) HasDeposit(opts *bind.CallOpts, operator common.Address) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _TokenStakingEscrow.contract.Call(opts, out, \"hasDeposit\", operator)\n\treturn *ret0, err\n}", "func (p *DirectBuy) Deposits() <-chan DepositInfo {\n\treturn p.deposits\n}", "func TestGetBTCDepositStatus(t *testing.T) {\n\n\ta := InitApp(\"https://mbank.dl-dev.ru/api/\")\n\n\tstat, err := a.GetBTCDepositStatus(\"tb1qtfnwald5a667730yqrvdt67aslmgn3k7qykq5a\")\n\tif err != nil 
{\n\t\tt.Fatal(err)\n\t}\n\n\tif stat == nil {\n\t\tt.Errorf(\"Empty stat\")\n\t}\n\n\tstat, err = a.GetBTCDepositStatus(\"saawdadadw\")\n\tif err.Error() != \"Address not found\" {\n\t\tt.Fatal(err)\n\t\tt.Errorf(\"Cannot found err\")\n\t}\n\n}", "func DepositByUid(uid int) []Deposit {\n\tvar depo []Deposit\n\tfor _, v := range AllDeposit() {\n\t\tif v.Uid == uid {\n\t\t\tdepo = append(depo, v)\n\t\t}\n\t}\n\tSliceReverse(depo)\n\treturn depo\n}", "func (t *SimpleChaincode) getTransactions(stub shim.ChaincodeStubInterface, finInst string) ([]byte, error) {\n\n\tvar res AllTransactions\n\n\tfmt.Println(\"Start find getTransactions\")\n\tfmt.Println(\"Looking for \" + finInst)\n\n\t//get the AllTransactions index\n\tallTxAsBytes, err := stub.GetState(\"allTx\")\n\tif err != nil {\n\t\treturn nil, errors.New(\"Failed to get all Transactions\")\n\t}\n\n\tvar txs AllTransactions\n\tjson.Unmarshal(allTxAsBytes, &txs)\n\n\tfor i := range txs.Transactions {\n\n\t\tif txs.Transactions[i].Sender == finInst {\n\t\t\tres.Transactions = append(res.Transactions, txs.Transactions[i])\n\t\t}\n\n\t\tif txs.Transactions[i].Receiver == finInst {\n\t\t\tres.Transactions = append(res.Transactions, txs.Transactions[i])\n\t\t}\n\n\t\tif finInst == AUDITOR {\n\t\t\tres.Transactions = append(res.Transactions, txs.Transactions[i])\n\t\t}\n\t}\n\n\tresAsBytes, _ := json.Marshal(res)\n\n\treturn resAsBytes, nil\n\n}", "func (_Depositmanager *DepositmanagerCallerSession) PendingDeposits(arg0 *big.Int) ([32]byte, error) {\n\treturn _Depositmanager.Contract.PendingDeposits(&_Depositmanager.CallOpts, arg0)\n}", "func (e *Event) GetWithdrawals() *Withdrawals {\n\te.RLock()\n\tdefer e.RUnlock()\n\treturn e.withdrawals\n}", "func (_Rootchain *RootchainTransactorSession) Deposit(_depositTx []byte) (*types.Transaction, error) {\n\treturn _Rootchain.Contract.Deposit(&_Rootchain.TransactOpts, _depositTx)\n}", "func (sc stakingClient) Withdraw(fromInfo keys.Info, passWd, coinsStr, memo string, accNum, 
seqNum uint64) (\n\tresp sdk.TxResponse, err error) {\n\tif err = params.CheckKeyParams(fromInfo, passWd); err != nil {\n\t\treturn\n\t}\n\n\tcoin, err := sdk.ParseDecCoin(coinsStr)\n\tif err != nil {\n\t\treturn resp, fmt.Errorf(\"failed : parse Coins [%s] error: %s\", coinsStr, err)\n\t}\n\n\tmsg := types.NewMsgWithdraw(fromInfo.GetAddress(), coin)\n\n\treturn sc.BuildAndBroadcast(fromInfo.GetName(), passWd, memo, []sdk.Msg{msg}, accNum, seqNum)\n\n}", "func GetWithdraws(queryRoute string, cdc *codec.Codec) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"withdraws\",\n\t\tShort: \"Get withdraw list by page and limit\",\n\t\tExample: \"withdraws --page=1 --limit=10\",\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tcliCtx := context.NewCLIContext().WithCodec(cdc)\n\n\t\t\t// parse inputs\n\t\t\tpage, limit, err := helpers.ParsePaginationParams(viper.GetString(flags.FlagPage), viper.GetString(flags.FlagLimit), helpers.ParamTypeCliFlag)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// prepare request\n\t\t\treq := types.WithdrawsReq{\n\t\t\t\tPage: page,\n\t\t\t\tLimit: limit,\n\t\t\t}\n\n\t\t\tbz, err := cliCtx.Codec.MarshalJSON(req)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// query and parse the result\n\t\t\tres, _, err := cliCtx.QueryWithData(fmt.Sprintf(\"custom/%s/%s\", queryRoute, types.QueryWithdraws), bz)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tvar out types.Withdraws\n\t\t\tcdc.MustUnmarshalJSON(res, &out)\n\n\t\t\treturn cliCtx.PrintOutput(out)\n\t\t},\n\t}\n\thelpers.AddPaginationCmdFlags(cmd)\n\n\treturn cmd\n}", "func (_Wmatic *WmaticFilterer) FilterWithdrawal(opts *bind.FilterOpts, src []common.Address) (*WmaticWithdrawalIterator, error) {\n\n\tvar srcRule []interface{}\n\tfor _, srcItem := range src {\n\t\tsrcRule = append(srcRule, srcItem)\n\t}\n\n\tlogs, sub, err := _Wmatic.contract.FilterLogs(opts, \"Withdrawal\", srcRule)\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\treturn &WmaticWithdrawalIterator{contract: _Wmatic.contract, event: \"Withdrawal\", logs: logs, sub: sub}, nil\n}", "func (_TokenStakingEscrow *TokenStakingEscrowCallerSession) DepositWithdrawnAmount(operator common.Address) (*big.Int, error) {\n\treturn _TokenStakingEscrow.Contract.DepositWithdrawnAmount(&_TokenStakingEscrow.CallOpts, operator)\n}", "func (_Rootchain *RootchainSession) Deposit(_depositTx []byte) (*types.Transaction, error) {\n\treturn _Rootchain.Contract.Deposit(&_Rootchain.TransactOpts, _depositTx)\n}", "func (_WELV9 *WELV9Filterer) FilterWithdrawal(opts *bind.FilterOpts, src []common.Address) (*WELV9WithdrawalIterator, error) {\n\n\tvar srcRule []interface{}\n\tfor _, srcItem := range src {\n\t\tsrcRule = append(srcRule, srcItem)\n\t}\n\n\tlogs, sub, err := _WELV9.contract.FilterLogs(opts, \"Withdrawal\", srcRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &WELV9WithdrawalIterator{contract: _WELV9.contract, event: \"Withdrawal\", logs: logs, sub: sub}, nil\n}", "func (q queryServer) Deposits(ctx context.Context, req *v1.QueryDepositsRequest) (*v1.QueryDepositsResponse, error) {\n\tif req == nil {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"invalid request\")\n\t}\n\n\tif req.ProposalId == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"proposal id can not be 0\")\n\t}\n\n\tvar deposits []*v1.Deposit\n\tdeposits, pageRes, err := query.CollectionPaginate(ctx, q.k.Deposits, req.Pagination, func(_ collections.Pair[uint64, sdk.AccAddress], deposit v1.Deposit) (*v1.Deposit, error) {\n\t\treturn &deposit, nil\n\t}, query.WithCollectionPaginationPairPrefix[uint64, sdk.AccAddress](req.ProposalId))\n\tif err != nil {\n\t\treturn nil, status.Error(codes.Internal, err.Error())\n\t}\n\n\treturn &v1.QueryDepositsResponse{Deposits: deposits, Pagination: pageRes}, nil\n}", "func Withdraw(accID string, amount int64) error {\n\tif amount <= 0 {\n\t\treturn fmt.Errorf(\"invalid amount; %d\", amount)\n\t}\n\n\tvar accs 
[]*share.Account\n\terr := client.GetByNames(ctx, share.KindAccount, []string{accID, \"Cash\"}, &accs)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"get accounts error; %v\", err)\n\t}\n\n\tif accs[0].Balance < amount {\n\t\treturn fmt.Errorf(\"balance of account %s is %d, not enough for withdraw %d\", accID, accs[0].Balance, amount)\n\t}\n\n\taccs[0].Balance -= amount\n\taccs[1].Balance += amount\n\ttrans := []*share.Transaction{\n\t\t{Type: share.TransactionTypeWithdraw, AccountID: accID, Amount: -amount},\n\t\t{Type: share.TransactionTypeDeposit, AccountID: \"Cash\", Amount: amount},\n\t}\n\tfor _, tran := range trans {\n\t\ttran.NewKey(share.KindTransaction)\n\t}\n\terr = client.SaveModels(ctx, \"\", []interface{}{accs[0], accs[1], trans[0], trans[1]})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"save models error; %v\", err)\n\t}\n\treturn nil\n}", "func (sc Funcs) Deposit(ctx wasmlib.ScFuncClientContext) *DepositCall {\n\treturn &DepositCall{Func: wasmlib.NewScFunc(ctx, HScName, HFuncDeposit)}\n}", "func (_Withdrawable *WithdrawableSession) GetDepositedBalance(arg0 common.Address, arg1 common.Address) (*big.Int, error) {\n\treturn _Withdrawable.Contract.GetDepositedBalance(&_Withdrawable.CallOpts, arg0, arg1)\n}", "func (_Vault *VaultFilterer) FilterWithdraw(opts *bind.FilterOpts) (*VaultWithdrawIterator, error) {\n\n\tlogs, sub, err := _Vault.contract.FilterLogs(opts, \"Withdraw\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &VaultWithdrawIterator{contract: _Vault.contract, event: \"Withdraw\", logs: logs, sub: sub}, nil\n}", "func (_Wmatic *WmaticSession) Withdraw(wad *big.Int) (*types.Transaction, error) {\n\treturn _Wmatic.Contract.Withdraw(&_Wmatic.TransactOpts, wad)\n}", "func createWithdrawalTx(t *testing.T, pool *Pool, store *txstore.Store, inputAmounts []int64,\n\toutputAmounts []int64) *withdrawalTx {\n\tnet := pool.Manager().ChainParams()\n\ttx := newWithdrawalTx()\n\t_, credits := TstCreateCredits(t, pool, inputAmounts, store)\n\tfor _, c 
:= range credits {\n\t\ttx.addInput(c)\n\t}\n\tfor i, amount := range outputAmounts {\n\t\trequest := TstNewOutputRequest(\n\t\t\tt, uint32(i), \"34eVkREKgvvGASZW7hkgE2uNc1yycntMK6\", btcutil.Amount(amount), net)\n\t\ttx.addOutput(request)\n\t}\n\treturn tx\n}", "func (_EtherDelta *EtherDeltaSession) Deposit() (*types.Transaction, error) {\n\treturn _EtherDelta.Contract.Deposit(&_EtherDelta.TransactOpts)\n}", "func (_Smartchef *SmartchefSession) Deposit(_amount *big.Int) (*types.Transaction, error) {\n\treturn _Smartchef.Contract.Deposit(&_Smartchef.TransactOpts, _amount)\n}", "func ListBalance() (map[string]*BalanceResp, error) {\n\tbalances := make(map[string]*BalanceResp)\n\tbalances[\"john\"] = &BalanceResp{}\n\tbalances[\"kelvin\"] = &BalanceResp{}\n\tbalances[\"jun\"] = &BalanceResp{}\n\n\tvar assets []AccountResp\n\terr := DAO.DB().Model(&DAO.Transaction{}).\n\t\tSelect(\"spender as name, sum(price) as amount\").\n\t\tWhere(\"status = 1\").\n\t\tGroup(\"spender\").\n\t\tFind(&assets).Error\n\tif err != nil {\n\t\treturn nil, &Error{500, \"DB Error\"}\n\t}\n\n\tvar liabs []AccountResp\n\terr = DAO.DB().Model(&DAO.Transaction{}).\n\t\tSelect(\"liabilities.payer as name, sum(liabilities.amount) as amount\").\n\t\tJoins(\"left join liabilities on transactions.id = liabilities.transaction_id \").\n\t\tWhere(\"transactions.status = 1\").\n\t\tGroup(\"liabilities.payer\").\n\t\tFind(&liabs).Error\n\tif err != nil {\n\t\treturn nil, &Error{500, \"DB Error\"}\n\t}\n\n\tfor _, a := range assets {\n\t\tperson := balances[a.Name]\n\t\tperson.Asset = a.Amount\n\t}\n\tfor _, l := range liabs {\n\t\tperson := balances[l.Name]\n\t\tperson.Deficit = l.Amount\n\t}\n\tfor _, person := range balances {\n\t\tperson.Balance = person.Asset - person.Deficit\n\t}\n\n\treturn balances, nil\n}", "func (_Smartchef *SmartchefFilterer) FilterWithdraw(opts *bind.FilterOpts, user []common.Address) (*SmartchefWithdrawIterator, error) {\n\n\tvar userRule []interface{}\n\tfor _, userItem := 
range user {\n\t\tuserRule = append(userRule, userItem)\n\t}\n\n\tlogs, sub, err := _Smartchef.contract.FilterLogs(opts, \"Withdraw\", userRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &SmartchefWithdrawIterator{contract: _Smartchef.contract, event: \"Withdraw\", logs: logs, sub: sub}, nil\n}", "func (s service) findFunding(model documents.Model, fundingID string) (data Data, err error) {\n\tidx, err := extensions.FindAttributeSetIDX(model, fundingID, AttrFundingLabel, agreementIDLabel, fundingFieldKey)\n\tif err != nil {\n\t\treturn data, err\n\t}\n\treturn s.deriveFundingData(model, idx)\n}", "func Deposit(amount int) {\n\tdeposits <- amount\n}", "func (_DelegateProfile *DelegateProfileSession) Withdraw() (*types.Transaction, error) {\n\treturn _DelegateProfile.Contract.Withdraw(&_DelegateProfile.TransactOpts)\n}", "func Deposito(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"Application-json\")\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\tdefer r.Body.Close()\n\tdeposit := models.Transaccion{}\n\n\tjson.NewDecoder(r.Body).Decode(&deposit)\n\tlog.Println(deposit)\n\n\ttsql := fmt.Sprintf(\"exec SP_DEPOSITO '%d', '%s', %f\", deposit.NoCuenta, deposit.TipoTran, deposit.Monto)\n\tQuery, err := db.Query(tsql)\n\n\tif err == nil {\n\t\tnotification := models.Notification{\n\t\t\tNoCuenta: deposit.NoCuenta,\n\t\t\tMonto: deposit.Monto,\n\t\t\tRazon: \"Transaccion realizada exitosamente\",\n\t\t\tStatus: true,\n\t\t}\n\n\t\tjsonresult, _ := json.Marshal(notification)\n\t\tw.WriteHeader(http.StatusOK)\n\t\tw.Write(jsonresult)\n\t\treturn\n\t}\n\n\tif err.Error() == help.ErrorCuentaNotFound {\n\t\tnotification := models.Notification{\n\t\t\tNoCuenta: deposit.NoCuenta,\n\t\t\tMonto: deposit.Monto,\n\t\t\tRazon: \"El numero de cuenta proporcionado no es válido\",\n\t\t\tStatus: false,\n\t\t}\n\n\t\tjsonresult, _ := 
json.Marshal(notification)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write(jsonresult)\n\t\treturn\n\t}\n\n\tif err != nil {\n\t\tlog.Println(\"+++ Error no controlado: \", err.Error(), \"+++\")\n\t\treturn\n\t}\n\n\tdefer Query.Close()\n}", "func (_Smartchef *SmartchefSession) Withdraw(_amount *big.Int) (*types.Transaction, error) {\n\treturn _Smartchef.Contract.Withdraw(&_Smartchef.TransactOpts, _amount)\n}", "func (s *State) returnDeposit(tx *types.Transaction, height uint32) {\n\tvar inputValue common.Fixed64\n\tfor _, input := range tx.Inputs {\n\t\tinputValue += s.DepositOutputs[input.ReferKey()]\n\t}\n\n\treturnAction := func(producer *Producer) {\n\t\ts.history.Append(height, func() {\n\t\t\tif height >= s.chainParams.CRVotingStartHeight {\n\t\t\t\tproducer.depositAmount -= inputValue\n\t\t\t}\n\t\t\tproducer.state = Returned\n\t\t}, func() {\n\t\t\tif height >= s.chainParams.CRVotingStartHeight {\n\t\t\t\tproducer.depositAmount += inputValue\n\t\t\t}\n\t\t\tproducer.state = Canceled\n\t\t})\n\t}\n\n\tfor _, program := range tx.Programs {\n\t\tpk := program.Code[1 : len(program.Code)-1]\n\t\tif producer := s.getProducer(pk); producer != nil && producer.state == Canceled {\n\t\t\treturnAction(producer)\n\t\t}\n\t}\n}", "func (k *Kraken) WithdrawFiatFunds(withdrawRequest *exchange.WithdrawRequest) (string, error) {\n\treturn k.WithdrawCryptocurrencyFunds(withdrawRequest)\n}", "func (_PBridge *PBridgeFilterer) FilterDepositFunds(opts *bind.FilterOpts) (*PBridgeDepositFundsIterator, error) {\n\n\tlogs, sub, err := _PBridge.contract.FilterLogs(opts, \"DepositFunds\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &PBridgeDepositFundsIterator{contract: _PBridge.contract, event: \"DepositFunds\", logs: logs, sub: sub}, nil\n}", "func (deposit *Deposit) ValidateDeposit() (map[string]interface{}, bool) {\n\n\tif deposit.Amount <= 0 {\n\t\treturn u.Message(false, \"Amount is required\"), false\n\t}\n\n\tif deposit.FormaPago == \"\" {\n\t\treturn 
u.Message(false, \"FormaPago is required\"), false\n\t}\n\n\t//check client in DB\n\t_, err := ExistClientIdentificationDB(deposit.Clientidentificationcard)\n\tif err == gorm.ErrRecordNotFound {\n\t\treturn u.Message(false, \"Client exist no in DB\"), false\n\t}\n\n\treturn u.Message(false, \"Requirement passed\"), true\n}", "func Withdraw(amount int) bool {\n\tch := make(chan bool)\n\twithdrawals <- Withdrawal{amount, ch}\n\treturn <-ch\n}", "func (c *Client) AccountGetDepositHistory(currency string) ([]TransactionHistoryDescription, error) {\r\n\tdefer c.clearError()\r\n\r\n\tparams := map[string]string{\r\n\t\t\"apikey\": c.apiKey,\r\n\t}\r\n\r\n\tif currency != \"\" {\r\n\t\tparams[\"currency\"] = currency\r\n\t}\r\n\r\n\tvar parsedResponse *baseResponse\r\n\r\n\tparsedResponse = c.sendRequest(\"account/getdeposithistory\", params)\r\n\r\n\tif c.err != nil {\r\n\t\treturn nil, c.err\r\n\t}\r\n\r\n\tif parsedResponse.Success != true {\r\n\t\tc.setError(\"api error - account/getdeposithistory\", parsedResponse.Message)\r\n\t\treturn nil, c.err\r\n\t}\r\n\r\n\tvar response []TransactionHistoryDescription\r\n\r\n\tif err := json.Unmarshal(parsedResponse.Result, &response); err != nil {\r\n\t\tc.setError(\"api error - account/getdeposithistory\", err.Error())\r\n\t\treturn nil, c.err\r\n\t}\r\n\r\n\t//clean out responses with nil values.\r\n\tvar cleanedResponse []TransactionHistoryDescription\r\n\tdefaultVal := TransactionHistoryDescription{}\r\n\r\n\tfor _, curVal := range response {\r\n\t\tif curVal != defaultVal {\r\n\t\t\tcleanedResponse = append(cleanedResponse, curVal)\r\n\t\t}\r\n\t}\r\n\r\n\tif len(cleanedResponse) == 0 && len(response) != 0 {\r\n\t\tc.setError(\"validate response\", \"all historical deposits had empty values\")\r\n\t\treturn nil, c.err\r\n\t}\r\n\r\n\treturn cleanedResponse, nil\r\n}" ]
[ "0.63003683", "0.62463695", "0.58435166", "0.5834678", "0.5790308", "0.57658595", "0.5760613", "0.5757181", "0.5652355", "0.56248325", "0.5602636", "0.5565634", "0.5551058", "0.5546448", "0.5528693", "0.55241525", "0.55012304", "0.54893017", "0.5451733", "0.5433997", "0.5417753", "0.54050535", "0.5398466", "0.53463346", "0.53443074", "0.53419834", "0.5340173", "0.5323149", "0.5320608", "0.5302444", "0.52983356", "0.5290557", "0.5280322", "0.52760065", "0.52756315", "0.52688545", "0.5261992", "0.52578723", "0.52116436", "0.51902807", "0.5188839", "0.51647025", "0.5161433", "0.5155896", "0.51489115", "0.5137915", "0.5116621", "0.510288", "0.51010066", "0.5091166", "0.5075896", "0.50724566", "0.50664014", "0.50639147", "0.50625354", "0.50595045", "0.50535434", "0.5043624", "0.5041782", "0.50376964", "0.5031931", "0.5028133", "0.5026003", "0.5010418", "0.5004965", "0.5004762", "0.49996197", "0.49963683", "0.49915218", "0.4977721", "0.49770862", "0.49759227", "0.49735883", "0.49733263", "0.49715322", "0.49646205", "0.49604726", "0.4950119", "0.4937686", "0.4936609", "0.49215266", "0.4915346", "0.49111176", "0.4910533", "0.48986003", "0.48930123", "0.4889961", "0.48860222", "0.48854575", "0.48786828", "0.4877222", "0.48707867", "0.48681462", "0.48632148", "0.48630244", "0.48627818", "0.48619905", "0.48597455", "0.4859145", "0.48556587" ]
0.7214689
0
SendHTTPRequest sends an unauthenticated HTTP request
func (h *HUOBI) SendHTTPRequest(ctx context.Context, ep exchange.URL, path string, result interface{}) error { endpoint, err := h.API.Endpoints.GetURL(ep) if err != nil { return err } var tempResp json.RawMessage item := &request.Item{ Method: http.MethodGet, Path: endpoint + path, Result: &tempResp, Verbose: h.Verbose, HTTPDebugging: h.HTTPDebugging, HTTPRecording: h.HTTPRecording, } err = h.SendPayload(ctx, request.Unset, func() (*request.Item, error) { return item, nil }, request.UnauthenticatedRequest) if err != nil { return err } var errCap errorCapture if err := json.Unmarshal(tempResp, &errCap); err == nil { if errCap.ErrMsgType1 != "" { return fmt.Errorf("error code: %v error message: %s", errCap.CodeType1, errors.New(errCap.ErrMsgType1)) } if errCap.ErrMsgType2 != "" { return fmt.Errorf("error code: %v error message: %s", errCap.CodeType2, errors.New(errCap.ErrMsgType2)) } } return json.Unmarshal(tempResp, result) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func SendHTTPRequest(method, url, token string, body []byte) ([]byte, *http.Response, error) {\n\treq, err := http.NewRequest(method, url, bytes.NewBuffer(body))\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\treq.Header.Add(\"Authorization\", fmt.Sprintf(\"Bearer %s\", token))\n\n\tclient := &http.Client{}\n\n\tresponse, err := client.Do(req)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer response.Body.Close()\n\n\tbody, readErr := ioutil.ReadAll(response.Body)\n\tif readErr != nil {\n\t\tlog.Fatal(readErr)\n\t}\n\n\treturn body, response, nil\n}", "func (c *Coinbene) SendAuthHTTPRequest(ep exchange.URL, method, path, epPath string, isSwap bool,\n\tparams, result interface{}, f request.EndpointLimit) error {\n\tif !c.AllowAuthenticatedRequest() {\n\t\treturn fmt.Errorf(\"%s %w\", c.Name, exchange.ErrAuthenticatedRequestWithoutCredentialsSet)\n\t}\n\tendpoint, err := c.API.Endpoints.GetURL(ep)\n\tif err != nil {\n\t\treturn err\n\t}\n\tauthPath := coinbeneAuthPath\n\tif isSwap {\n\t\tauthPath = coinbeneSwapAuthPath\n\t}\n\tnow := time.Now()\n\ttimestamp := now.UTC().Format(\"2006-01-02T15:04:05.999Z\")\n\tvar finalBody io.Reader\n\tvar preSign string\n\tswitch {\n\tcase params != nil && method == http.MethodGet:\n\t\tp, ok := params.(url.Values)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"params is not of type url.Values\")\n\t\t}\n\t\tpreSign = timestamp + method + authPath + epPath + \"?\" + p.Encode()\n\t\tpath = common.EncodeURLValues(path, p)\n\tcase params != nil:\n\t\tvar i interface{}\n\t\tswitch p := params.(type) {\n\t\tcase url.Values:\n\t\t\tm := make(map[string]string)\n\t\t\tfor k, v := range p {\n\t\t\t\tm[k] = strings.Join(v, \"\")\n\t\t\t}\n\t\t\ti = m\n\t\tdefault:\n\t\t\ti = p\n\t\t}\n\t\ttempBody, err := json.Marshal(i)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfinalBody = bytes.NewBufferString(string(tempBody))\n\t\tpreSign = timestamp + method + authPath + epPath + string(tempBody)\n\tdefault:\n\t\tpreSign = 
timestamp + method + authPath + epPath\n\t}\n\ttempSign := crypto.GetHMAC(crypto.HashSHA256,\n\t\t[]byte(preSign),\n\t\t[]byte(c.API.Credentials.Secret))\n\theaders := make(map[string]string)\n\theaders[\"Content-Type\"] = \"application/json\"\n\theaders[\"ACCESS-KEY\"] = c.API.Credentials.Key\n\theaders[\"ACCESS-SIGN\"] = crypto.HexEncodeToString(tempSign)\n\theaders[\"ACCESS-TIMESTAMP\"] = timestamp\n\n\tvar resp json.RawMessage\n\terrCap := struct {\n\t\tCode int `json:\"code\"`\n\t\tMessage string `json:\"message\"`\n\t}{}\n\n\t// Expiry of timestamp doesn't appear to be documented, so making a reasonable assumption\n\tctx, cancel := context.WithDeadline(context.Background(), now.Add(15*time.Second))\n\tdefer cancel()\n\tif err := c.SendPayload(ctx, &request.Item{\n\t\tMethod: method,\n\t\tPath: endpoint + path,\n\t\tHeaders: headers,\n\t\tBody: finalBody,\n\t\tResult: &resp,\n\t\tAuthRequest: true,\n\t\tVerbose: c.Verbose,\n\t\tHTTPDebugging: c.HTTPDebugging,\n\t\tHTTPRecording: c.HTTPRecording,\n\t\tEndpoint: f,\n\t}); err != nil {\n\t\treturn err\n\t}\n\n\tif err := json.Unmarshal(resp, &errCap); err == nil {\n\t\tif errCap.Code != 200 && errCap.Message != \"\" {\n\t\t\treturn errors.New(errCap.Message)\n\t\t}\n\t}\n\treturn json.Unmarshal(resp, result)\n}", "func (r *Responder) NetworkAuthenticationRequired() { r.write(http.StatusNetworkAuthenticationRequired) }", "func (c *HTTPClient) DoUnauthenticated(ctx context.Context, method string, path string, params map[string]string, data interface{}, result interface{}) (statusCode int, err error) {\n\tc.l.Lock()\n\tdefer c.l.Unlock()\n\n\treq, err := c.prepareRequest(method, path, params, data)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn c.do(ctx, req, result, true, false, false)\n}", "func sendRequest(req *http.Request, credentials Credentials) ([]byte, error) {\n\treq.Header.Add(\"Content-Type\", \"application/json\")\n\treq.Header.Add(\"Connection\", \"close\")\n\treq.Header.Add(\"Authorization\", 
credentials.AuthToken)\n\n\tresp, err := httpClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tdata, err := ioutil.ReadAll(resp.Body)\n\treturn data, err\n}", "func (h *HitBTC) SendAuthenticatedHTTPRequest(ctx context.Context, ep exchange.URL, method, endpoint string, values url.Values, f request.EndpointLimit, result interface{}) error {\n\tcreds, err := h.GetCredentials(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tePoint, err := h.API.Endpoints.GetURL(ep)\n\tif err != nil {\n\t\treturn err\n\t}\n\theaders := make(map[string]string)\n\theaders[\"Authorization\"] = \"Basic \" + crypto.Base64Encode([]byte(creds.Key+\":\"+creds.Secret))\n\n\tpath := fmt.Sprintf(\"%s/%s\", ePoint, endpoint)\n\n\titem := &request.Item{\n\t\tMethod: method,\n\t\tPath: path,\n\t\tHeaders: headers,\n\t\tResult: result,\n\t\tVerbose: h.Verbose,\n\t\tHTTPDebugging: h.HTTPDebugging,\n\t\tHTTPRecording: h.HTTPRecording,\n\t}\n\n\treturn h.SendPayload(ctx, f, func() (*request.Item, error) {\n\t\titem.Body = bytes.NewBufferString(values.Encode())\n\t\treturn item, nil\n\t}, request.AuthenticatedRequest)\n}", "func (p *Poloniex) SendHTTPRequest(ctx context.Context, ep exchange.URL, path string, result interface{}) error {\n\tendpoint, err := p.API.Endpoints.GetURL(ep)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\titem := &request.Item{\n\t\tMethod: http.MethodGet,\n\t\tPath: endpoint + path,\n\t\tResult: result,\n\t\tVerbose: p.Verbose,\n\t\tHTTPDebugging: p.HTTPDebugging,\n\t\tHTTPRecording: p.HTTPRecording,\n\t}\n\n\treturn p.SendPayload(ctx, request.Unset, func() (*request.Item, error) {\n\t\treturn item, nil\n\t}, request.UnauthenticatedRequest)\n}", "func SendHTTPRequest(ctx context.Context, method, urlPath string, headers map[string]string, body io.Reader, verbose bool) ([]byte, error) {\n\tmethod = strings.ToUpper(method)\n\n\tif method != http.MethodOptions && method != http.MethodGet &&\n\t\tmethod != http.MethodHead && method != 
http.MethodPost &&\n\t\tmethod != http.MethodPut && method != http.MethodDelete &&\n\t\tmethod != http.MethodTrace && method != http.MethodConnect {\n\t\treturn nil, errors.New(\"invalid HTTP method specified\")\n\t}\n\n\treq, err := http.NewRequestWithContext(ctx, method, urlPath, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor k, v := range headers {\n\t\treq.Header.Add(k, v)\n\t}\n\n\tif verbose {\n\t\tlog.Debugf(log.Global, \"Request path: %s\", urlPath)\n\t\tfor k, d := range req.Header {\n\t\t\tlog.Debugf(log.Global, \"Request header [%s]: %s\", k, d)\n\t\t}\n\t\tlog.Debugf(log.Global, \"Request type: %s\", method)\n\t\tif body != nil {\n\t\t\tlog.Debugf(log.Global, \"Request body: %v\", body)\n\t\t}\n\t}\n\n\tm.RLock()\n\tif _HTTPUserAgent != \"\" && req.Header.Get(\"User-Agent\") == \"\" {\n\t\treq.Header.Add(\"User-Agent\", _HTTPUserAgent)\n\t}\n\n\tif _HTTPClient == nil {\n\t\tm.RUnlock()\n\t\tm.Lock()\n\t\t// Set *http.Client with default timeout if not populated.\n\t\t_HTTPClient = NewHTTPClientWithTimeout(defaultTimeout)\n\t\tm.Unlock()\n\t\tm.RLock()\n\t}\n\n\tresp, err := _HTTPClient.Do(req)\n\tm.RUnlock()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tcontents, err := io.ReadAll(resp.Body)\n\n\tif verbose {\n\t\tlog.Debugf(log.Global, \"HTTP status: %s, Code: %v\",\n\t\t\tresp.Status,\n\t\t\tresp.StatusCode)\n\t\tlog.Debugf(log.Global, \"Raw response: %s\", string(contents))\n\t}\n\n\treturn contents, err\n}", "func (h *HUOBIHADAX) SendHTTPRequest(path string, result interface{}) error {\n\treturn h.SendPayload(http.MethodGet, path, nil, nil, result, false, false, h.Verbose, h.HTTPDebugging)\n}", "func (rs *RequestSender) send(req *http.Request) (res *http.Response, err error) {\n\tif req.URL == nil {\n\t\treturn nil, ErrURLUnset\n\t}\n\tif rs.tracer != nil {\n\t\ttf := rs.tracer.Start(req)\n\t\tif tf != nil {\n\t\t\tdefer func() { tf.Finish(req, res, err) }()\n\t\t}\n\t}\n\tres, err = 
rs.client.Do(req)\n\treturn\n}", "func (h *unauthorizedHandler) ServeHTTP(w stdhttp.ResponseWriter, r *stdhttp.Request) {\n\tw.WriteHeader(stdhttp.StatusUnauthorized)\n}", "func (s *Standard) sendRequest(context service.Context, requestMethod string, requestURL string, responseObject interface{}) error {\n\n\tserverToken := s.serverToken()\n\tif serverToken == \"\" {\n\t\treturn errors.Newf(\"client\", \"unable to obtain server token for %s %s\", requestMethod, requestURL)\n\t}\n\n\trequest, err := http.NewRequest(requestMethod, requestURL, nil)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"client\", \"unable to create new request for %s %s\", requestMethod, requestURL)\n\t}\n\n\tif err = service.CopyRequestTrace(context.Request(), request); err != nil {\n\t\treturn errors.Wrapf(err, \"client\", \"unable to copy request trace\")\n\t}\n\n\trequest.Header.Add(TidepoolAuthenticationTokenHeaderName, serverToken)\n\n\tresponse, err := s.httpClient.Do(request)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"client\", \"unable to perform request %s %s\", requestMethod, requestURL)\n\t}\n\tdefer response.Body.Close()\n\n\tswitch response.StatusCode {\n\tcase http.StatusOK:\n\t\tif responseObject != nil {\n\t\t\tif err = json.NewDecoder(response.Body).Decode(responseObject); err != nil {\n\t\t\t\treturn errors.Wrapf(err, \"client\", \"error decoding JSON response from %s %s\", request.Method, request.URL.String())\n\t\t\t}\n\t\t}\n\t\treturn nil\n\tcase http.StatusUnauthorized:\n\t\treturn NewUnauthorizedError()\n\tdefault:\n\t\treturn NewUnexpectedResponseError(response, request)\n\t}\n}", "func unauthorized(rw http.ResponseWriter, r *http.Request) {\n\n}", "func (by *Bybit) SendUSDCAuthHTTPRequest(ctx context.Context, ePath exchange.URL, method, path string, data interface{}, result UnmarshalTo, f request.EndpointLimit) error {\n\tcreds, err := by.GetCredentials(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif result == nil {\n\t\tresult = 
&USDCError{}\n\t}\n\n\tendpointPath, err := by.API.Endpoints.GetURL(ePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = by.SendPayload(ctx, f, func() (*request.Item, error) {\n\t\tnowTimeInMilli := strconv.FormatInt(time.Now().UnixMilli(), 10)\n\t\theaders := make(map[string]string)\n\t\tvar payload, hmacSigned []byte\n\n\t\tif data != nil {\n\t\t\td, ok := data.(map[string]interface{})\n\t\t\tif !ok {\n\t\t\t\treturn nil, common.GetTypeAssertError(\"map[string]interface{}\", data)\n\t\t\t}\n\t\t\tpayload, err = json.Marshal(d)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\tsignInput := nowTimeInMilli + creds.Key + defaultRecvWindow + string(payload)\n\t\thmacSigned, err = crypto.GetHMAC(crypto.HashSHA256, []byte(signInput), []byte(creds.Secret))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\theaders[\"Content-Type\"] = \"application/json\"\n\t\theaders[\"X-BAPI-API-KEY\"] = creds.Key\n\t\theaders[\"X-BAPI-SIGN\"] = crypto.HexEncodeToString(hmacSigned)\n\t\theaders[\"X-BAPI-SIGN-TYPE\"] = \"2\"\n\t\theaders[\"X-BAPI-TIMESTAMP\"] = nowTimeInMilli\n\t\theaders[\"X-BAPI-RECV-WINDOW\"] = defaultRecvWindow\n\n\t\treturn &request.Item{\n\t\t\tMethod: method,\n\t\t\tPath: endpointPath + path,\n\t\t\tHeaders: headers,\n\t\t\tBody: bytes.NewBuffer(payload),\n\t\t\tResult: &result,\n\t\t\tVerbose: by.Verbose,\n\t\t\tHTTPDebugging: by.HTTPDebugging,\n\t\t\tHTTPRecording: by.HTTPRecording}, nil\n\t}, request.AuthenticatedRequest)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn result.GetError(true)\n}", "func SendHttpRequest(method, url, payload string) (int, string, error) {\n\treturn util.SendHttpRequest(\n\t\tmethod,\n\t\turl,\n\t\tpayload,\n\t\t[]string{access.CLIENT_CERT_SN_KEY, TestOperatorSerialNumber})\n}", "func (d *Dao) doHTTPRequest(c context.Context, uri, ip string, params url.Values, res interface{}) (err error) {\n\tenc, err := d.sign(params)\n\tif err != nil {\n\t\terr = pkgerr.Wrapf(err, \"uri:%s,params:%v\", uri, 
params)\n\t\treturn\n\t}\n\tif enc != \"\" {\n\t\turi = uri + \"?\" + enc\n\t}\n\n\treq, err := xhttp.NewRequest(xhttp.MethodGet, uri, nil)\n\tif err != nil {\n\t\terr = pkgerr.Wrapf(err, \"method:%s,uri:%s\", xhttp.MethodGet, uri)\n\t\treturn\n\t}\n\treq.Header.Set(_userAgent, \"[email protected] \"+env.AppID)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn d.client.Do(c, req, res)\n}", "func (h *HUOBIHADAX) SendAuthenticatedHTTPRequest(method, endpoint string, values url.Values, result interface{}) error {\n\tif !h.AuthenticatedAPISupport {\n\t\treturn fmt.Errorf(exchange.WarningAuthenticatedRequestWithoutCredentialsSet, h.Name)\n\t}\n\n\tvalues.Set(\"AccessKeyId\", h.APIKey)\n\tvalues.Set(\"SignatureMethod\", \"HmacSHA256\")\n\tvalues.Set(\"SignatureVersion\", \"2\")\n\tvalues.Set(\"Timestamp\", time.Now().UTC().Format(\"2006-01-02T15:04:05\"))\n\n\tendpoint = fmt.Sprintf(\"/v%s/%s\", huobihadaxAPIVersion, endpoint)\n\tpayload := fmt.Sprintf(\"%s\\napi.hadax.com\\n%s\\n%s\",\n\t\tmethod, endpoint, values.Encode())\n\n\theaders := make(map[string]string)\n\theaders[\"Content-Type\"] = \"application/x-www-form-urlencoded\"\n\n\thmac := common.GetHMAC(common.HashSHA256, []byte(payload), []byte(h.APISecret))\n\tvalues.Set(\"Signature\", common.Base64Encode(hmac))\n\n\turlPath := common.EncodeURLValues(fmt.Sprintf(\"%s%s\", h.APIUrl, endpoint),\n\t\tvalues)\n\treturn h.SendPayload(method, urlPath, headers, bytes.NewBufferString(\"\"), result, true, false, h.Verbose, h.HTTPDebugging)\n}", "func (p *Poloniex) SendAuthenticatedHTTPRequest(ctx context.Context, ep exchange.URL, method, endpoint string, values url.Values, result interface{}) error {\n\tcreds, err := p.GetCredentials(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tePoint, err := p.API.Endpoints.GetURL(ep)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn p.SendPayload(ctx, request.Unset, func() (*request.Item, error) {\n\t\theaders := make(map[string]string)\n\t\theaders[\"Content-Type\"] = 
\"application/x-www-form-urlencoded\"\n\t\theaders[\"Key\"] = creds.Key\n\t\tvalues.Set(\"nonce\", p.Requester.GetNonce(true).String())\n\t\tvalues.Set(\"command\", endpoint)\n\n\t\thmac, err := crypto.GetHMAC(crypto.HashSHA512,\n\t\t\t[]byte(values.Encode()),\n\t\t\t[]byte(creds.Secret))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\theaders[\"Sign\"] = crypto.HexEncodeToString(hmac)\n\n\t\tpath := fmt.Sprintf(\"%s/%s\", ePoint, poloniexAPITradingEndpoint)\n\t\treturn &request.Item{\n\t\t\tMethod: method,\n\t\t\tPath: path,\n\t\t\tHeaders: headers,\n\t\t\tBody: bytes.NewBufferString(values.Encode()),\n\t\t\tResult: result,\n\t\t\tNonceEnabled: true,\n\t\t\tVerbose: p.Verbose,\n\t\t\tHTTPDebugging: p.HTTPDebugging,\n\t\t\tHTTPRecording: p.HTTPRecording,\n\t\t}, nil\n\t}, request.AuthenticatedRequest)\n}", "func TestRequestURL_BasicAuth_NoCreds(t *testing.T) {\n\tmockClient := &mockAuthHttpClient{}\n\tc := &httpClient{Client: mockClient}\n\tu := &url.URL{Scheme: \"http\", Host: \"localhost\", Path: \"/\"}\n\tresp, err := c.RequestURL(u)\n\tif err != nil {\n\t\tt.Errorf(\"Got error: %v\", err)\n\t}\n\tif resp == nil {\n\t\tt.Errorf(\"Got nil response!\")\n\t}\n\tif resp.StatusCode != 401 {\n\t\tt.Errorf(\"Got non-401 response code: %d\", resp.StatusCode)\n\t}\n}", "func (r *Responder) ProxyAuthRequired() { r.write(http.StatusProxyAuthRequired) }", "func (h *HUOBI) SendAuthenticatedHTTPRequest(ctx context.Context, ep exchange.URL, method, endpoint string, values url.Values, data, result interface{}, isVersion2API bool) error {\n\tvar err error\n\tcreds, err := h.GetCredentials(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tePoint, err := h.API.Endpoints.GetURL(ep)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif values == nil {\n\t\tvalues = url.Values{}\n\t}\n\n\tinterim := json.RawMessage{}\n\tnewRequest := func() (*request.Item, error) {\n\t\tvalues.Set(\"AccessKeyId\", creds.Key)\n\t\tvalues.Set(\"SignatureMethod\", 
\"HmacSHA256\")\n\t\tvalues.Set(\"SignatureVersion\", \"2\")\n\t\tvalues.Set(\"Timestamp\", time.Now().UTC().Format(\"2006-01-02T15:04:05\"))\n\n\t\tif isVersion2API {\n\t\t\tendpoint = \"/v\" + huobiAPIVersion2 + endpoint\n\t\t} else {\n\t\t\tendpoint = \"/v\" + huobiAPIVersion + endpoint\n\t\t}\n\n\t\tpayload := fmt.Sprintf(\"%s\\napi.huobi.pro\\n%s\\n%s\",\n\t\t\tmethod, endpoint, values.Encode())\n\n\t\theaders := make(map[string]string)\n\n\t\tif method == http.MethodGet {\n\t\t\theaders[\"Content-Type\"] = \"application/x-www-form-urlencoded\"\n\t\t} else {\n\t\t\theaders[\"Content-Type\"] = \"application/json\"\n\t\t}\n\n\t\tvar hmac []byte\n\t\thmac, err = crypto.GetHMAC(crypto.HashSHA256,\n\t\t\t[]byte(payload),\n\t\t\t[]byte(creds.Secret))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tvalues.Set(\"Signature\", crypto.Base64Encode(hmac))\n\t\turlPath := ePoint + common.EncodeURLValues(endpoint, values)\n\n\t\tvar body []byte\n\t\tif data != nil {\n\t\t\tbody, err = json.Marshal(data)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\treturn &request.Item{\n\t\t\tMethod: method,\n\t\t\tPath: urlPath,\n\t\t\tHeaders: headers,\n\t\t\tBody: bytes.NewReader(body),\n\t\t\tResult: &interim,\n\t\t\tVerbose: h.Verbose,\n\t\t\tHTTPDebugging: h.HTTPDebugging,\n\t\t\tHTTPRecording: h.HTTPRecording,\n\t\t}, nil\n\t}\n\n\terr = h.SendPayload(ctx, request.Unset, newRequest, request.AuthenticatedRequest)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif isVersion2API {\n\t\tvar errCap ResponseV2\n\t\tif err = json.Unmarshal(interim, &errCap); err == nil {\n\t\t\tif errCap.Code != 200 && errCap.Message != \"\" {\n\t\t\t\treturn fmt.Errorf(\"%w error code: %v error message: %s\", request.ErrAuthRequestFailed, errCap.Code, errCap.Message)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tvar errCap Response\n\t\tif err = json.Unmarshal(interim, &errCap); err == nil {\n\t\t\tif errCap.Status == huobiStatusError && errCap.ErrorMessage != \"\" {\n\t\t\t\treturn 
fmt.Errorf(\"%w error code: %v error message: %s\", request.ErrAuthRequestFailed, errCap.ErrorCode, errCap.ErrorMessage)\n\t\t\t}\n\t\t}\n\t}\n\terr = json.Unmarshal(interim, result)\n\tif err != nil {\n\t\treturn common.AppendError(err, request.ErrAuthRequestFailed)\n\t}\n\treturn nil\n}", "func (h *HitBTC) SendHTTPRequest(ctx context.Context, ep exchange.URL, path string, result interface{}) error {\n\tendpoint, err := h.API.Endpoints.GetURL(ep)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\titem := &request.Item{\n\t\tMethod: http.MethodGet,\n\t\tPath: endpoint + path,\n\t\tResult: result,\n\t\tVerbose: h.Verbose,\n\t\tHTTPDebugging: h.HTTPDebugging,\n\t\tHTTPRecording: h.HTTPRecording,\n\t}\n\n\treturn h.SendPayload(ctx, marketRequests, func() (*request.Item, error) {\n\t\treturn item, nil\n\t}, request.UnauthenticatedRequest)\n}", "func (h *httpCloud) sendHTTPRequest(requestType string, url string, requestBody io.Reader) ([]byte, error) {\n\treq, err := http.NewRequest(requestType, url, requestBody)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tclient := &http.Client{\n\t\tTransport: http.DefaultTransport,\n\t\tTimeout: HttpProviderTimeout,\n\t}\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t} else {\n\t\treturn body, nil\n\t}\n}", "func (h *HTTPHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {\n\tvar err error\n\n\tvar paramsPrefix string = http.CanonicalHeaderKey(\"X-UrlFetch-\")\n\tparams := http.Header{}\n\tfor key, values := range req.Header {\n\t\tif strings.HasPrefix(key, paramsPrefix) {\n\t\t\tparams[key] = values\n\t\t}\n\t}\n\n\tfor key := range params {\n\t\treq.Header.Del(key)\n\t}\n\n\tif h.Authenticator != nil {\n\t\tauth := req.Header.Get(\"Proxy-Authorization\")\n\t\tif auth == \"\" {\n\t\t\th.ProxyAuthorizationRequired(rw, req)\n\t\t\treturn\n\t\t}\n\n\t\tparts := strings.SplitN(auth, 
\" \", 2)\n\t\tif len(parts) == 2 {\n\t\t\tswitch parts[0] {\n\t\t\tcase \"Basic\":\n\t\t\t\tif auth, err := base64.StdEncoding.DecodeString(parts[1]); err == nil {\n\t\t\t\t\tparts := strings.Split(string(auth), \":\")\n\t\t\t\t\tusername := parts[0]\n\t\t\t\t\tpassword := parts[1]\n\n\t\t\t\t\tif err := h.Authenticator.Authenticate(username, password); err != nil {\n\t\t\t\t\t\thttp.Error(rw, \"403 Forbidden\", http.StatusForbidden)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tglog.Errorf(\"Unrecognized auth type: %#v\", parts[0])\n\t\t\t\thttp.Error(rw, \"403 Forbidden\", http.StatusForbidden)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\treq.Header.Del(\"Proxy-Authorization\")\n\t}\n\n\tif req.Method == http.MethodConnect {\n\t\thost, port, err := net.SplitHostPort(req.Host)\n\t\tif err != nil {\n\t\t\thost = req.Host\n\t\t\tport = \"443\"\n\t\t}\n\n\t\tglog.Infof(\"%s \\\"%s %s:%s %s\\\" - -\", req.RemoteAddr, req.Method, host, port, req.Proto)\n\n\t\tdial := h.Dial\n\t\tif dial == nil {\n\t\t\tdial = h.Transport.Dial\n\t\t}\n\n\t\tconn, err := dial(\"tcp\", net.JoinHostPort(host, port))\n\t\tif err != nil {\n\t\t\thttp.Error(rw, err.Error(), http.StatusBadGateway)\n\t\t\treturn\n\t\t}\n\n\t\thijacker, ok := rw.(http.Hijacker)\n\t\tif !ok {\n\t\t\thttp.Error(rw, fmt.Sprintf(\"%#v is not http.Hijacker\", rw), http.StatusBadGateway)\n\t\t\treturn\n\t\t}\n\t\tlconn, _, err := hijacker.Hijack()\n\t\tif err != nil {\n\t\t\thttp.Error(rw, err.Error(), http.StatusBadGateway)\n\t\t\treturn\n\t\t}\n\n\t\tio.WriteString(lconn, \"HTTP/1.1 200 OK\\r\\n\\r\\n\")\n\n\t\tdefer lconn.Close()\n\t\tdefer conn.Close()\n\n\t\tgo yaputil.IOCopy(conn, lconn)\n\t\tyaputil.IOCopy(lconn, conn)\n\n\t\treturn\n\t}\n\n\tif req.Host == \"\" {\n\t\thttp.Error(rw, \"400 Bad Request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif req.URL.Host == \"\" {\n\t\treq.URL.Host = req.Host\n\t}\n\n\tif req.ContentLength == 0 {\n\t\tio.Copy(ioutil.Discard, 
req.Body)\n\t\treq.Body.Close()\n\t\treq.Body = nil\n\t}\n\n\tglog.Infof(\"%s \\\"%s %s %s\\\" - -\", req.RemoteAddr, req.Method, req.URL.String(), req.Proto)\n\n\tif req.URL.Scheme == \"\" {\n\t\treq.URL.Scheme = \"http\"\n\t}\n\n\tresp, err := h.Transport.RoundTrip(req)\n\tif err != nil {\n\t\tmsg := err.Error()\n\t\tif strings.HasPrefix(msg, \"Invaid DNS Record: \") {\n\t\t\thttp.Error(rw, \"403 Forbidden\", http.StatusForbidden)\n\t\t} else {\n\t\t\thttp.Error(rw, err.Error(), http.StatusBadGateway)\n\t\t}\n\t\treturn\n\t}\n\n\tfor key, values := range resp.Header {\n\t\tfor _, value := range values {\n\t\t\trw.Header().Add(key, value)\n\t\t}\n\t}\n\trw.WriteHeader(resp.StatusCode)\n\n\tdefer resp.Body.Close()\n\n\tvar r io.Reader = resp.Body\n\tyaputil.IOCopy(rw, r)\n}", "func (c *Client) SendRequest(method string, rawURL string, data url.Values,\n\theaders map[string]interface{}) (*http.Response, error) {\n\tu, err := url.Parse(rawURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvalueReader := &strings.Reader{}\n\tgoVersion := runtime.Version()\n\n\tif method == http.MethodGet {\n\t\tif data != nil {\n\t\t\tv, _ := form.EncodeToStringWith(data, delimiter, escapee, keepZeros)\n\t\t\tregex := regexp.MustCompile(`\\.\\d+`)\n\t\t\ts := regex.ReplaceAllString(v, \"\")\n\n\t\t\tu.RawQuery = s\n\t\t}\n\t}\n\n\tif method == http.MethodPost {\n\t\tvalueReader = strings.NewReader(data.Encode())\n\t}\n\n\treq, err := http.NewRequest(method, u.String(), valueReader)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.SetBasicAuth(c.basicAuth())\n\n\t// E.g. 
\"User-Agent\": \"twilio-go/1.0.0 (darwin amd64) go/go1.17.8\"\n\tuserAgent := fmt.Sprintf(\"twilio-go/%s (%s %s) go/%s\", LibraryVersion, runtime.GOOS, runtime.GOARCH, goVersion)\n\n\tif len(c.UserAgentExtensions) > 0 {\n\t\tuserAgent += \" \" + strings.Join(c.UserAgentExtensions, \" \")\n\t}\n\n\treq.Header.Add(\"User-Agent\", userAgent)\n\n\tif method == http.MethodPost {\n\t\treq.Header.Add(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\t}\n\n\tfor k, v := range headers {\n\t\treq.Header.Add(k, fmt.Sprint(v))\n\t}\n\n\treturn c.doWithErr(req)\n}", "func SendRequest(m, u string, h map[string]string) (*http.Response, []byte, error) {\n\treq, err := CreateRequest(m, u, h)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\thttpClient := &http.Client{Timeout: time.Second * 10}\n\n\tresp, err := httpClient.Do(req)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn resp, nil, err\n\t}\n\n\treturn resp, body, nil\n}", "func (c *Coinbene) SendHTTPRequest(ep exchange.URL, path string, f request.EndpointLimit, result interface{}) error {\n\tendpoint, err := c.API.Endpoints.GetURL(ep)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar resp json.RawMessage\n\terrCap := struct {\n\t\tCode int `json:\"code\"`\n\t\tMessage string `json:\"message\"`\n\t}{}\n\n\tif err := c.SendPayload(context.Background(), &request.Item{\n\t\tMethod: http.MethodGet,\n\t\tPath: endpoint + path,\n\t\tResult: &resp,\n\t\tVerbose: c.Verbose,\n\t\tHTTPDebugging: c.HTTPDebugging,\n\t\tHTTPRecording: c.HTTPRecording,\n\t\tEndpoint: f,\n\t}); err != nil {\n\t\treturn err\n\t}\n\n\tif err := json.Unmarshal(resp, &errCap); err == nil {\n\t\tif errCap.Code != 200 && errCap.Message != \"\" {\n\t\t\treturn errors.New(errCap.Message)\n\t\t}\n\t}\n\treturn json.Unmarshal(resp, result)\n}", "func (client *Client) SendHTTPRequest(req *Request) (*Response, error) {\r\n\thttpResponse := 
new(http.Response)\r\n\r\n\tfor i := 1; i <= 3; i++ {\r\n\t\tvar err error\r\n\t\thttpResponse, err = client.client.Do(req.request)\r\n\t\tif err != nil {\r\n\t\t\treturn nil, err\r\n\t\t}\r\n\r\n\t\tlog.Tracef(\"Server response code: %d\", httpResponse.StatusCode)\r\n\r\n\t\tif httpResponse.StatusCode == 429 {\r\n\t\t\tif i < 3 {\r\n\t\t\t\tlog.Warnf(\"Retrying Received 429 (Too Many Requests) from server trying to access %v (attempt %v) - sleeping 60 seconds\",\r\n\t\t\t\t\treq.request.URL, i)\r\n\t\t\t\ttime.Sleep(60 * time.Second)\r\n\t\t\t} else {\r\n\t\t\t\treturn nil, fmt.Errorf(\"[ERR] Received 429 after three attempts to access %v - aborting\", req.request.URL)\r\n\t\t\t}\r\n\t\t} else if httpResponse.StatusCode == 200 {\r\n\t\t\tbreak\r\n\t\t} else {\r\n\t\t\trespBody, err := ioutil.ReadAll(httpResponse.Body)\r\n\t\t\tif err != nil {\r\n\t\t\t\treturn nil, err\r\n\t\t\t}\r\n\t\t\tlog.Tracef(\"[TRACE] Server response: HTTP %v: %v\", httpResponse.StatusCode, string(respBody))\r\n\t\t\treturn nil, fmt.Errorf(\"Server returned HTTP code %v for %v: %v\", httpResponse.StatusCode, req.request.URL, string(respBody))\r\n\t\t}\r\n\t}\r\n\r\n\treturn &Response{httpResponse}, nil\r\n}", "func handleGetRequest(rw rest.ResponseWriter, req *rest.Request) {\n\trw.WriteJson(map[string]string{\"body\": \"use POST https://localhost:433/sync, include authentication\"})\n}", "func send(session *Session, req *Request) (*Response, error) {\n\t// Set timeout to request context.\n\t// Default timeout is 30s.\n\ttimeout := time.Second * 30\n\tif req.Timeout > 0 {\n\t\ttimeout = time.Second * time.Duration(req.Timeout)\n\t} else if session.Timeout > 0 {\n\t\ttimeout = time.Second * time.Duration(session.Timeout)\n\t}\n\tctx, timeoutCancel := context.WithTimeout(context.Background(), timeout)\n\n\t// set proxy to request context.\n\tif req.Proxy != nil {\n\t\tctx = context.WithValue(ctx, \"http\", req.Proxy.HTTP)\n\t\tctx = context.WithValue(ctx, \"https\", req.Proxy.HTTPS)\n\t} 
else if session.Proxy != nil {\n\t\tctx = context.WithValue(ctx, \"http\", session.Proxy.HTTP)\n\t\tctx = context.WithValue(ctx, \"https\", session.Proxy.HTTPS)\n\t}\n\n\t// set RedirectNum to request context.\n\t// default RedirectNum is 10.\n\tif req.RedirectNum == 0 {\n\t\tctx = context.WithValue(ctx, \"redirectNum\", 10)\n\t} else if req.RedirectNum > 0 {\n\t\tctx = context.WithValue(ctx, \"redirectNum\", req.RedirectNum)\n\t} else {\n\t\tctx = context.WithValue(ctx, \"redirectNum\", 0)\n\t}\n\n\t// Make new http.Request with context\n\thttpReq, err := http.NewRequestWithContext(ctx, req.Method, req.URL, nil)\n\tif err != nil {\n\t\ttimeoutCancel()\n\t\treturn nil, WrapErr(err, \"build Request error, please check request url or request method\")\n\t}\n\n\t// Handle the Headers.\n\thttpReq.Header = mergeHeaders(req.Headers, session.Headers)\n\n\t// Handle the DataForm, Body or JsonBody.\n\t// Set right Content-Type.\n\tif req.PostForm != nil {\n\t\thttpReq.Header.Set(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\t\tdata := req.PostForm.URLEncode()\n\t\thttpReq.Body = ioutil.NopCloser(strings.NewReader(data))\n\t} else if req.Body != nil {\n\t\thttpReq.Body = ioutil.NopCloser(bytes.NewReader(req.Body))\n\t} else if req.JsonBody != nil {\n\t\thttpReq.Header.Set(\"Content-Type\", \"application/json\")\n\t\thttpReq.Body = ioutil.NopCloser(bytes.NewReader(req.JsonBody))\n\t} else if req.MultipartForm != nil {\n\t\thttpReq.Header.Set(\"Content-Type\", req.MultipartForm.ContentType())\n\t\thttpReq.Body = ioutil.NopCloser(req.MultipartForm.Reader())\n\t}\n\n\t// Handle Cookies\n\tif req.Cookies != nil {\n\t\tfor _, cookie := range req.Cookies {\n\t\t\thttpReq.AddCookie(cookie)\n\t\t}\n\t}\n\n\tresp, err := session.client.Do(httpReq) // do request\n\tif err != nil {\n\t\tif strings.Contains(err.Error(), \"context deadline exceeded\") { // check timeout error\n\t\t\ttimeoutCancel()\n\t\t\treturn nil, WrapErr(ErrTimeout, 
err.Error())\n\t\t}\n\t\ttimeoutCancel()\n\t\treturn nil, WrapErr(err, \"Request Error\")\n\t}\n\tdefer func() {\n\t\tif err := resp.Body.Close(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}()\n\n\tresponse, err := buildResponse(req, resp)\n\tif err != nil {\n\t\ttimeoutCancel()\n\t\treturn nil, WrapErr(err, \"build Response Error\")\n\t}\n\n\ttimeoutCancel() // cancel the timeout context after request succeed.\n\treturn response, nil\n}", "func performHTTPRequest(req *http.Request, sess *UserSession) ([]byte, []string) {\n\treq.Header.Set(\"User-Agent\", \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.182 Safari/537.36\")\n\treq.Header.Set(\"Accept\", \"application/json, text/javascript, */*; q=0.01\")\n\t// form token is bound to vid\n\treq.Header.Set(`Cookie`, `vid=`+sess.vid+`; identifier=`+sess.identifier+`; login-options={\"stay\":true,\"no_ip_check\":true,\"leave_others\":true}; prf_ls_uad=price.a.200.normal; rtif-legacy=1; login-options={\"stay\":true,\"no_ip_check\":true,\"leave_others\":true}`)\n\n\t/*\n\t // this is for debug proxying\n\t proxy, _ :=url.Parse(\"http://127.0.0.1:8080\")\n\t tr := &http.Transport{\n\t \tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t \tProxy: http.ProxyURL(proxy),\n\t }\n\t*/\n\n\ttr := &http.Transport{}\n\t// for avoiding infinite redirect loops\n\tclient := &http.Client{\n\t\tTransport: tr,\n\t\tCheckRedirect: func(req *http.Request, via []*http.Request) error {\n\t\t\treturn http.ErrUseLastResponse\n\t\t},\n\t}\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tfmt.Println(\"[!] HTTP request failed to\" + req.URL.Host + req.URL.Path)\n\t\tpanic(err)\n\t}\n\tdefer resp.Body.Close()\n\n\trespBody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tfmt.Println(\"[!] 
HTTP request failed to\" + req.URL.Host + req.URL.Path)\n\t\tpanic(err)\n\t}\n\t// fmt.Println(string(resp.Header.Values(\"Set-Cookie\")[0]))\n\n\treturn respBody, resp.Header.Values(\"Set-Cookie\")\n}", "func (c *Client) SendRequest(req *Request, option *SignOption) (bceResponse *Response, err error) {\n\tif option == nil {\n\t\toption = &SignOption{}\n\t}\n\n\toption.AddHeader(\"User-Agent\", c.GetUserAgent())\n\toption.AddHeader(\"Content-Type\", \"application/json\")\n\tif c.RetryPolicy == nil {\n\t\tc.RetryPolicy = NewDefaultRetryPolicy(3, 20*time.Second)\n\t}\n\tvar buf []byte\n\tif req.Body != nil {\n\t\tbuf, _ = ioutil.ReadAll(req.Body)\n\t}\n\n\tfor i := 0; ; i++ {\n\t\tbceResponse, err = nil, nil\n\t\tif option.Credentials != nil {\n\t\t\tGenerateAuthorization(*option.Credentials, *req, option)\n\t\t} else {\n\t\t\tGenerateAuthorization(*c.Credentials, *req, option)\n\t\t}\n\t\tif c.debug {\n\t\t\tutil.Debug(\"\", fmt.Sprintf(\"Request: httpMethod = %s, requestUrl = %s, requestHeader = %v\",\n\t\t\t\treq.Method, req.URL.String(), req.Header))\n\t\t}\n\t\tt0 := time.Now()\n\t\treq.Body = ioutil.NopCloser(bytes.NewBuffer(buf))\n\t\tresp, httpError := c.httpClient.Do(req.raw())\n\t\tt1 := time.Now()\n\t\tbceResponse = NewResponse(resp)\n\t\tif c.debug {\n\t\t\tutil.Debug(\"\", fmt.Sprintf(\"http request: %s do use time: %v\", req.URL.String(), t1.Sub(t0)))\n\t\t\tstatusCode := -1\n\t\t\tresString := \"\"\n\t\t\tvar resHead http.Header\n\t\t\tif resp != nil {\n\t\t\t\tstatusCode = resp.StatusCode\n\t\t\t\tre, err := bceResponse.GetBodyContent()\n\t\t\t\tif err != nil {\n\t\t\t\t\tutil.Debug(\"\", fmt.Sprintf(\"getbodycontent error: %v\", err))\n\t\t\t\t}\n\t\t\t\tresString = string(re)\n\t\t\t\tresHead = resp.Header\n\t\t\t}\n\t\t\tutil.Debug(\"\", fmt.Sprintf(\"Response: status code = %d, httpMethod = %s, requestUrl = %s\",\n\t\t\t\tstatusCode, req.Method, req.URL.String()))\n\t\t\tutil.Debug(\"\", fmt.Sprintf(\"Response Header: = %v\", 
resHead))\n\t\t\tutil.Debug(\"\", fmt.Sprintf(\"Response body: = %s\", resString))\n\t\t}\n\n\t\tif httpError != nil {\n\t\t\tduration := c.RetryPolicy.GetDelayBeforeNextRetry(httpError, i+1)\n\t\t\tif duration <= 0 {\n\t\t\t\terr = httpError\n\t\t\t\treturn bceResponse, err\n\t\t\t}\n\t\t\ttime.Sleep(duration)\n\t\t\tcontinue\n\t\t}\n\t\tif resp.StatusCode >= http.StatusBadRequest {\n\t\t\terr = buildError(bceResponse)\n\t\t}\n\t\tif err == nil {\n\t\t\treturn bceResponse, err\n\t\t}\n\n\t\tduration := c.RetryPolicy.GetDelayBeforeNextRetry(err, i+1)\n\t\tif duration <= 0 {\n\t\t\treturn bceResponse, err\n\t\t}\n\n\t\ttime.Sleep(duration)\n\t}\n}", "func HTTPRequest(user m.AuthUser, req *http.Request, verbose bool) ([]byte, string) {\n\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\tError(err, \"There was a problem in making the request\")\n\n\tdefer resp.Body.Close()\n\trespBody, err := ioutil.ReadAll(resp.Body)\n\tError(err, \"There was a problem reading the response body\")\n\n\t//b.CheckAuth(user, resp.Status)\n\n\tif verbose {\n\t\tfmt.Println(\"Response Headers:\", resp.Header)\n\t\tfmt.Println(\"Response Status:\", resp.Status)\n\t\tfmt.Println(\"Response Body:\", string(respBody))\n\t}\n\treturn respBody, resp.Status\n}", "func send(req *Request, t RoundTripper) (resp *Response, err error) {\n\tif t == nil {\n\t\tt = DefaultTransport\n\t\tif t == nil {\n\t\t\terr = errors.New(\"http: no Client.Transport or DefaultTransport\")\n\t\t\treturn\n\t\t}\n\t}\n\n\tif req.URL == nil {\n\t\treturn nil, errors.New(\"http: nil Request.URL\")\n\t}\n\n\tif req.RequestURI != \"\" {\n\t\treturn nil, errors.New(\"http: Request.RequestURI can't be set in client requests.\")\n\t}\n\n\t// Most the callers of send (Get, Post, et al) don't need\n\t// Headers, leaving it uninitialized. 
We guarantee to the\n\t// Transport that this has been initialized, though.\n\tif req.Header == nil {\n\t\treq.Header = make(Header)\n\t}\n\n\tif u := req.URL.User; u != nil {\n\t\tusername := u.Username()\n\t\tpassword, _ := u.Password()\n\t\treq.Header.Set(\"Authorization\", \"Basic \"+basicAuth(username, password))\n\t}\n\tresp, err = t.RoundTrip(req)\n\tif err != nil {\n\t\tif resp != nil {\n\t\t\tlog.Logger.Warn(\"RoundTripper returned a response & error; ignoring response\")\n\t\t}\n\t\treturn nil, err\n\t}\n\treturn resp, nil\n}", "func (r *Responder) Unauthorized() { r.write(http.StatusUnauthorized) }", "func forbiddenHTTPHandler(w http.ResponseWriter, r *http.Request) {\n\tincMetric(metricAPIReq)\n\tutils.DebugHTTPDump(r, settingsmgr.DebugHTTP(settings.ServiceAdmin, settings.NoEnvironment), true)\n\t// Send response\n\tutils.HTTPResponse(w, \"\", http.StatusForbidden, []byte(errorContent))\n\tincMetric(metricAPIErr)\n}", "func DoHttpRequest(cfg *config.Configuration, req *http.Request, useCreds bool) (*http.Response, error) {\n\tvar creds auth.Creds\n\tif useCreds {\n\t\tc, err := auth.GetCreds(cfg, req)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tcreds = c\n\t}\n\n\treturn doHttpRequest(cfg, req, creds)\n}", "func NoAuthHandler(handler RequestHandler) httprouter.Handle {\n\treturn func(w http.ResponseWriter, r *http.Request, params httprouter.Params) {\n\t\t// Measure time spent executing shit\n\t\tstart := time.Now()\n\n\t\t// Pass to the real handler\n\t\tresponse, statusCode, err := handler(r, params)\n\n\t\t// Logs [source IP] [request method] [request URL] [HTTP status] [time spent serving request]\n\t\tlog.Printf(\"%v\\t \\\"%v - %v\\\"\\t%v\\t%v\", sourceIP(r), r.Method, r.RequestURI, statusCode, time.Since(start))\n\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), statusCode)\n\t\t\treturn\n\t\t}\n\n\t\tw.WriteHeader(statusCode)\n\t\tfmt.Fprintln(w, response)\n\t}\n}", "func (req *Request) SendRequest(url, method string, bodyData 
[]byte, headers []string, skipTLS bool, timeout time.Duration) *datastructure.Response {\n\n\t// Create a custom request\n\tvar (\n\t\terr error\n\t\tresponse datastructure.Response\n\t\tstart time.Time\n\t)\n\n\tstart = time.Now()\n\n\tif !strings.HasPrefix(url, \"http://\") && !strings.HasPrefix(url, \"https://\") {\n\t\t_error := errors.New(\"PREFIX_URL_NOT_VALID\")\n\t\tlog.Debug(\"sendRequest | Error! \", _error, \" URL: \", url)\n\t\tresponse.Error = _error\n\t\treturn &response\n\t}\n\n\tmethod = strings.ToUpper(method)\n\n\t// Validate method\n\tif !req.methodIsAllowed(method) {\n\t\tlog.Debug(\"sendRequest | Method [\" + method + \"] is not allowed!\")\n\t\t_error := errors.New(\"METHOD_NOT_ALLOWED\")\n\t\tresponse.Error = _error\n\t\treturn &response\n\t}\n\n\t// Manage TLS configuration\n\treq.SetTLS(skipTLS)\n\t// Set infinite timeout as default http/net\n\treq.SetTimeout(timeout)\n\n\treq.URL = url\n\treq.Data = bodyData\n\n\tswitch req.Method {\n\tcase \"GET\":\n\t\treq.initGetRequest()\n\t\treq.Req, err = http.NewRequest(req.Method, req.URL, nil)\n\tcase \"POST\":\n\t\treq.initPostRequest()\n\t\treq.Req, err = http.NewRequest(req.Method, req.URL, bytes.NewReader(req.Data))\n\tcase \"PUT\":\n\t\treq.Req, err = http.NewRequest(req.Method, req.URL, nil)\n\tcase \"DELETE\":\n\t\treq.Req, err = http.NewRequest(req.Method, req.URL, nil)\n\tdefault:\n\t\tlog.Debug(\"sendRequest | Unknown method -> \" + method)\n\t\terr = errors.New(\"HTTP_METHOD_NOT_MANAGED\")\n\t}\n\n\tif err != nil {\n\t\tlog.Debug(\"sendRequest | Error while initializing a new request -> \", err)\n\t\tresponse.Error = err\n\t\treturn &response\n\t}\n\terr = req.CreateHeaderList(headers...)\n\tif err != nil {\n\t\tlog.Debug(\"sendRequest | Error while initializing the headers -> \", err)\n\t\tresponse.Error = err\n\t\treturn &response\n\t}\n\n\tcontentlengthPresent := false\n\tif strings.Compare(req.Req.Header.Get(\"Content-Length\"), \"\") == 0 {\n\t\tcontentlengthPresent = 
true\n\t}\n\n\tif req.Method == \"POST\" && !contentlengthPresent {\n\t\tcontentLength := strconv.FormatInt(req.Req.ContentLength, 10)\n\t\tlog.Debug(\"sendRequest | Content-length not provided, setting new one -> \", contentLength)\n\t\treq.Req.Header.Add(\"Content-Length\", contentLength)\n\t}\n\n\tlog.Debugf(\"sendRequest | Executing request .. %+v\\n\", req.Req)\n\tclient := &http.Client{Transport: req.Tr, Timeout: req.Timeout}\n\n\tresp, err := client.Do(req.Req)\n\n\tif err != nil {\n\t\tlog.Debug(\"Error executing request | ERR:\", err)\n\t\tresponse.Error = errors.New(\"ERROR_SENDING_REQUEST -> \" + err.Error())\n\t\treturn &response\n\t}\n\tdefer resp.Body.Close()\n\n\tresponse.Headers = make(map[string]string, len(resp.Header))\n\tfor k, v := range resp.Header {\n\t\tresponse.Headers[k] = strings.Join(v, `,`)\n\t}\n\tresponse.Cookie = resp.Cookies()\n\tlog.Debug(\"sendRequest | Request executed, reading response ...\")\n\tbodyResp, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlog.Debug(\"sendRequest | Unable to read response! 
| Err: \", err)\n\t\tresponse.Error = errors.New(\"ERROR_READING_RESPONSE -> \" + err.Error())\n\t\treturn &response\n\t}\n\n\tresponse.Body = bodyResp\n\tresponse.StatusCode = resp.StatusCode\n\tresponse.Error = nil\n\telapsed := time.Since(start)\n\tresponse.Time = elapsed\n\tresponse.Response = resp\n\tlog.Debug(\"sendRequest | Elapsed -> \", elapsed, \" | STOP!\")\n\treturn &response\n}", "func ProvideHTTPRequest() core.HTTPRequest {\n\n\treturn core.NewHTTPRequest(&http.Client{Timeout: 60 * time.Second})\n}", "func (h *HTTP2Handler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {\n\tvar err error\n\n\treqHostname := req.Host\n\tif host, _, err := net.SplitHostPort(req.Host); err == nil {\n\t\treqHostname = host\n\t}\n\n\tvar h2 bool = req.ProtoMajor == 2 && req.ProtoMinor == 0\n\tvar isProxyRequest bool = !yaputil.ContainsString(h.ServerNames, reqHostname)\n\n\tvar paramsPrefix string = http.CanonicalHeaderKey(\"X-UrlFetch-\")\n\tparams := http.Header{}\n\tfor key, values := range req.Header {\n\t\tif strings.HasPrefix(key, paramsPrefix) {\n\t\t\tparams[key] = values\n\t\t}\n\t}\n\n\tfor key := range params {\n\t\treq.Header.Del(key)\n\t}\n\n\tif isProxyRequest && h.DisableProxy {\n\t\thttp.Error(rw, \"403 Forbidden\", http.StatusForbidden)\n\t\treturn\n\t}\n\n\tvar username, password string\n\tif isProxyRequest && h.Authenticator != nil {\n\t\tauth := req.Header.Get(\"Proxy-Authorization\")\n\t\tif auth == \"\" {\n\t\t\th.ProxyAuthorizationRequired(rw, req)\n\t\t\treturn\n\t\t}\n\n\t\tparts := strings.SplitN(auth, \" \", 2)\n\t\tif len(parts) == 2 {\n\t\t\tswitch parts[0] {\n\t\t\tcase \"Basic\":\n\t\t\t\tif auth, err := base64.StdEncoding.DecodeString(parts[1]); err == nil {\n\t\t\t\t\tparts := strings.Split(string(auth), \":\")\n\t\t\t\t\tusername = parts[0]\n\t\t\t\t\tpassword = parts[1]\n\n\t\t\t\t\tif err := h.Authenticator.Authenticate(username, password); err != nil {\n\t\t\t\t\t\thttp.Error(rw, \"403 Forbidden\", 
http.StatusForbidden)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tglog.Errorf(\"Unrecognized auth type: %#v\", parts[0])\n\t\t\t\thttp.Error(rw, \"403 Forbidden\", http.StatusForbidden)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\treq.Header.Del(\"Proxy-Authorization\")\n\t}\n\n\tif req.Method == http.MethodConnect {\n\t\thost, port, err := net.SplitHostPort(req.Host)\n\t\tif err != nil {\n\t\t\thost = req.Host\n\t\t\tport = \"443\"\n\t\t}\n\n\t\tglog.Infof(\"[%v 0x%04x %s] %s \\\"%s %s %s\\\" - -\",\n\t\t\treq.TLS.ServerName, req.TLS.Version, username, req.RemoteAddr, req.Method, req.Host, req.Proto)\n\n\t\tdial := h.Dial\n\t\tif dial == nil {\n\t\t\tdial = h.Transport.Dial\n\t\t}\n\n\t\tconn, err := dial(\"tcp\", net.JoinHostPort(host, port))\n\t\tif err != nil {\n\t\t\thttp.Error(rw, err.Error(), http.StatusBadGateway)\n\t\t\treturn\n\t\t}\n\n\t\tvar w io.Writer\n\t\tvar r io.Reader\n\n\t\t// http2 only support Flusher, http1/1.1 support Hijacker\n\t\tif h2 {\n\t\t\tflusher, ok := rw.(http.Flusher)\n\t\t\tif !ok {\n\t\t\t\thttp.Error(rw, fmt.Sprintf(\"%#v is not http.Flusher\", rw), http.StatusBadGateway)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trw.WriteHeader(http.StatusOK)\n\t\t\tflusher.Flush()\n\n\t\t\tw = FlushWriter{rw}\n\t\t\tr = req.Body\n\t\t} else {\n\t\t\thijacker, ok := rw.(http.Hijacker)\n\t\t\tif !ok {\n\t\t\t\thttp.Error(rw, fmt.Sprintf(\"%#v is not http.Hijacker\", rw), http.StatusBadGateway)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlconn, _, err := hijacker.Hijack()\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(rw, err.Error(), http.StatusBadGateway)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdefer lconn.Close()\n\n\t\t\tw = lconn\n\t\t\tr = lconn\n\n\t\t\tio.WriteString(lconn, \"HTTP/1.1 200 OK\\r\\n\\r\\n\")\n\t\t}\n\n\t\tdefer conn.Close()\n\n\t\tgo yaputil.IOCopy(conn, r)\n\t\tyaputil.IOCopy(w, conn)\n\n\t\treturn\n\t}\n\n\tif req.Host == \"\" {\n\t\thttp.Error(rw, \"403 Forbidden\", http.StatusForbidden)\n\t\treturn\n\t}\n\n\tif req.URL.Host == \"\" 
{\n\t\treq.URL.Host = req.Host\n\t}\n\n\tif req.ContentLength == 0 {\n\t\tio.Copy(ioutil.Discard, req.Body)\n\t\treq.Body.Close()\n\t\treq.Body = nil\n\t}\n\n\tglog.Infof(\"[%v 0x%04x %s] %s \\\"%s %s %s\\\" - -\",\n\t\treq.TLS.ServerName, req.TLS.Version, username, req.RemoteAddr, req.Method, req.URL.String(), req.Proto)\n\n\tif req.URL.Scheme == \"\" {\n\t\treq.URL.Scheme = \"http\"\n\t}\n\n\tif h2 {\n\t\treq.ProtoMajor = 1\n\t\treq.ProtoMinor = 1\n\t\treq.Proto = \"HTTP/1.1\"\n\t}\n\n\tif !isProxyRequest && h.Fallback != nil {\n\t\tif h.Fallback.Scheme == \"file\" {\n\t\t\thttp.FileServer(http.Dir(h.Fallback.Path)).ServeHTTP(rw, req)\n\t\t\treturn\n\t\t}\n\t\treq.URL.Scheme = h.Fallback.Scheme\n\t\treq.URL.Host = h.Fallback.Host\n\t\tif ip, _, err := net.SplitHostPort(req.RemoteAddr); err == nil {\n\t\t\txff := req.Header.Get(\"X-Forwarded-For\")\n\t\t\tif xff == \"\" {\n\t\t\t\treq.Header.Set(\"X-Forwarded-For\", ip)\n\t\t\t} else {\n\t\t\t\treq.Header.Set(\"X-Forwarded-For\", xff+\", \"+ip)\n\t\t\t}\n\t\t\treq.Header.Set(\"X-Forwarded-Proto\", \"https\")\n\t\t\treq.Header.Set(\"X-Real-IP\", ip)\n\t\t}\n\t}\n\n\tresp, err := h.Transport.RoundTrip(req)\n\tglog.Infof(\"%+v\", req)\n\tif err != nil {\n\t\tmsg := err.Error()\n\t\tif strings.HasPrefix(msg, \"Invaid DNS Record: \") {\n\t\t\thttp.Error(rw, \"403 Forbidden\", http.StatusForbidden)\n\t\t} else {\n\t\t\thttp.Error(rw, err.Error(), http.StatusBadGateway)\n\t\t}\n\t\treturn\n\t}\n\n\tif h2 {\n\t\tresp.Header.Del(\"Connection\")\n\t\tresp.Header.Del(\"Keep-Alive\")\n\t}\n\n\tfor key, values := range resp.Header {\n\t\tfor _, value := range values {\n\t\t\trw.Header().Add(key, value)\n\t\t}\n\t}\n\trw.WriteHeader(resp.StatusCode)\n\n\tdefer resp.Body.Close()\n\n\tvar r io.Reader = resp.Body\n\tyaputil.IOCopy(rw, r)\n}", "func DoHTTPRequest(request *http.Request) (*http.Response, error) {\n\ttimeout := time.Duration(httpTimeout)\n\tclient := &http.Client{\n\t\tTimeout: timeout,\n\t}\n\treturn 
client.Do(request)\n}", "func TestRequestURL_BasicAuth_NoAuthHeader(t *testing.T) {\n\tmockResp := &http.Response{\n\t\tStatusCode: 401,\n\t}\n\tmockClient := makeMockHttpClient(mockResp)\n\tc := &httpClient{Client: mockClient, HTTPUsername: \"user\", HTTPPassword: \"pass\"}\n\tu := &url.URL{Scheme: \"http\", Host: \"localhost\", Path: \"/\"}\n\tresp, err := c.RequestURL(u)\n\tif err != nil {\n\t\tt.Errorf(\"Got error: %v\", err)\n\t}\n\tif resp == nil {\n\t\tt.Errorf(\"Got nil response!\")\n\t}\n\tif resp.StatusCode != 401 {\n\t\tt.Errorf(\"Got non-401 response code: %d\", resp.StatusCode)\n\t}\n}", "func Unauthenticated() http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\n\t\tWriteResponseMessage(w, \"Unathenticated request\", \"\", 400, true)\n\t})\n}", "func sendGet(request datastructures.MiddlewareRequest) []byte {\n\t//url := \"https://ground0.hackx.com/log-analyzer-prod/auth/\" + method + \"?\" + data\n\turl := \"https://ground0.hackx.com/log-analyzer-prod/\" + request.Method + \"?\" + request.Data\n\tlog.Info(\"URL:>\", url)\n\t//\tdata = []byte(`{\"title\":\"Buy cheese and bread for breakfast.\"}`)\n\treq, err := http.NewRequest(\"POST\", url, bytes.NewBuffer([]byte(\"\")))\n\tif err != nil {\n\t\tlog.Error(\"sendGet | Ouch! 
Seems that we POST an error | ERR: \", err)\n\t}\n\treq.Header.Set(\"X-Custom-Header\", \"login_test\")\n\treq.Header.Set(\"Content-Type\", \"text/plain\")\n\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\t// Raise os.Exit\n\t\tpanic(err)\n\t}\n\tdefer resp.Body.Close()\n\n\tlog.Info(\"response Status:\", resp.Status)\n\tlog.Info(\"response Headers:\", resp.Header)\n\tbody, _ := ioutil.ReadAll(resp.Body)\n\tlog.Info(\"response Body:\", string(body))\n\n\treturn body\n}", "func (s *Server) Authenticate(wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\ts.Session = grequests.NewSession(nil)\n\tresp, err := s.Session.Get(s.URL+loginURL, nil)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tlog.Fatalln(\"Failed to get csrf: \" + s.Name)\n\t}\n\n\tdoc, err := goquery.NewDocumentFromReader(strings.NewReader((resp.String())))\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tlog.Fatalln(\"Failed to parse html: \" + s.Name)\n\t}\n\n\tcsrfToken := \"\"\n\tdoc.Find(\"meta\").Each(func(i int, s *goquery.Selection) {\n\t\tif name, _ := s.Attr(\"name\"); name == \"csrf-token\" {\n\t\t\tcsrfToken, _ = s.Attr(\"content\")\n\t\t\treturn\n\t\t}\n\t})\n\n\tresp, err = s.Session.Get(s.URL+verifyURL, nil)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tlog.Fatalln(\"Failed to verify: \" + s.Name)\n\t}\n\n\ts.Headers = &map[string]string{\n\t\t\"csrf-token\": csrfToken,\n\t\t\"Connection\": \"keep-alive\",\n\t\t\"Content-Type\": \"application/json\",\n\t\t\"Accept\": \"application/x-www-form-urlencoded; charset=utf-8\",\n\t}\n\n\tresp, err = s.Session.Post(s.URL+passwordURL, &grequests.RequestOptions{\n\t\tJSON: map[string]string{\n\t\t\t\"username\": s.Username,\n\t\t\t\"password\": s.Password,\n\t\t},\n\t\t// Cookies: cookies.Cookies,\n\t\tHeaders: *s.Headers,\n\t})\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tlog.Fatalln(\"Failed to login: \" + s.Name)\n\t}\n\n\t// pretty.Println(resp.String())\n\tif !s.IsThirdParty {\n\t\t// log.Println(\"TOTP logic 
here...\")\n\t\ttotp := gotp.NewDefaultTOTP(s.Seed)\n\t\t// log.Println(totp.Now())\n\t\tresp, err = s.Session.Post(s.URL+challengeURL, &grequests.RequestOptions{\n\t\t\tJSON: map[string]string{\n\t\t\t\t\"username\": s.Username,\n\t\t\t\t\"password\": s.Password,\n\t\t\t\t\"challenge\": totp.Now(),\n\t\t\t},\n\t\t\t// Cookies: cookies.Cookies,\n\t\t\tHeaders: *s.Headers,\n\t\t})\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\tlog.Fatalln(\"Failed to login: \" + s.Name)\n\t\t}\n\t\t// pretty.Println(resp.String())\n\t}\n}", "func doHttpRequest(cfg *config.Configuration, req *http.Request, creds auth.Creds) (*http.Response, error) {\n\tvar (\n\t\tres *http.Response\n\t\tcause string\n\t\terr error\n\t)\n\n\tif cfg.NtlmAccess(auth.GetOperationForRequest(req)) {\n\t\tcause = \"ntlm\"\n\t\tres, err = doNTLMRequest(cfg, req, true)\n\t} else {\n\t\tcause = \"http\"\n\t\tres, err = NewHttpClient(cfg, req.Host).Do(req)\n\t}\n\n\tif res == nil {\n\t\tres = &http.Response{\n\t\t\tStatusCode: 0,\n\t\t\tHeader: make(http.Header),\n\t\t\tRequest: req,\n\t\t\tBody: ioutil.NopCloser(bytes.NewBufferString(\"\")),\n\t\t}\n\t}\n\n\tif err != nil {\n\t\tif errors.IsAuthError(err) {\n\t\t\tSetAuthType(cfg, req, res)\n\t\t\tdoHttpRequest(cfg, req, creds)\n\t\t} else {\n\t\t\terr = errors.Wrap(err, cause)\n\t\t}\n\t} else {\n\t\terr = handleResponse(cfg, res, creds)\n\t}\n\n\tif err != nil {\n\t\tif res != nil {\n\t\t\tSetErrorResponseContext(cfg, err, res)\n\t\t} else {\n\t\t\tsetErrorRequestContext(cfg, err, req)\n\t\t}\n\t}\n\n\treturn res, err\n}", "func (r DisableSecurityHubRequest) Send(ctx context.Context) (*DisableSecurityHubResponse, error) {\n\tr.Request.SetContext(ctx)\n\terr := r.Request.Send()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp := &DisableSecurityHubResponse{\n\t\tDisableSecurityHubOutput: r.Request.Data.(*DisableSecurityHubOutput),\n\t\tresponse: &aws.Response{Request: r.Request},\n\t}\n\n\treturn resp, nil\n}", "func (ec *Client) Send(amount int) 
{\n\tam := fmt.Sprintf(\"%v\", amount*int(params.Ether))\n\tmsg, err := ec.r.PayToken(ec.token, ec.other, am)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif msg == \"The method is not allowed for the requested URL.\" {\n\t\tpanic(msg)\n\t}\n}", "func doNothing(w http.ResponseWriter, r *http.Request) {}", "func (emailHunter *EmailHunter) sendRequest(formValues url.Values, emailHunterURL string) (*http.Response, error) {\n\tformValues.Set(\"api_key\", emailHunter.APIKey)\n\treq, err := http.NewRequest(\"GET\", emailHunterURL, strings.NewReader(formValues.Encode()))\n\treq.Header.Add(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\tresp, err := emailHunter.HTTPClient.Do(req)\n\n\treturn resp, err\n}", "func sendHTTPRequestToKTT(kttClient *KttClient, request *http.Request) KttResponse {\n\t//Perform POST request against KTT only in \"normal\" mode\n\tif applicationMode == \"normal\" {\n\n\t\tresponse, err := kttClient.Client.Do(request)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tdefer response.Body.Close()\n\n\t\tlog.Printf(\"Status code %v\", response.StatusCode)\n\n\t\tif response.StatusCode == http.StatusCreated {\n\t\t\tstatistics.TicketsCreated += 1\n\t\t\tbodyBytes, err := ioutil.ReadAll(response.Body)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\tbodyString := string(bodyBytes)\n\t\t\t//fmt.Println(bodyString)\n\t\t\treturn KttResponse(bodyString)\n\t\t} else {\n\t\t\tstatistics.Errors += 1\n\t\t}\n\n\t}\n\n\tif applicationMode == \"test\" {\n\t\tlog.Println(\"In test mode, pass ticket creation...\")\n\t}\n\n\treturn \"\"\n}", "func TestUnauthenticatedRequest(t *testing.T) {\n\tConvey(\"Simple unauthenticated request\", t, func() {\n\t\tConvey(\"Unauthenticated GET to / path should return a 200 response\", func() {\n\t\t\tw := makeUnauthenticatedRequest(\"GET\", \"/\")\n\t\t\tSo(w.Code, ShouldEqual, http.StatusOK)\n\t\t})\n\t\tConvey(\"Unauthenticated GET to /protected path should return a 401 response\", func() {\n\t\t\tw 
:= makeUnauthenticatedRequest(\"GET\", \"/protected\")\n\t\t\tSo(w.Code, ShouldEqual, http.StatusUnauthorized)\n\t\t})\n\t})\n}", "func (cc *Client) AuthedRawRequest(method string, path string) (*http.Response, error) {\n\treturn cc.AuthedRequest(method, path, nil, nil)\n}", "func (a *Middleware) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif r.TLS == nil {\n\t\ttrace.WriteError(w, trace.AccessDenied(\"missing authentication\"))\n\t\treturn\n\t}\n\tuser, err := a.GetUser(*r.TLS)\n\tif err != nil {\n\t\ttrace.WriteError(w, err)\n\t\treturn\n\t}\n\n\tremoteAddr := r.RemoteAddr\n\t// If the request is coming from a trusted proxy and the proxy is sending a\n\t// TeleportImpersonateHeader, we will impersonate the user in the header\n\t// instead of the user in the TLS certificate.\n\t// This is used by the proxy to impersonate the end user when making requests\n\t// without re-signing the client certificate.\n\timpersonateUser := r.Header.Get(TeleportImpersonateUserHeader)\n\tif impersonateUser != \"\" {\n\t\tif !isProxyRole(user) {\n\t\t\ttrace.WriteError(w, trace.AccessDenied(\"Credentials forwarding is only permitted for Proxy\"))\n\t\t\treturn\n\t\t}\n\t\t// If the service is not configured to allow credentials forwarding, reject the request.\n\t\tif !a.EnableCredentialsForwarding {\n\t\t\ttrace.WriteError(w, trace.AccessDenied(\"Credentials forwarding is not permitted by this service\"))\n\t\t\treturn\n\t\t}\n\n\t\tif user, err = a.extractIdentityFromImpersonationHeader(impersonateUser); err != nil {\n\t\t\ttrace.WriteError(w, err)\n\t\t\treturn\n\t\t}\n\t\tremoteAddr = r.Header.Get(TeleportImpersonateIPHeader)\n\t}\n\n\t// If the request is coming from a trusted proxy, we already know the user\n\t// and we will impersonate him. 
At this point, we need to remove the\n\t// TeleportImpersonateHeader from the request, otherwise the proxy will\n\t// attempt sending the request to upstream servers with the impersonation\n\t// header from a fake user.\n\tr.Header.Del(TeleportImpersonateUserHeader)\n\tr.Header.Del(TeleportImpersonateIPHeader)\n\n\t// determine authenticated user based on the request parameters\n\tctx := r.Context()\n\tctx = authz.ContextWithUserCertificate(ctx, certFromConnState(r.TLS))\n\tclientSrcAddr, err := utils.ParseAddr(remoteAddr)\n\tif err == nil {\n\t\tctx = authz.ContextWithClientAddr(ctx, clientSrcAddr)\n\t}\n\tctx = authz.ContextWithUser(ctx, user)\n\ta.Handler.ServeHTTP(w, r.WithContext(ctx))\n}", "func (rc RequestCall) SendMethodNotAllowed(ctx context.Context) {\n\trc.Response.ForceWriteHeader(http.StatusMethodNotAllowed)\n\n\ttelemetry.From(ctx).RegisterStatusCode(http.StatusMethodNotAllowed)\n}", "func (h *HUOBIHADAX) SendAuthenticatedHTTPPostRequest(method, endpoint, postBodyValues string, result interface{}) error {\n\tif !h.AuthenticatedAPISupport {\n\t\treturn fmt.Errorf(exchange.WarningAuthenticatedRequestWithoutCredentialsSet, h.Name)\n\t}\n\n\tsignatureParams := url.Values{}\n\tsignatureParams.Set(\"AccessKeyId\", h.APIKey)\n\tsignatureParams.Set(\"SignatureMethod\", \"HmacSHA256\")\n\tsignatureParams.Set(\"SignatureVersion\", \"2\")\n\tsignatureParams.Set(\"Timestamp\", time.Now().UTC().Format(\"2006-01-02T15:04:05\"))\n\n\tendpoint = fmt.Sprintf(\"/v%s/%s\", huobihadaxAPIVersion, endpoint)\n\tpayload := fmt.Sprintf(\"%s\\napi.hadax.com\\n%s\\n%s\",\n\t\tmethod, endpoint, signatureParams.Encode())\n\n\theaders := make(map[string]string)\n\theaders[\"Content-Type\"] = \"application/json\"\n\theaders[\"Accept-Language\"] = \"zh-cn\"\n\n\thmac := common.GetHMAC(common.HashSHA256, []byte(payload), []byte(h.APISecret))\n\tsignatureParams.Set(\"Signature\", common.Base64Encode(hmac))\n\turlPath := common.EncodeURLValues(fmt.Sprintf(\"%s%s\", h.APIUrl, 
endpoint),\n\t\tsignatureParams)\n\treturn h.SendPayload(method, urlPath, headers, bytes.NewBufferString(postBodyValues), result, true, false, h.Verbose, h.HTTPDebugging)\n}", "func SetNoHTTP(v bool) {\n\traw.NoHTTP = v\n}", "func (l *AuthMiddleware) ServeHTTP(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {\n\ttoken := r.Header.Get(\"X-Cloudsyncer-Authtoken\")\n\tusername := r.Header.Get(\"X-Cloudsyncer-Username\")\n\n\tif username == \"\" {\n\t\thandleErr(w, 403, nil, \"username not provided or empty\")\n\t\treturn\n\t}\n\n\tif len(username) > 255 {\n\t\thandleErr(w, 403, nil, \"username too long\")\n\t\treturn\n\t}\n\n\tif token == \"\" {\n\t\thandleErr(w, 403, nil, \"Token not provided or empty\")\n\t\treturn\n\t}\n\n\tif len(token) > 255 {\n\t\thandleErr(w, 413, nil, \"Token too long\")\n\t\treturn\n\t}\n\tuser := db.GetUser(username)\n\tif user == nil {\n\t\thandleErr(w, 403, nil, \"Invalid credentials\")\n\t\treturn\n\t}\n\tif session := db.GetSession(user, token); session == nil {\n\t\thandleErr(w, 403, nil, \"Invalid credentials\")\n\t\treturn\n\t} else {\n\t\tcontext.Set(r, \"session\", session)\n\t\tcontext.Set(r, \"user\", user)\n\t}\n\tnext(w, r)\n\n}", "func (r *ActivityUnstarRepoForAuthenticatedUserReq) HTTPRequest(ctx context.Context, opt ...RequestOption) (*http.Request, error) {\n\treturn buildHTTPRequest(ctx, r, opt)\n}", "func (c *RestClient) ShallForwardUnauthenticatedRESTRequests() bool {\n\treturn c.forwardUnauthenticatedRESTRequests\n}", "func (b *BitcoinClient) sendRequest(req *http.Request) (*http.Response, error) {\n\tres, err := b.Client.Do(req)\n\tif err != nil {\n\t\tlog.Println(ErrUnresponsive)\n\t\treturn nil, ErrUnresponsive\n\t}\n\n\treturn res, nil\n}", "func sendReq(req *http.Request, result interface{}) (resp *http.Response, err error) {\n\n\tswitch {\n\t// TODO: This wil dump the authorization token. 
Which it probably shouldn't.\n\tcase debug():\n\t\treqDump, dumpErr := httputil.DumpRequestOut(req, true)\n\t\treqStr := string(reqDump)\n\t\tif dumpErr != nil {\n\t\t\tfmt.Printf(\"Error dumping request (display as generic object): %v\\n\", dumpErr)\n\t\t\treqStr = fmt.Sprintf(\"%v\", req)\n\t\t}\n\t\tfmt.Printf(\"%s %s\\n\", t.Title(\"Request\"), t.Text(reqStr))\n\t\tfmt.Println()\n\tcase verbose():\n\t\tfmt.Printf(\"%s %s\\n\", t.Title(\"Request:\"), t.Text(\"%s %s\", req.Method, req.URL))\n\t\tfmt.Println()\n\t}\n\n\tresp, err = hubClient.Do(req)\n\tif err == nil {\n\n\t\tif debug() {\n\t\t\trespDump, dumpErr := httputil.DumpResponse(resp, true)\n\t\t\trespStr := string(respDump)\n\t\t\tif dumpErr != nil {\n\t\t\t\tfmt.Printf(\"Error dumping response (display as generic object): %v\\n\", dumpErr)\n\t\t\t\trespStr = fmt.Sprintf(\"%v\", resp)\n\t\t\t}\n\t\t\tfmt.Printf(\"%s\\n%s\\n\", t.Title(\"Respose:\"), t.Text(respStr))\n\t\t\tfmt.Println()\n\t\t}\n\n\t\t// Do this after the Dump, the dump reads out the response for reprting and\n\t\t// replaces the reader with anotherone that has the data.\n\t\terr = checkReturnCode(*resp)\n\t\tif result != nil {\n\t\t\tif err == nil {\n\t\t\t\terr = unmarshal(resp, result)\n\t\t\t}\n\t\t}\n\n\t}\n\treturn resp, err\n}", "func (ch *clientSecureChannel) sendRequest(ctx context.Context, op *ua.ServiceOperation) error {\n\t// Check if time to renew security token.\n\tif !ch.tokenRenewalTime.IsZero() && time.Now().After(ch.tokenRenewalTime) {\n\t\tch.tokenRenewalTime = ch.tokenRenewalTime.Add(60000 * time.Millisecond)\n\t\tch.renewToken(ctx)\n\t}\n\n\tch.sendingSemaphore.Lock()\n\tdefer ch.sendingSemaphore.Unlock()\n\n\treq := op.Request()\n\n\tif ch.trace {\n\t\tb, _ := json.MarshalIndent(req, \"\", \" \")\n\t\tlog.Printf(\"%s%s\", reflect.TypeOf(req).Elem().Name(), b)\n\t}\n\n\tswitch req := req.(type) {\n\tcase *ua.OpenSecureChannelRequest:\n\t\terr := ch.sendOpenSecureChannelRequest(ctx, req)\n\t\tif err != nil 
{\n\t\t\treturn err\n\t\t}\n\tcase *ua.CloseSecureChannelRequest:\n\t\terr := ch.sendServiceRequest(ctx, req)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// send a success response to ourselves (the server will just close it's socket).\n\t\tselect {\n\t\tcase op.ResponseCh() <- &ua.CloseSecureChannelResponse{ResponseHeader: ua.ResponseHeader{RequestHandle: req.RequestHandle, Timestamp: time.Now()}}:\n\t\tdefault:\n\t\t}\n\tdefault:\n\t\terr := ch.sendServiceRequest(ctx, req)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (h *HTTPX) doUnsafe(req *retryablehttp.Request) (*http.Response, error) {\n\tmethod := req.Method\n\theaders := req.Header\n\ttargetURL := req.URL.String()\n\tbody := req.Body\n\treturn rawhttp.DoRaw(method, targetURL, h.RequestOverride.URIPath, headers, body)\n}", "func (r *IdentityRequest) Send() {\n\tdata, _ := json.Marshal(r.Frame)\n\tr.Request.Send(data)\n\tmessage := r.Request.Read()\n\tfmt.Println(string(message))\n}", "func forbidden(rw http.ResponseWriter, r *http.Request) {\n\n}", "func (r Request) Send() (*http.Response, error) {\n\n\t// Set url\n\turl := resourceUrl + r.Path\n\n\t// Define client\n\tclient := &http.Client{}\n\n\t// Request\n\trequest, err := http.NewRequest(r.Method, url, bytes.NewBuffer(r.Body))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Define header\n\trequest.Header.Add(\"Content-Type\", \"application/json\")\n\trequest.Header.Add(\"Authorization\", \"Bearer \"+r.Token)\n\n\t// Send request & get response\n\tresponse, err := client.Do(request)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Return data\n\treturn response, nil\n\n}", "func (au *Authenticator) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tsesSt, errGST := au.cx.getSessionStateFromRequest(r)\n\tif errGST != nil {\n\t\tvar authErrorReason string = \"\"\n\t\tvar wasError bool = false\n\t\tif errGST != session.ErrNoSessionId {\n\t\t\tif errGST == session.ErrInvalidScheme 
{\n\t\t\t\tauthErrorReason = WWWAuthenticateErrorInvalidRequest + \",\\n\" + \"error_description=\\\"Bearer scheme not provided\\\"\"\n\t\t\t\twasError = true\n\t\t\t} else if errGST == session.ErrInvalidSessionId {\n\t\t\t\tauthErrorReason = WWWAuthenticateErrorInvalidToken + \",\\n\" + \"error_description=\\\"token extracted not a valid session token\\\"\"\n\t\t\t\twasError = true\n\t\t\t}\n\t\t\tau.cx.logError(errGST, \"issue getting session from request\", \"\",\n\t\t\t\thttp.StatusInternalServerError)\n\t\t}\n\t\tcxWithAuthError := context.WithValue(r.Context(), authSessionErrorKey, wasError)\n\t\tcxWithAuthErrorValue := context.WithValue(cxWithAuthError, authSessionErrorValueKey, authErrorReason)\n\t\tcxWithSessionActive := context.WithValue(cxWithAuthErrorValue, authSessionActiveKey, false)\n\t\tcxWithUserAuthFalse := context.WithValue(cxWithSessionActive, authUserAuthenticatedKey, false)\n\n\t\trWithUserAuthFalse := r.WithContext(cxWithUserAuthFalse)\n\t\tau.handler.ServeHTTP(w, rWithUserAuthFalse)\n\t\treturn\n\t}\n\n\t//create a new request context containing the authenticated user\n\tcxWithSessionActive := context.WithValue(r.Context(), authSessionActiveKey, true)\n\tcxWithSessionState := context.WithValue(cxWithSessionActive, authSessionStateKey, sesSt)\n\n\tcxWithKey := context.WithValue(cxWithSessionState, authUserAuthenticatedKey, sesSt.Authenticated)\n\n\t//create a new request using that new context\n\trWithSession := r.WithContext(cxWithKey)\n\n\t//call the real handler, passing the new request\n\tau.handler.ServeHTTP(w, rWithSession)\n}", "func (c *HTTPClient) request(req *http.Request) (*http.Response, error) {\n\t// Adds headers used on ALL requests\n\treq.Header.Add(\"User-Agent\", c.UserAgent)\n\n\t// Executes the request\n\tres, err := c.Client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// If the server is in IUAM mode, solve the challenge and retry\n\tif res.StatusCode == 503 && res.Header.Get(\"Server\") == 
\"cloudflare-nginx\" {\n\t\tdefer res.Body.Close()\n\t\tvar rb []byte\n\t\trb, err = ioutil.ReadAll(res.Body)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn c.bypassCF(req, rb)\n\t}\n\treturn res, err\n}", "func (mc *MockClient) SendRequest(Method, Url string, bdy io.Reader, hdr http.Header, cookies []*http.Cookie) (*http.Response, error) {\n\tresponse, ok := mc.getResponse(Method, Url)\n\tif !ok {\n\t\t//return nil, errors.New(\"No Mock data for given Method+Url\")\n\t\treturn nil, fmt.Errorf(\"No Mock data for %s\", Method+Url)\n\t}\n\treturn &response, nil\n}", "func (r *AppsResetAuthorizationReq) HTTPRequest(ctx context.Context, opt ...RequestOption) (*http.Request, error) {\n\treturn buildHTTPRequest(ctx, r, opt)\n}", "func (rs *RequestSender) Send() (*http.Response, error) {\n\treturn rs.send(rs.req())\n}", "func (ea *EnsureAuth) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tpermitted, _ := Security.HasPermission(r,ea.permission)\n\tif permitted {\n\t\tea.handler.ServeHTTP(w, r)\n\t} else {\n\t\tw.WriteHeader(http.StatusForbidden)\n\t\tfmt.Fprint(w, \"Access denied\")\n\t}\n}", "func (m *atmAuthenticatorMiddleware) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\ttokenString := r.Header.Get(\"token\")\n\ttoken, err := uuid.Parse(tokenString)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\tatmErr := m.atmClient.IsAuthenticated(r.Context(), token)\n\tif atmErr != nil {\n\t\tif atmErr.IsAuthenticated() {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\treturn\n\t}\n\tm.next.ServeHTTP(w, r)\n}", "func LogHTTPRequest(next http.Handler) http.Handler {\n\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tlogData := &HTTPLogInfo{\n\t\t\tMethod: r.Method,\n\t\t\tProto: r.Proto,\n\t\t\tURI: r.URL.String(),\n\t\t\tHost: r.Host,\n\t\t\tUserAgent: r.UserAgent(),\n\t\t}\n\t\tlogData.IPAddr = 
getRemoteAddress(r)\n\n\t\t_, err := r.Cookie(\"cookie-name\")\n\t\tif err == nil {\n\t\t\tlogData.UserID = getUserIDFromSession(w, r)\n\t\t}\n\n\t\thttpResponseData := httpsnoop.CaptureMetrics(next, w, r)\n\n\t\tlogData.StatusCode = httpResponseData.Code\n\t\tlogData.ContentLength = httpResponseData.Written\n\t\tlogData.Date = time.Now().UTC().Format(layout)\n\t\tlogData.Duration = httpResponseData.Duration\n\n\t\tutils.LogHTTP.Println(getFormattedLog(logData))\n\t})\n}", "func (r RevocationEndpoint) DoHTTPRequest(method string, req, resp interface{}) error {\n\treturn doHTTPRequest(method, r.endpoint, req, resp)\n}", "func (ea *EnsureAuth) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif !IsUserAuthenticated(r) {\n\t\tretErr := &Error{\n\t\t\tClientError: true,\n\t\t\tServerError: false,\n\t\t\tMessage: errUnauthorized.Error(),\n\t\t\tContext: fmt.Sprintf(\"method=%s path=%s\", r.Method, r.URL.Path),\n\t\t\tCode: 0,\n\t\t}\n\t\twwwHeaderValue := WWWAuthenticateBearerRealm\n\t\twwwErrVal := getSessionErrorKeyValueFromContext(r)\n\t\tif IsAuthError(r) && len(wwwErrVal) > 0 {\n\t\t\twwwHeaderValue += \",\\n\" + wwwErrVal\n\t\t}\n\t\tw.Header().Add(HeaderWWWAuthenticate, wwwHeaderValue)\n\t\tea.cx.handleErrorJson(w, r, nil, \"request to access authenticated resource, but user is not authenticated\",\n\t\t\tretErr, http.StatusUnauthorized)\n\t\treturn\n\t}\n\tea.handler.ServeHTTP(w, r)\n}", "func (p *authPipe) SendAuthRequest(authType int32, data []byte) error {\n\tc := p.c\n\tc.msgBuilder.initMsg(pgwirebase.ServerMsgAuth)\n\tc.msgBuilder.putInt32(authType)\n\tc.msgBuilder.write(data)\n\treturn c.msgBuilder.finishMsg(c.conn)\n}", "func (s *Surf) httpRequest(param *DownloadParam) (resp *http.Response, err error) {\n\treq, err := http.NewRequest(param.GetMethod(), param.GetUrl().String(), param.GetBody())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header = param.GetHeader()\n\n\tif param.GetTryTimes() <= 0 {\n\t\tfor {\n\t\t\tresp, err = 
param.GetClient().Do(req)\n\t\t\tif err != nil {\n\t\t\t\tif !param.IsEnableCookie() {\n\t\t\t\t\treq.Header.Set(\"User-Agent\", FKUserAgent.GlobalUserAgent.CreateRandomWebBrowserUA())\n\t\t\t\t}\n\t\t\t\ttime.Sleep(param.GetRetryPause())\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t} else {\n\t\tfor i := 0; i < param.GetTryTimes(); i++ {\n\t\t\tresp, err = param.GetClient().Do(req)\n\t\t\tif err != nil {\n\t\t\t\tif !param.IsEnableCookie() {\n\t\t\t\t\treq.Header.Set(\"User-Agent\", FKUserAgent.GlobalUserAgent.CreateRandomWebBrowserUA())\n\t\t\t\t}\n\t\t\t\ttime.Sleep(param.GetRetryPause())\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn resp, err\n}", "func (c *Client) SendWithAuth(req *http.Request, v interface{}) error {\n\tif (c.Token == nil) || (c.Token.ExpiresAt.Before(time.Now())) {\n\t\tresp, err := c.GetAccessToken()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tc.Token = resp\n\t}\n\treq.Header.Set(\"Authorization\", \"Bearer \"+c.Token.Token)\n\n\treturn c.Send(req, v)\n}", "func doNothing(error, http.ResponseWriter, *http.Request) {}", "func sendRequest(config Configuration, payload []byte, url string) {\n\tfmt.Println(\"Sending request to remote...\")\n\tclient := &http.Client{}\n\trequest, err := http.NewRequest(\"POST\", url, strings.NewReader(string(payload)))\n\trequest.SetBasicAuth(config.Login, config.Password)\n\tresp, err := client.Do(request)\n\n\tif err != nil {\n\t\tfmt.Println(\"Request connection error!\")\n\t\tlog.Fatal(err)\n\t}\n\n\tfmt.Println(\"Request sent. 
Status:\", resp.Status)\n}", "func (r DisableUserRequest) Send(ctx context.Context) (*DisableUserResponse, error) {\n\tr.Request.SetContext(ctx)\n\terr := r.Request.Send()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp := &DisableUserResponse{\n\t\tDisableUserOutput: r.Request.Data.(*DisableUserOutput),\n\t\tresponse: &aws.Response{Request: r.Request},\n\t}\n\n\treturn resp, nil\n}", "func (r *Responder) Forbidden() { r.write(http.StatusForbidden) }", "func setAuthorization(req *http.Request, apiKey string) {\n\treq.SetBasicAuth(apiKey, \"\")\n}", "func (p *OAuthProxy) AuthenticateOnly(rw http.ResponseWriter, req *http.Request) {\n\tlogger := log.NewLogEntry()\n\terr := p.Authenticate(rw, req)\n\tif err != nil {\n\t\tp.StatsdClient.Incr(\"application_error\", []string{\"action:auth\", \"error:unauthorized_request\"}, 1.0)\n\t\tlogger.Error(err, \"error authenticating\")\n\t\thttp.Error(rw, \"unauthorized request\", http.StatusUnauthorized)\n\t}\n\trw.WriteHeader(http.StatusAccepted)\n}", "func (h *Input) ServeHTTP(res http.ResponseWriter, req *http.Request) {\n\thandler := h.serveWrite\n\n\th.authenticateIfSet(handler, res, req)\n}", "func (t NtlmTransport) RoundTrip(req *http.Request) (res *http.Response, err error) {\n\t// first send NTLM Negotiate header\n\tr, _ := http.NewRequest(\"GET\", req.URL.String(), strings.NewReader(\"\"))\n\tr.Header.Add(\"Authorization\", \"NTLM \"+encBase64(negotiateSP()))\n\n\ttr := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: t.Insecure},\n\t}\n\tclient := http.Client{Transport: tr, Timeout: time.Minute}\n\tresp, err := client.Do(r)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err == nil && resp.StatusCode == http.StatusUnauthorized {\n\n\t\t// it's necessary to reuse the same http connection\n\t\t// in order to do that it's required to read Body and close it\n\t\t_, err = io.Copy(ioutil.Discard, resp.Body)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\terr = 
resp.Body.Close()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// retrieve Www-Authenticate header from response\n\n\t\tntlmChallengeHeader := resp.Header.Get(\"WWW-Authenticate\")\n\t\tif ntlmChallengeHeader == \"\" {\n\t\t\treturn nil, errors.New(\"Wrong WWW-Authenticate header\")\n\t\t}\n\n\t\tntlmChallengeString := strings.Replace(ntlmChallengeHeader, \"NTLM \", \"\", -1)\n\t\tchallengeBytes, err := decBase64(ntlmChallengeString)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tsession, err := ntlm.CreateClientSession(ntlm.Version1, ntlm.ConnectionlessMode)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tsession.SetUserInfo(t.User, t.Password, t.Domain)\n\n\t\t// parse NTLM challenge\n\t\tchallenge, err := ntlm.ParseChallengeMessage(challengeBytes)\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\terr = session.ProcessChallengeMessage(challenge)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// authenticate user\n\t\tauthenticate, err := session.GenerateAuthenticateMessage()\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// set NTLM Authorization header\n\t\treq.Header.Set(\"Authorization\", \"NTLM \"+encBase64(authenticate.Bytes()))\n\t\tresp, err = client.Do(req)\n\n\t}\n\n\treturn resp, err\n}", "func (j *Jenkins) SendPureRequest(path string, httpParameters *devops.HttpParameters) ([]byte, error) {\n\tresBody, _, err := j.SendPureRequestWithHeaderResp(path, httpParameters)\n\n\treturn resBody, err\n}", "func SendRequest(endpoint string, data []byte) (*http.Response, error) {\n\n\ttr := &http.Transport{\n\t\tMaxIdleConns: 10,\n\t\tIdleConnTimeout: 30 * time.Second,\n\t\tDisableCompression: true,\n\t}\n\n\tvar resp *http.Response\n\tvar err error\n\n\tclient := &http.Client{Transport: tr}\n\n\tresp, err = client.Post(endpoint, \"application/json\", bytes.NewBuffer(data))\n\n\tif err != nil {\n\t\treturn resp, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\treturn resp, nil\n}", "func (h *HTTPGuard) 
Allowed(req *http.Request) error {\n\tif !h.NetworkEnabled {\n\t\treturn ErrNtwkDisabled\n\t}\n\treturn nil\n}", "func (c *Component) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tlisteners := AuthListeners()\n\n\tif logout, err := url.Parse(com.GetString(\"logout_url\")); err == nil {\n\t\tif r.URL.Path == logout.Path {\n\t\t\tif returnTo := r.URL.Query().Get(\"return\"); returnTo != \"\" {\n\t\t\t\thttp.Redirect(w, r, returnTo, http.StatusFound)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfor _, listener := range listeners {\n\t\t\t\tif err := listener.WebAuthLogout(w, r); err != nil {\n\t\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tq := url.Values{}\n\t\t\tq.Set(\"return\", r.Referer())\n\t\t\treturnURL := &url.URL{\n\t\t\t\tScheme: logout.Scheme,\n\t\t\t\tHost: logout.Host,\n\t\t\t\tPath: logout.Path,\n\t\t\t\tRawQuery: q.Encode(),\n\t\t\t}\n\t\t\thttp.Redirect(w, r, auth0.DefaultClient().LogoutURL(returnURL.String()), http.StatusFound)\n\t\t\treturn\n\t\t}\n\t}\n\n\ttoken, err := auth0.DefaultClient().NewToken(r.URL.Query().Get(\"code\"))\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tfor _, listener := range listeners {\n\t\tif err := listener.WebAuthLogin(w, r, token); err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t}\n\n\tstate := r.URL.Query().Get(\"state\")\n\tif state != \"\" {\n\t\thttp.Redirect(w, r, state, http.StatusFound)\n\t\treturn\n\t}\n\thttp.Redirect(w, r, r.Referer(), http.StatusFound)\n}", "func TestMakeRequestThatGetsDenied(t *testing.T) {\n\tproxier := New()\n\tfor {\n\t\tresp, err := proxier.DoRequestRaw(context.Background(), \"GET\", gimmeproxy.GimmeProxyURL, nil)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Error %s: \\n\", err.Error())\n\t\t\tbreak\n\t\t}\n\t\tfmt.Printf(\"Response code: %d\\n\", resp.StatusCode)\n\t}\n\n}", "func (a *Authenticator) ServeHTTP(w 
http.ResponseWriter, r *http.Request) {\n\t// Check the username and password against the directory.\n\tuid, pwd := r.FormValue(\"uid\"), r.FormValue(\"pwd\")\n\tp, err := a.dir.CheckCredentials(uid, pwd)\n\tif err != nil {\n\t\thttp.Error(w, \"Incorrect credentials.\", http.StatusUnauthorized)\n\t\treturn\n\t}\n\tts, err := a.generateJwt(p)\n\tif err != nil {\n\t\thttp.Error(w, \"Incorrect credentials.\", http.StatusUnauthorized)\n\t\treturn\n\t}\n\tw.Header().Add(\"Content-Type\", \"text/plain\")\n\tw.Write(ts)\n}", "func DoRequest(configuration Configuration, url string, method string) *http.Response {\n\treq, err := http.NewRequest(method, url, nil)\n\n\treq.SetBasicAuth(configuration.Username, configuration.Password)\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn resp\n}", "func jsonAuthFail(w http.ResponseWriter) {\n\tw.Header().Add(\"WWW-Authenticate\", `Basic realm=\"btcwallet RPC\"`)\n\thttp.Error(w, \"401 Unauthorized.\", http.StatusUnauthorized)\n}", "func (h *Handler) requestDenied(w http.ResponseWriter, r *http.Request, m string) {\n\tlog.Println(errorRoot, m)\n\n\tlog.Printf(\"ORIGIN: %v\\n\", r.Header.Get(originHeader))\n\tlog.Printf(\"METHOD: %v\\n\", r.Method)\n\n\theaders := r.Header.Get(requestHeadersHeader)\n\tlog.Printf(\"HEADERS: %v\\n\\n\", headers)\n\tfor _, h := range strings.Split(headers, \",\") {\n\t\th = http.CanonicalHeaderKey(h)\n\t\tlog.Printf(\"%v: %v\\n\", h, r.Header.Get(h))\n\t}\n\n\tw.WriteHeader(http.StatusForbidden)\n\treturn\n}", "func unauthenticated(fn http.HandlerFunc) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tif s, _ := Session.Get(r, \"s\"); s != nil && s.Values[\"Id\"] != nil {\n\t\t\thttp.Redirect(w, r, \"/main/\", 302)\n\t\t\treturn\n\t\t}\n\t\tfn(w, r)\n\t}\n}", "func (server *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tif req.Method != \"CONNECT\" {\n\t\tw.Header().Set(\"Content-Type\", 
\"text/plain; charset=utf-8\")\n\t\tw.WriteHeader(http.StatusMethodNotAllowed)\n\t\t// WriteBody\n\t\t_, _ = io.WriteString(w, \"405 must CONNECT\\n\")\n\t\treturn\n\t}\n\tconn, _, err := w.(http.Hijacker).Hijack()\n\tif err != nil {\n\t\tlog.Print(\"rpc hijacking \", req.RemoteAddr, \" got error: \", err.Error())\n\t\treturn\n\t}\n\t// send HTTP/1.0 200 Connected to Gee RPC\n\t_, _ = io.WriteString(conn, \"HTTP/1.0 \"+connected+\"\\n\\n\")\n\tserver.ServeConn(conn)\n}" ]
[ "0.61772037", "0.60453176", "0.5982576", "0.5896887", "0.5788983", "0.57063794", "0.57053524", "0.56688964", "0.56660306", "0.5622297", "0.55792046", "0.5493512", "0.54784316", "0.54572445", "0.54503506", "0.5448253", "0.5438047", "0.5422755", "0.53780687", "0.5362405", "0.5359291", "0.5354064", "0.534609", "0.5331768", "0.5316769", "0.5314293", "0.52971697", "0.5279987", "0.5237633", "0.52209723", "0.5217114", "0.5216336", "0.52028155", "0.51998043", "0.51854444", "0.518453", "0.51523995", "0.5133021", "0.51164573", "0.5115907", "0.5115481", "0.51144624", "0.51074636", "0.5106947", "0.5103305", "0.50989044", "0.50833714", "0.50828147", "0.5061009", "0.5059614", "0.50476277", "0.5037789", "0.50274456", "0.5014676", "0.5009401", "0.5001024", "0.50001943", "0.49624938", "0.49553445", "0.49543548", "0.49425784", "0.49394193", "0.49347383", "0.49336788", "0.4915876", "0.49091646", "0.4905658", "0.49021497", "0.48983866", "0.48919708", "0.48907873", "0.4889457", "0.48827934", "0.48791566", "0.48788935", "0.4872296", "0.48710346", "0.4868268", "0.48678878", "0.48637483", "0.4862473", "0.4861292", "0.48350865", "0.48324645", "0.48306027", "0.4829502", "0.48292804", "0.4824105", "0.4821643", "0.4818705", "0.4818296", "0.4816103", "0.48160124", "0.4812699", "0.4810508", "0.48067108", "0.48004973", "0.47924843", "0.47895676", "0.4784758" ]
0.53575665
21
SendAuthenticatedHTTPRequest sends authenticated requests to the HUOBI API
func (h *HUOBI) SendAuthenticatedHTTPRequest(ctx context.Context, ep exchange.URL, method, endpoint string, values url.Values, data, result interface{}, isVersion2API bool) error { var err error creds, err := h.GetCredentials(ctx) if err != nil { return err } ePoint, err := h.API.Endpoints.GetURL(ep) if err != nil { return err } if values == nil { values = url.Values{} } interim := json.RawMessage{} newRequest := func() (*request.Item, error) { values.Set("AccessKeyId", creds.Key) values.Set("SignatureMethod", "HmacSHA256") values.Set("SignatureVersion", "2") values.Set("Timestamp", time.Now().UTC().Format("2006-01-02T15:04:05")) if isVersion2API { endpoint = "/v" + huobiAPIVersion2 + endpoint } else { endpoint = "/v" + huobiAPIVersion + endpoint } payload := fmt.Sprintf("%s\napi.huobi.pro\n%s\n%s", method, endpoint, values.Encode()) headers := make(map[string]string) if method == http.MethodGet { headers["Content-Type"] = "application/x-www-form-urlencoded" } else { headers["Content-Type"] = "application/json" } var hmac []byte hmac, err = crypto.GetHMAC(crypto.HashSHA256, []byte(payload), []byte(creds.Secret)) if err != nil { return nil, err } values.Set("Signature", crypto.Base64Encode(hmac)) urlPath := ePoint + common.EncodeURLValues(endpoint, values) var body []byte if data != nil { body, err = json.Marshal(data) if err != nil { return nil, err } } return &request.Item{ Method: method, Path: urlPath, Headers: headers, Body: bytes.NewReader(body), Result: &interim, Verbose: h.Verbose, HTTPDebugging: h.HTTPDebugging, HTTPRecording: h.HTTPRecording, }, nil } err = h.SendPayload(ctx, request.Unset, newRequest, request.AuthenticatedRequest) if err != nil { return err } if isVersion2API { var errCap ResponseV2 if err = json.Unmarshal(interim, &errCap); err == nil { if errCap.Code != 200 && errCap.Message != "" { return fmt.Errorf("%w error code: %v error message: %s", request.ErrAuthRequestFailed, errCap.Code, errCap.Message) } } } else { var errCap Response if err 
= json.Unmarshal(interim, &errCap); err == nil { if errCap.Status == huobiStatusError && errCap.ErrorMessage != "" { return fmt.Errorf("%w error code: %v error message: %s", request.ErrAuthRequestFailed, errCap.ErrorCode, errCap.ErrorMessage) } } } err = json.Unmarshal(interim, result) if err != nil { return common.AppendError(err, request.ErrAuthRequestFailed) } return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (h *HUOBIHADAX) SendAuthenticatedHTTPRequest(method, endpoint string, values url.Values, result interface{}) error {\n\tif !h.AuthenticatedAPISupport {\n\t\treturn fmt.Errorf(exchange.WarningAuthenticatedRequestWithoutCredentialsSet, h.Name)\n\t}\n\n\tvalues.Set(\"AccessKeyId\", h.APIKey)\n\tvalues.Set(\"SignatureMethod\", \"HmacSHA256\")\n\tvalues.Set(\"SignatureVersion\", \"2\")\n\tvalues.Set(\"Timestamp\", time.Now().UTC().Format(\"2006-01-02T15:04:05\"))\n\n\tendpoint = fmt.Sprintf(\"/v%s/%s\", huobihadaxAPIVersion, endpoint)\n\tpayload := fmt.Sprintf(\"%s\\napi.hadax.com\\n%s\\n%s\",\n\t\tmethod, endpoint, values.Encode())\n\n\theaders := make(map[string]string)\n\theaders[\"Content-Type\"] = \"application/x-www-form-urlencoded\"\n\n\thmac := common.GetHMAC(common.HashSHA256, []byte(payload), []byte(h.APISecret))\n\tvalues.Set(\"Signature\", common.Base64Encode(hmac))\n\n\turlPath := common.EncodeURLValues(fmt.Sprintf(\"%s%s\", h.APIUrl, endpoint),\n\t\tvalues)\n\treturn h.SendPayload(method, urlPath, headers, bytes.NewBufferString(\"\"), result, true, false, h.Verbose, h.HTTPDebugging)\n}", "func (h *HitBTC) SendAuthenticatedHTTPRequest(ctx context.Context, ep exchange.URL, method, endpoint string, values url.Values, f request.EndpointLimit, result interface{}) error {\n\tcreds, err := h.GetCredentials(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tePoint, err := h.API.Endpoints.GetURL(ep)\n\tif err != nil {\n\t\treturn err\n\t}\n\theaders := make(map[string]string)\n\theaders[\"Authorization\"] = \"Basic \" + crypto.Base64Encode([]byte(creds.Key+\":\"+creds.Secret))\n\n\tpath := fmt.Sprintf(\"%s/%s\", ePoint, endpoint)\n\n\titem := &request.Item{\n\t\tMethod: method,\n\t\tPath: path,\n\t\tHeaders: headers,\n\t\tResult: result,\n\t\tVerbose: h.Verbose,\n\t\tHTTPDebugging: h.HTTPDebugging,\n\t\tHTTPRecording: h.HTTPRecording,\n\t}\n\n\treturn h.SendPayload(ctx, f, func() (*request.Item, error) {\n\t\titem.Body = 
bytes.NewBufferString(values.Encode())\n\t\treturn item, nil\n\t}, request.AuthenticatedRequest)\n}", "func (p *Poloniex) SendAuthenticatedHTTPRequest(ctx context.Context, ep exchange.URL, method, endpoint string, values url.Values, result interface{}) error {\n\tcreds, err := p.GetCredentials(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tePoint, err := p.API.Endpoints.GetURL(ep)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn p.SendPayload(ctx, request.Unset, func() (*request.Item, error) {\n\t\theaders := make(map[string]string)\n\t\theaders[\"Content-Type\"] = \"application/x-www-form-urlencoded\"\n\t\theaders[\"Key\"] = creds.Key\n\t\tvalues.Set(\"nonce\", p.Requester.GetNonce(true).String())\n\t\tvalues.Set(\"command\", endpoint)\n\n\t\thmac, err := crypto.GetHMAC(crypto.HashSHA512,\n\t\t\t[]byte(values.Encode()),\n\t\t\t[]byte(creds.Secret))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\theaders[\"Sign\"] = crypto.HexEncodeToString(hmac)\n\n\t\tpath := fmt.Sprintf(\"%s/%s\", ePoint, poloniexAPITradingEndpoint)\n\t\treturn &request.Item{\n\t\t\tMethod: method,\n\t\t\tPath: path,\n\t\t\tHeaders: headers,\n\t\t\tBody: bytes.NewBufferString(values.Encode()),\n\t\t\tResult: result,\n\t\t\tNonceEnabled: true,\n\t\t\tVerbose: p.Verbose,\n\t\t\tHTTPDebugging: p.HTTPDebugging,\n\t\t\tHTTPRecording: p.HTTPRecording,\n\t\t}, nil\n\t}, request.AuthenticatedRequest)\n}", "func (c *Coinbene) SendAuthHTTPRequest(ep exchange.URL, method, path, epPath string, isSwap bool,\n\tparams, result interface{}, f request.EndpointLimit) error {\n\tif !c.AllowAuthenticatedRequest() {\n\t\treturn fmt.Errorf(\"%s %w\", c.Name, exchange.ErrAuthenticatedRequestWithoutCredentialsSet)\n\t}\n\tendpoint, err := c.API.Endpoints.GetURL(ep)\n\tif err != nil {\n\t\treturn err\n\t}\n\tauthPath := coinbeneAuthPath\n\tif isSwap {\n\t\tauthPath = coinbeneSwapAuthPath\n\t}\n\tnow := time.Now()\n\ttimestamp := now.UTC().Format(\"2006-01-02T15:04:05.999Z\")\n\tvar finalBody io.Reader\n\tvar 
preSign string\n\tswitch {\n\tcase params != nil && method == http.MethodGet:\n\t\tp, ok := params.(url.Values)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"params is not of type url.Values\")\n\t\t}\n\t\tpreSign = timestamp + method + authPath + epPath + \"?\" + p.Encode()\n\t\tpath = common.EncodeURLValues(path, p)\n\tcase params != nil:\n\t\tvar i interface{}\n\t\tswitch p := params.(type) {\n\t\tcase url.Values:\n\t\t\tm := make(map[string]string)\n\t\t\tfor k, v := range p {\n\t\t\t\tm[k] = strings.Join(v, \"\")\n\t\t\t}\n\t\t\ti = m\n\t\tdefault:\n\t\t\ti = p\n\t\t}\n\t\ttempBody, err := json.Marshal(i)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfinalBody = bytes.NewBufferString(string(tempBody))\n\t\tpreSign = timestamp + method + authPath + epPath + string(tempBody)\n\tdefault:\n\t\tpreSign = timestamp + method + authPath + epPath\n\t}\n\ttempSign := crypto.GetHMAC(crypto.HashSHA256,\n\t\t[]byte(preSign),\n\t\t[]byte(c.API.Credentials.Secret))\n\theaders := make(map[string]string)\n\theaders[\"Content-Type\"] = \"application/json\"\n\theaders[\"ACCESS-KEY\"] = c.API.Credentials.Key\n\theaders[\"ACCESS-SIGN\"] = crypto.HexEncodeToString(tempSign)\n\theaders[\"ACCESS-TIMESTAMP\"] = timestamp\n\n\tvar resp json.RawMessage\n\terrCap := struct {\n\t\tCode int `json:\"code\"`\n\t\tMessage string `json:\"message\"`\n\t}{}\n\n\t// Expiry of timestamp doesn't appear to be documented, so making a reasonable assumption\n\tctx, cancel := context.WithDeadline(context.Background(), now.Add(15*time.Second))\n\tdefer cancel()\n\tif err := c.SendPayload(ctx, &request.Item{\n\t\tMethod: method,\n\t\tPath: endpoint + path,\n\t\tHeaders: headers,\n\t\tBody: finalBody,\n\t\tResult: &resp,\n\t\tAuthRequest: true,\n\t\tVerbose: c.Verbose,\n\t\tHTTPDebugging: c.HTTPDebugging,\n\t\tHTTPRecording: c.HTTPRecording,\n\t\tEndpoint: f,\n\t}); err != nil {\n\t\treturn err\n\t}\n\n\tif err := json.Unmarshal(resp, &errCap); err == nil {\n\t\tif errCap.Code != 200 && errCap.Message != 
\"\" {\n\t\t\treturn errors.New(errCap.Message)\n\t\t}\n\t}\n\treturn json.Unmarshal(resp, result)\n}", "func (h *HUOBIHADAX) SendAuthenticatedHTTPPostRequest(method, endpoint, postBodyValues string, result interface{}) error {\n\tif !h.AuthenticatedAPISupport {\n\t\treturn fmt.Errorf(exchange.WarningAuthenticatedRequestWithoutCredentialsSet, h.Name)\n\t}\n\n\tsignatureParams := url.Values{}\n\tsignatureParams.Set(\"AccessKeyId\", h.APIKey)\n\tsignatureParams.Set(\"SignatureMethod\", \"HmacSHA256\")\n\tsignatureParams.Set(\"SignatureVersion\", \"2\")\n\tsignatureParams.Set(\"Timestamp\", time.Now().UTC().Format(\"2006-01-02T15:04:05\"))\n\n\tendpoint = fmt.Sprintf(\"/v%s/%s\", huobihadaxAPIVersion, endpoint)\n\tpayload := fmt.Sprintf(\"%s\\napi.hadax.com\\n%s\\n%s\",\n\t\tmethod, endpoint, signatureParams.Encode())\n\n\theaders := make(map[string]string)\n\theaders[\"Content-Type\"] = \"application/json\"\n\theaders[\"Accept-Language\"] = \"zh-cn\"\n\n\thmac := common.GetHMAC(common.HashSHA256, []byte(payload), []byte(h.APISecret))\n\tsignatureParams.Set(\"Signature\", common.Base64Encode(hmac))\n\turlPath := common.EncodeURLValues(fmt.Sprintf(\"%s%s\", h.APIUrl, endpoint),\n\t\tsignatureParams)\n\treturn h.SendPayload(method, urlPath, headers, bytes.NewBufferString(postBodyValues), result, true, false, h.Verbose, h.HTTPDebugging)\n}", "func (by *Bybit) SendUSDCAuthHTTPRequest(ctx context.Context, ePath exchange.URL, method, path string, data interface{}, result UnmarshalTo, f request.EndpointLimit) error {\n\tcreds, err := by.GetCredentials(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif result == nil {\n\t\tresult = &USDCError{}\n\t}\n\n\tendpointPath, err := by.API.Endpoints.GetURL(ePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = by.SendPayload(ctx, f, func() (*request.Item, error) {\n\t\tnowTimeInMilli := strconv.FormatInt(time.Now().UnixMilli(), 10)\n\t\theaders := make(map[string]string)\n\t\tvar payload, hmacSigned []byte\n\n\t\tif data != nil 
{\n\t\t\td, ok := data.(map[string]interface{})\n\t\t\tif !ok {\n\t\t\t\treturn nil, common.GetTypeAssertError(\"map[string]interface{}\", data)\n\t\t\t}\n\t\t\tpayload, err = json.Marshal(d)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\tsignInput := nowTimeInMilli + creds.Key + defaultRecvWindow + string(payload)\n\t\thmacSigned, err = crypto.GetHMAC(crypto.HashSHA256, []byte(signInput), []byte(creds.Secret))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\theaders[\"Content-Type\"] = \"application/json\"\n\t\theaders[\"X-BAPI-API-KEY\"] = creds.Key\n\t\theaders[\"X-BAPI-SIGN\"] = crypto.HexEncodeToString(hmacSigned)\n\t\theaders[\"X-BAPI-SIGN-TYPE\"] = \"2\"\n\t\theaders[\"X-BAPI-TIMESTAMP\"] = nowTimeInMilli\n\t\theaders[\"X-BAPI-RECV-WINDOW\"] = defaultRecvWindow\n\n\t\treturn &request.Item{\n\t\t\tMethod: method,\n\t\t\tPath: endpointPath + path,\n\t\t\tHeaders: headers,\n\t\t\tBody: bytes.NewBuffer(payload),\n\t\t\tResult: &result,\n\t\t\tVerbose: by.Verbose,\n\t\t\tHTTPDebugging: by.HTTPDebugging,\n\t\t\tHTTPRecording: by.HTTPRecording}, nil\n\t}, request.AuthenticatedRequest)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn result.GetError(true)\n}", "func SendHTTPRequest(method, url, token string, body []byte) ([]byte, *http.Response, error) {\n\treq, err := http.NewRequest(method, url, bytes.NewBuffer(body))\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\treq.Header.Add(\"Authorization\", fmt.Sprintf(\"Bearer %s\", token))\n\n\tclient := &http.Client{}\n\n\tresponse, err := client.Do(req)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer response.Body.Close()\n\n\tbody, readErr := ioutil.ReadAll(response.Body)\n\tif readErr != nil {\n\t\tlog.Fatal(readErr)\n\t}\n\n\treturn body, response, nil\n}", "func (h *HUOBIHADAX) SendHTTPRequest(path string, result interface{}) error {\n\treturn h.SendPayload(http.MethodGet, path, nil, nil, result, false, false, h.Verbose, h.HTTPDebugging)\n}", "func (h *HUOBI) 
SendHTTPRequest(ctx context.Context, ep exchange.URL, path string, result interface{}) error {\n\tendpoint, err := h.API.Endpoints.GetURL(ep)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar tempResp json.RawMessage\n\n\titem := &request.Item{\n\t\tMethod: http.MethodGet,\n\t\tPath: endpoint + path,\n\t\tResult: &tempResp,\n\t\tVerbose: h.Verbose,\n\t\tHTTPDebugging: h.HTTPDebugging,\n\t\tHTTPRecording: h.HTTPRecording,\n\t}\n\n\terr = h.SendPayload(ctx, request.Unset, func() (*request.Item, error) {\n\t\treturn item, nil\n\t}, request.UnauthenticatedRequest)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar errCap errorCapture\n\tif err := json.Unmarshal(tempResp, &errCap); err == nil {\n\t\tif errCap.ErrMsgType1 != \"\" {\n\t\t\treturn fmt.Errorf(\"error code: %v error message: %s\", errCap.CodeType1,\n\t\t\t\terrors.New(errCap.ErrMsgType1))\n\t\t}\n\t\tif errCap.ErrMsgType2 != \"\" {\n\t\t\treturn fmt.Errorf(\"error code: %v error message: %s\", errCap.CodeType2,\n\t\t\t\terrors.New(errCap.ErrMsgType2))\n\t\t}\n\t}\n\treturn json.Unmarshal(tempResp, result)\n}", "func (s *Server) Authenticate(wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\ts.Session = grequests.NewSession(nil)\n\tresp, err := s.Session.Get(s.URL+loginURL, nil)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tlog.Fatalln(\"Failed to get csrf: \" + s.Name)\n\t}\n\n\tdoc, err := goquery.NewDocumentFromReader(strings.NewReader((resp.String())))\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tlog.Fatalln(\"Failed to parse html: \" + s.Name)\n\t}\n\n\tcsrfToken := \"\"\n\tdoc.Find(\"meta\").Each(func(i int, s *goquery.Selection) {\n\t\tif name, _ := s.Attr(\"name\"); name == \"csrf-token\" {\n\t\t\tcsrfToken, _ = s.Attr(\"content\")\n\t\t\treturn\n\t\t}\n\t})\n\n\tresp, err = s.Session.Get(s.URL+verifyURL, nil)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tlog.Fatalln(\"Failed to verify: \" + s.Name)\n\t}\n\n\ts.Headers = &map[string]string{\n\t\t\"csrf-token\": csrfToken,\n\t\t\"Connection\": 
\"keep-alive\",\n\t\t\"Content-Type\": \"application/json\",\n\t\t\"Accept\": \"application/x-www-form-urlencoded; charset=utf-8\",\n\t}\n\n\tresp, err = s.Session.Post(s.URL+passwordURL, &grequests.RequestOptions{\n\t\tJSON: map[string]string{\n\t\t\t\"username\": s.Username,\n\t\t\t\"password\": s.Password,\n\t\t},\n\t\t// Cookies: cookies.Cookies,\n\t\tHeaders: *s.Headers,\n\t})\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tlog.Fatalln(\"Failed to login: \" + s.Name)\n\t}\n\n\t// pretty.Println(resp.String())\n\tif !s.IsThirdParty {\n\t\t// log.Println(\"TOTP logic here...\")\n\t\ttotp := gotp.NewDefaultTOTP(s.Seed)\n\t\t// log.Println(totp.Now())\n\t\tresp, err = s.Session.Post(s.URL+challengeURL, &grequests.RequestOptions{\n\t\t\tJSON: map[string]string{\n\t\t\t\t\"username\": s.Username,\n\t\t\t\t\"password\": s.Password,\n\t\t\t\t\"challenge\": totp.Now(),\n\t\t\t},\n\t\t\t// Cookies: cookies.Cookies,\n\t\t\tHeaders: *s.Headers,\n\t\t})\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\tlog.Fatalln(\"Failed to login: \" + s.Name)\n\t\t}\n\t\t// pretty.Println(resp.String())\n\t}\n}", "func handleGetRequest(rw rest.ResponseWriter, req *rest.Request) {\n\trw.WriteJson(map[string]string{\"body\": \"use POST https://localhost:433/sync, include authentication\"})\n}", "func (a APIKeyAuthentication) Authenticate(req *http.Request, endpoint string, params []byte) error {\n\ttimestamp := fmt.Sprintf(\"%v\", time.Now().UTC().Unix())\n\tmessage := fmt.Sprintf(\"%v%s/%s\", timestamp, req.Method, endpoint)\n\tif params != nil {\n\t\tmessage += string(params)\n\t}\n\th := hmac.New(sha256.New, []byte(a.Secret))\n\th.Write([]byte(message))\n\n\tsignature := hex.EncodeToString(h.Sum(nil))\n\n\treq.Header.Set(\"CB-ACCESS-KEY\", a.Key)\n\treq.Header.Set(\"CB-ACCESS-SIGN\", signature)\n\treq.Header.Set(\"CB-ACCESS-TIMESTAMP\", timestamp)\n\treq.Header.Set(\"CB-VERSION\", APIVersion)\n\n\treturn nil\n}", "func (u *UserInfo) ServeHTTP(rw http.ResponseWriter, req *http.Request) 
{\n\n\tauthorization := \"no\"\n\n\tfor header, value := range req.Header {\n\t\tif header == \"Authorization\" {\n\t\t\tauthorization = value[0]\n\t\t}\n\t}\n\n\tif authorization == \"no\" {\n\t\tfmt.Fprintln(rw, \"error_description:The request could not be authorized\")\n\t\treturn\n\t}\n\tkv := strings.Split(authorization, \" \")\n\tif len(kv) != 2 || kv[0] != \"Bearer\" {\n\t\tfmt.Fprintln(rw, \"error_description:The request could not be authorized\")\n\t\treturn\n\t}\n\n\tclaims := get(u.userinfoURL, authorization)\n\tif claims == \"error\" {\n\t\treturn\n\t}\n\n\tm := make(map[string]string)\n\terr := json.Unmarshal([]byte(claims), &m)\n\tif err != nil {\n\t\tfmt.Fprintln(rw, \"eeeerror_description:The request could not be authorized\")\n\t\treturn\n\n\t}\n\n\tfor k, v := range m {\n\t\tif k == \"sub\" {\n\t\t\treq.Header.Set(\"gridname\", v)\n\t\t}\n\n\t}\n\tu.next.ServeHTTP(rw, req)\n\n}", "func (p *Poloniex) SendHTTPRequest(ctx context.Context, ep exchange.URL, path string, result interface{}) error {\n\tendpoint, err := p.API.Endpoints.GetURL(ep)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\titem := &request.Item{\n\t\tMethod: http.MethodGet,\n\t\tPath: endpoint + path,\n\t\tResult: result,\n\t\tVerbose: p.Verbose,\n\t\tHTTPDebugging: p.HTTPDebugging,\n\t\tHTTPRecording: p.HTTPRecording,\n\t}\n\n\treturn p.SendPayload(ctx, request.Unset, func() (*request.Item, error) {\n\t\treturn item, nil\n\t}, request.UnauthenticatedRequest)\n}", "func (h *HitBTC) SendHTTPRequest(ctx context.Context, ep exchange.URL, path string, result interface{}) error {\n\tendpoint, err := h.API.Endpoints.GetURL(ep)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\titem := &request.Item{\n\t\tMethod: http.MethodGet,\n\t\tPath: endpoint + path,\n\t\tResult: result,\n\t\tVerbose: h.Verbose,\n\t\tHTTPDebugging: h.HTTPDebugging,\n\t\tHTTPRecording: h.HTTPRecording,\n\t}\n\n\treturn h.SendPayload(ctx, marketRequests, func() (*request.Item, error) {\n\t\treturn item, nil\n\t}, 
request.UnauthenticatedRequest)\n}", "func (ea *EnsureAuth) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif !IsUserAuthenticated(r) {\n\t\tretErr := &Error{\n\t\t\tClientError: true,\n\t\t\tServerError: false,\n\t\t\tMessage: errUnauthorized.Error(),\n\t\t\tContext: fmt.Sprintf(\"method=%s path=%s\", r.Method, r.URL.Path),\n\t\t\tCode: 0,\n\t\t}\n\t\twwwHeaderValue := WWWAuthenticateBearerRealm\n\t\twwwErrVal := getSessionErrorKeyValueFromContext(r)\n\t\tif IsAuthError(r) && len(wwwErrVal) > 0 {\n\t\t\twwwHeaderValue += \",\\n\" + wwwErrVal\n\t\t}\n\t\tw.Header().Add(HeaderWWWAuthenticate, wwwHeaderValue)\n\t\tea.cx.handleErrorJson(w, r, nil, \"request to access authenticated resource, but user is not authenticated\",\n\t\t\tretErr, http.StatusUnauthorized)\n\t\treturn\n\t}\n\tea.handler.ServeHTTP(w, r)\n}", "func (h AuthHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tbytes, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\twriteError(w, http.StatusInternalServerError, \"could not read http body\")\n\t\treturn\n\t}\n\n\tvar authBody authRequestBody\n\n\terr = json.Unmarshal(bytes, &authBody)\n\tif err != nil {\n\t\twriteError(w, http.StatusUnprocessableEntity, \"invalid json\")\n\t\treturn\n\t}\n\n\tif authBody.Email == \"\" || authBody.Password == \"\" {\n\t\twriteError(w, http.StatusBadRequest, \"Empty email or password.\")\n\t\treturn\n\t}\n\n\tuser, err := h.UserRepo.FindUser(\"WHERE email = $1\", authBody.Email)\n\tif err != nil {\n\t\tif err == easyalert.ErrRecordDoesNotExist {\n\t\t\twriteError(w, http.StatusUnauthorized, \"Invalid credentials.\")\n\t\t\treturn\n\t\t}\n\n\t\twriteError(w, http.StatusInternalServerError, \"an unknown error occured\")\n\t\treturn\n\t}\n\n\tif !user.ValidPassword(authBody.Password) {\n\t\twriteError(w, http.StatusUnauthorized, \"Invalid credentials.\")\n\t\treturn\n\t}\n\n\tresponseBody := authResponseBody{user.Token}\n\n\tresponseBodyBytes, err := json.Marshal(responseBody)\n\tif err != nil 
{\n\t\twriteError(w, http.StatusInternalServerError, \"could not marshal response body\")\n\t\treturn\n\t}\n\n\tbody, err := prettifyJSON(string(responseBodyBytes))\n\tif err != nil {\n\t\twriteError(w, http.StatusInternalServerError, \"could not prettify json response\")\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write([]byte(body))\n}", "func SendHTTPRequest(ctx context.Context, method, urlPath string, headers map[string]string, body io.Reader, verbose bool) ([]byte, error) {\n\tmethod = strings.ToUpper(method)\n\n\tif method != http.MethodOptions && method != http.MethodGet &&\n\t\tmethod != http.MethodHead && method != http.MethodPost &&\n\t\tmethod != http.MethodPut && method != http.MethodDelete &&\n\t\tmethod != http.MethodTrace && method != http.MethodConnect {\n\t\treturn nil, errors.New(\"invalid HTTP method specified\")\n\t}\n\n\treq, err := http.NewRequestWithContext(ctx, method, urlPath, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor k, v := range headers {\n\t\treq.Header.Add(k, v)\n\t}\n\n\tif verbose {\n\t\tlog.Debugf(log.Global, \"Request path: %s\", urlPath)\n\t\tfor k, d := range req.Header {\n\t\t\tlog.Debugf(log.Global, \"Request header [%s]: %s\", k, d)\n\t\t}\n\t\tlog.Debugf(log.Global, \"Request type: %s\", method)\n\t\tif body != nil {\n\t\t\tlog.Debugf(log.Global, \"Request body: %v\", body)\n\t\t}\n\t}\n\n\tm.RLock()\n\tif _HTTPUserAgent != \"\" && req.Header.Get(\"User-Agent\") == \"\" {\n\t\treq.Header.Add(\"User-Agent\", _HTTPUserAgent)\n\t}\n\n\tif _HTTPClient == nil {\n\t\tm.RUnlock()\n\t\tm.Lock()\n\t\t// Set *http.Client with default timeout if not populated.\n\t\t_HTTPClient = NewHTTPClientWithTimeout(defaultTimeout)\n\t\tm.Unlock()\n\t\tm.RLock()\n\t}\n\n\tresp, err := _HTTPClient.Do(req)\n\tm.RUnlock()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tcontents, err := io.ReadAll(resp.Body)\n\n\tif verbose 
{\n\t\tlog.Debugf(log.Global, \"HTTP status: %s, Code: %v\",\n\t\t\tresp.Status,\n\t\t\tresp.StatusCode)\n\t\tlog.Debugf(log.Global, \"Raw response: %s\", string(contents))\n\t}\n\n\treturn contents, err\n}", "func (au *Authenticator) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tsesSt, errGST := au.cx.getSessionStateFromRequest(r)\n\tif errGST != nil {\n\t\tvar authErrorReason string = \"\"\n\t\tvar wasError bool = false\n\t\tif errGST != session.ErrNoSessionId {\n\t\t\tif errGST == session.ErrInvalidScheme {\n\t\t\t\tauthErrorReason = WWWAuthenticateErrorInvalidRequest + \",\\n\" + \"error_description=\\\"Bearer scheme not provided\\\"\"\n\t\t\t\twasError = true\n\t\t\t} else if errGST == session.ErrInvalidSessionId {\n\t\t\t\tauthErrorReason = WWWAuthenticateErrorInvalidToken + \",\\n\" + \"error_description=\\\"token extracted not a valid session token\\\"\"\n\t\t\t\twasError = true\n\t\t\t}\n\t\t\tau.cx.logError(errGST, \"issue getting session from request\", \"\",\n\t\t\t\thttp.StatusInternalServerError)\n\t\t}\n\t\tcxWithAuthError := context.WithValue(r.Context(), authSessionErrorKey, wasError)\n\t\tcxWithAuthErrorValue := context.WithValue(cxWithAuthError, authSessionErrorValueKey, authErrorReason)\n\t\tcxWithSessionActive := context.WithValue(cxWithAuthErrorValue, authSessionActiveKey, false)\n\t\tcxWithUserAuthFalse := context.WithValue(cxWithSessionActive, authUserAuthenticatedKey, false)\n\n\t\trWithUserAuthFalse := r.WithContext(cxWithUserAuthFalse)\n\t\tau.handler.ServeHTTP(w, rWithUserAuthFalse)\n\t\treturn\n\t}\n\n\t//create a new request context containing the authenticated user\n\tcxWithSessionActive := context.WithValue(r.Context(), authSessionActiveKey, true)\n\tcxWithSessionState := context.WithValue(cxWithSessionActive, authSessionStateKey, sesSt)\n\n\tcxWithKey := context.WithValue(cxWithSessionState, authUserAuthenticatedKey, sesSt.Authenticated)\n\n\t//create a new request using that new context\n\trWithSession := 
r.WithContext(cxWithKey)\n\n\t//call the real handler, passing the new request\n\tau.handler.ServeHTTP(w, rWithSession)\n}", "func (d *Dao) doHTTPRequest(c context.Context, uri, ip string, params url.Values, res interface{}) (err error) {\n\tenc, err := d.sign(params)\n\tif err != nil {\n\t\terr = pkgerr.Wrapf(err, \"uri:%s,params:%v\", uri, params)\n\t\treturn\n\t}\n\tif enc != \"\" {\n\t\turi = uri + \"?\" + enc\n\t}\n\n\treq, err := xhttp.NewRequest(xhttp.MethodGet, uri, nil)\n\tif err != nil {\n\t\terr = pkgerr.Wrapf(err, \"method:%s,uri:%s\", xhttp.MethodGet, uri)\n\t\treturn\n\t}\n\treq.Header.Set(_userAgent, \"[email protected] \"+env.AppID)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn d.client.Do(c, req, res)\n}", "func PerformRequest(method string, r http.Handler, path string, auth bool) *httptest.ResponseRecorder {\n\treq, _ := http.NewRequest(method, path, nil)\n\tif auth {\n\t\treq.Header.Set(\"Authorization\", \"Basic \"+authString)\n\t}\n\tw := httptest.NewRecorder()\n\tr.ServeHTTP(w, req)\n\treturn w\n}", "func performHTTPRequest(req *http.Request, sess *UserSession) ([]byte, []string) {\n\treq.Header.Set(\"User-Agent\", \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.182 Safari/537.36\")\n\treq.Header.Set(\"Accept\", \"application/json, text/javascript, */*; q=0.01\")\n\t// form token is bound to vid\n\treq.Header.Set(`Cookie`, `vid=`+sess.vid+`; identifier=`+sess.identifier+`; login-options={\"stay\":true,\"no_ip_check\":true,\"leave_others\":true}; prf_ls_uad=price.a.200.normal; rtif-legacy=1; login-options={\"stay\":true,\"no_ip_check\":true,\"leave_others\":true}`)\n\n\t/*\n\t // this is for debug proxying\n\t proxy, _ :=url.Parse(\"http://127.0.0.1:8080\")\n\t tr := &http.Transport{\n\t \tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t \tProxy: http.ProxyURL(proxy),\n\t }\n\t*/\n\n\ttr := &http.Transport{}\n\t// for avoiding infinite redirect loops\n\tclient := &http.Client{\n\t\tTransport: 
tr,\n\t\tCheckRedirect: func(req *http.Request, via []*http.Request) error {\n\t\t\treturn http.ErrUseLastResponse\n\t\t},\n\t}\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tfmt.Println(\"[!] HTTP request failed to\" + req.URL.Host + req.URL.Path)\n\t\tpanic(err)\n\t}\n\tdefer resp.Body.Close()\n\n\trespBody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tfmt.Println(\"[!] HTTP request failed to\" + req.URL.Host + req.URL.Path)\n\t\tpanic(err)\n\t}\n\t// fmt.Println(string(resp.Header.Values(\"Set-Cookie\")[0]))\n\n\treturn respBody, resp.Header.Values(\"Set-Cookie\")\n}", "func (m *atmAuthenticatorMiddleware) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\ttokenString := r.Header.Get(\"token\")\n\ttoken, err := uuid.Parse(tokenString)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\tatmErr := m.atmClient.IsAuthenticated(r.Context(), token)\n\tif atmErr != nil {\n\t\tif atmErr.IsAuthenticated() {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\treturn\n\t}\n\tm.next.ServeHTTP(w, r)\n}", "func (h *Handler) serveAuthenticateDBUser(w http.ResponseWriter, r *http.Request) {}", "func (c *Client) auth(req *fasthttp.Request) error {\n\tif c.key == \"\" || len(c.secret) == 0 {\n\t\treturn errors.New(\"API key and secret not configured\")\n\t}\n\n\tvar payload bytes.Buffer\n\n\tts := strconv.FormatInt(unixTime(), 10)\n\n\tpayload.WriteString(ts)\n\tpayload.Write(req.Header.Method())\n\tpayload.Write(req.URI().RequestURI())\n\tif req.Body() != nil {\n\t\tpayload.Write(req.Body())\n\t}\n\n\thash := hmac.New(sha256.New, c.secret)\n\thash.Write(payload.Bytes())\n\n\treq.Header.Set(HeaderKey, c.key)\n\treq.Header.Set(HeaderSign, hex.EncodeToString(hash.Sum(nil)))\n\treq.Header.Set(HeaderTS, ts)\n\tif c.subaccount != \"\" {\n\t\treq.Header.Set(HeaderSubaccount, c.subaccount)\n\t}\n\treturn nil\n}", "func beginAuth(w http.ResponseWriter, r 
*http.Request) {\n\tgothic.BeginAuthHandler(w, r)\n}", "func Authenticate(h route.Handle) route.Handle {\n\treturn func(r *http.Request) (route.HandleObject, error) {\n\t\tv1reponse := new(route.V1)\n\n\t\tlog.Debugf(\"Authenticating %s\", r.URL.String())\n\t\tuser := User{}\n\t\tcookie, err := r.Cookie(\"_SID_TXNAPP_\")\n\t\tif err != nil {\n\t\t\treturn v1reponse, err\n\t\t}\n\t\terr = apicalls.Auth.Authenticate(cookie, &user)\n\t\tif err != nil {\n\t\t\treturn v1reponse, err\n\t\t}\n\n\t\tif user.UserID == 0 {\n\t\t\treturn v1reponse, errors.New(\"User is not authenticated\", http.StatusForbidden)\n\t\t}\n\t\tctx := context.WithValue(r.Context(), \"user\", user)\n\t\tr = r.WithContext(ctx)\n\t\treturn h(r)\n\t}\n}", "func CreateAuthenticatedHTTPTarget(target http.RoundTripper, config Config) (http.RoundTripper, error) {\n\n\tkeycontent, err := ioutil.ReadFile(config.KeyFile)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to load private key from %s : %s\", config.KeyFile, err.Error())\n\t}\n\n\tkey, err := parsePrivateKey(keycontent)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to parse private key from %s : %s\", config.KeyFile, err.Error())\n\t}\n\n\tsignerFn := func(chain train.Chain) (resp *http.Response, err error) {\n\t\treq := chain.Request()\n\t\tvar buf *bytes.Buffer\n\n\t\tif req.Body != nil {\n\t\t\tbuf = new(bytes.Buffer)\n\t\t\tbuf.ReadFrom(req.Body)\n\t\t\treq.Body = nopCloser{buf}\n\t\t}\n\t\tif req.Header.Get(\"date\") == \"\" {\n\t\t\treq.Header.Set(\"date\", time.Now().UTC().Format(http.TimeFormat))\n\t\t}\n\t\tif req.Header.Get(\"content-type\") == \"\" {\n\t\t\treq.Header.Set(\"content-type\", \"application/json\")\n\t\t}\n\t\tif req.Header.Get(\"accept\") == \"\" {\n\t\t\treq.Header.Set(\"accept\", \"application/json\")\n\t\t}\n\n\t\tif req.Header.Get(\"host\") == \"\" {\n\t\t\treq.Header.Set(\"host\", req.URL.Host)\n\t\t}\n\t\tvar signheaders []string\n\t\tif (req.Method == \"PUT\" || req.Method == \"POST\") && buf != 
nil {\n\t\t\tsignheaders = []string{\"(request-target)\", \"host\", \"date\", \"content-length\", \"content-type\", \"x-content-sha256\"}\n\t\t\tif req.Header.Get(\"content-length\") == \"\" {\n\t\t\t\treq.Header.Set(\"content-length\", fmt.Sprintf(\"%d\", buf.Len()))\n\t\t\t}\n\t\t\thasher := sha256.New()\n\t\t\thasher.Write(buf.Bytes())\n\n\t\t\treq.Header.Set(\"x-content-sha256\", base64.StdEncoding.EncodeToString(hasher.Sum(nil)))\n\t\t} else {\n\t\t\tsignheaders = []string{\"date\", \"host\", \"(request-target)\"}\n\t\t}\n\n\t\tvar signbuffer bytes.Buffer\n\t\tfor idx, header := range signheaders {\n\t\t\tsignbuffer.WriteString(header)\n\t\t\tsignbuffer.WriteString(\": \")\n\t\t\tif header == \"(request-target)\" {\n\n\t\t\t\tsignbuffer.WriteString(strings.ToLower(req.Method))\n\t\t\t\tsignbuffer.WriteString(\" \")\n\t\t\t\tsignbuffer.WriteString(req.URL.RequestURI())\n\t\t\t} else {\n\t\t\t\tsignbuffer.WriteString(req.Header.Get(header))\n\t\t\t}\n\t\t\tif idx < len(signheaders)-1 {\n\t\t\t\tsignbuffer.WriteString(\"\\n\")\n\t\t\t}\n\t\t}\n\n\t\th := sha256.New()\n\t\th.Write(signbuffer.Bytes())\n\t\tdigest := h.Sum(nil)\n\t\tsignature, err := rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, digest)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\treq.Header.Add(\"Authorization\",\n\t\t\tfmt.Sprintf(\"Signature headers=\\\"%s\\\",keyId=\\\"%s/%s/%s\\\",algorithm=\\\"rsa-sha256\\\",signature=\\\"%s\\\",version=\\\"1\\\"\",\n\t\t\t\tstrings.Join(signheaders, \" \"), config.Tenant, config.User, config.Fingerprint, base64.StdEncoding.EncodeToString(signature)))\n\n\t\tdreq, _ := httputil.DumpRequestOut(req, true)\n\t\tlogrus.Debug(string(dreq))\n\t\tthersp, err := chain.Proceed(req)\n\t\tif err != nil {\n\t\t\tlogrus.Debug(err)\n\t\t\treturn nil, err\n\t\t}\n\t\tdresp, _ := httputil.DumpResponse(thersp, true)\n\t\tlogrus.Debug(string(dresp))\n\t\treturn thersp, err\n\t}\n\treturn train.TransportWith(target, train.InterceptorFunc(signerFn)), nil\n}", "func (l 
*AuthMiddleware) ServeHTTP(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {\n\ttoken := r.Header.Get(\"X-Cloudsyncer-Authtoken\")\n\tusername := r.Header.Get(\"X-Cloudsyncer-Username\")\n\n\tif username == \"\" {\n\t\thandleErr(w, 403, nil, \"username not provided or empty\")\n\t\treturn\n\t}\n\n\tif len(username) > 255 {\n\t\thandleErr(w, 403, nil, \"username too long\")\n\t\treturn\n\t}\n\n\tif token == \"\" {\n\t\thandleErr(w, 403, nil, \"Token not provided or empty\")\n\t\treturn\n\t}\n\n\tif len(token) > 255 {\n\t\thandleErr(w, 413, nil, \"Token too long\")\n\t\treturn\n\t}\n\tuser := db.GetUser(username)\n\tif user == nil {\n\t\thandleErr(w, 403, nil, \"Invalid credentials\")\n\t\treturn\n\t}\n\tif session := db.GetSession(user, token); session == nil {\n\t\thandleErr(w, 403, nil, \"Invalid credentials\")\n\t\treturn\n\t} else {\n\t\tcontext.Set(r, \"session\", session)\n\t\tcontext.Set(r, \"user\", user)\n\t}\n\tnext(w, r)\n\n}", "func (c *Coinbene) SendHTTPRequest(ep exchange.URL, path string, f request.EndpointLimit, result interface{}) error {\n\tendpoint, err := c.API.Endpoints.GetURL(ep)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar resp json.RawMessage\n\terrCap := struct {\n\t\tCode int `json:\"code\"`\n\t\tMessage string `json:\"message\"`\n\t}{}\n\n\tif err := c.SendPayload(context.Background(), &request.Item{\n\t\tMethod: http.MethodGet,\n\t\tPath: endpoint + path,\n\t\tResult: &resp,\n\t\tVerbose: c.Verbose,\n\t\tHTTPDebugging: c.HTTPDebugging,\n\t\tHTTPRecording: c.HTTPRecording,\n\t\tEndpoint: f,\n\t}); err != nil {\n\t\treturn err\n\t}\n\n\tif err := json.Unmarshal(resp, &errCap); err == nil {\n\t\tif errCap.Code != 200 && errCap.Message != \"\" {\n\t\t\treturn errors.New(errCap.Message)\n\t\t}\n\t}\n\treturn json.Unmarshal(resp, result)\n}", "func (h *Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\n\t// Extract auth code\n\tauthReq := h.client.NewAuthorizeRequest(osincli.CODE)\n\tauthData, err := 
authReq.HandleRequest(req)\n\tif err != nil {\n\t\tglog.V(4).Infof(\"Error handling request: %v\", err)\n\t\th.handleError(err, w, req)\n\t\treturn\n\t}\n\n\tglog.V(4).Infof(\"Got auth data\")\n\n\t// Validate state before making any server-to-server calls\n\tok, err := h.state.Check(authData.State, req)\n\tif err != nil {\n\t\tglog.V(4).Infof(\"Error verifying state: %v\", err)\n\t\th.handleError(err, w, req)\n\t\treturn\n\t}\n\tif !ok {\n\t\tglog.V(4).Infof(\"State is invalid\")\n\t\terr := errors.New(\"State is invalid\")\n\t\th.handleError(err, w, req)\n\t\treturn\n\t}\n\n\t// Exchange code for a token\n\taccessReq := h.client.NewAccessRequest(osincli.AUTHORIZATION_CODE, authData)\n\taccessData, err := accessReq.GetToken()\n\tif err != nil {\n\t\tglog.V(4).Infof(\"Error getting access token: %v\", err)\n\t\th.handleError(err, w, req)\n\t\treturn\n\t}\n\n\tglog.V(5).Infof(\"Got access data\")\n\n\tidentity, ok, err := h.provider.GetUserIdentity(accessData)\n\tif err != nil {\n\t\tglog.V(4).Infof(\"Error getting userIdentityInfo info: %v\", err)\n\t\th.handleError(err, w, req)\n\t\treturn\n\t}\n\tif !ok {\n\t\tglog.V(4).Infof(\"Could not get userIdentityInfo info from access token\")\n\t\terr := errors.New(\"Could not get userIdentityInfo info from access token\")\n\t\th.handleError(err, w, req)\n\t\treturn\n\t}\n\n\tuser, err := h.mapper.UserFor(identity)\n\tif err != nil {\n\t\tglog.V(4).Infof(\"Error creating or updating mapping for: %#v due to %v\", identity, err)\n\t\th.handleError(err, w, req)\n\t\treturn\n\t}\n\tglog.V(4).Infof(\"Got userIdentityMapping: %#v\", user)\n\n\t_, err = h.success.AuthenticationSucceeded(user, authData.State, w, req)\n\tif err != nil {\n\t\tglog.V(4).Infof(\"Error calling success handler: %v\", err)\n\t\th.handleError(err, w, req)\n\t\treturn\n\t}\n}", "func sendRequest(req *http.Request, credentials Credentials) ([]byte, error) {\n\treq.Header.Add(\"Content-Type\", \"application/json\")\n\treq.Header.Add(\"Connection\", 
\"close\")\n\treq.Header.Add(\"Authorization\", credentials.AuthToken)\n\n\tresp, err := httpClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tdata, err := ioutil.ReadAll(resp.Body)\n\treturn data, err\n}", "func (h *Input) ServeHTTP(res http.ResponseWriter, req *http.Request) {\n\thandler := h.serveWrite\n\n\th.authenticateIfSet(handler, res, req)\n}", "func (a *Authenticator) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\t// Check the username and password against the directory.\n\tuid, pwd := r.FormValue(\"uid\"), r.FormValue(\"pwd\")\n\tp, err := a.dir.CheckCredentials(uid, pwd)\n\tif err != nil {\n\t\thttp.Error(w, \"Incorrect credentials.\", http.StatusUnauthorized)\n\t\treturn\n\t}\n\tts, err := a.generateJwt(p)\n\tif err != nil {\n\t\thttp.Error(w, \"Incorrect credentials.\", http.StatusUnauthorized)\n\t\treturn\n\t}\n\tw.Header().Add(\"Content-Type\", \"text/plain\")\n\tw.Write(ts)\n}", "func (a *Auth) HandleUserAccessTokenHTTPRequest(w http.ResponseWriter, twRequest *http.Request, logger logger.Logger) error {\n\tauthorizationCode := twRequest.URL.Query().Get(\"code\")\n\tif len(authorizationCode) == 0 {\n\t\terr := errors.New(\"The query 'code' parameter is missing, please try again or contact us at [email protected]\")\n\t\tif twError := twRequest.URL.Query().Get(\"error\"); len(twError) > 0 {\n\t\t\terr = errors.New(twRequest.URL.Query().Get(\"error_description\"))\n\t\t}\n\t\treturn err\n\t}\n\ttoken, err := a.oauth2.RequestUserAcccessToken(authorizationCode)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(token.AccessToken) <= 0 {\n\t\treturn errors.New(\"Token empty, please ask for a new authorization code\")\n\t}\n\n\ttwitchRequest := core.NewRequest(a.oauth2, &token)\n\ttwitchRequest.Logger = logger.Share()\n\ttwitchRequest.Logger.SetPrefix(\"LIBRARY\")\n\n\tchannelService := service.ChannelService{}\n\tuserService := service.UserService{}\n\tcredentialRepository := 
repository.NewCredentialRepository(a.db, logger)\n\tchannelRepository := repository.NewChannelRepository(a.db, logger)\n\tuserRepository := repository.NewUserRepository(a.db, logger)\n\n\tchannel, err := channelService.GetInfo(twitchRequest)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tuser, err := userService.GetByName(channel.Name, twitchRequest)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsChannel := transformer.TransformCoreChannelToStorageChannel(channel)\n\tcredential := transformer.TransformCoreTokenResponseToStorageCredential(token)\n\tsUser := transformer.TransformCoreUserToStorageUser(user)\n\n\t// TODO: Review channel ID Auth\n\tcredential.ChannelID = sChannel.ID\n\tcredential.ChannelName = sChannel.Name\n\tcredential.Email = sChannel.Email\n\tif false == credentialRepository.StoreCredential(credential) {\n\t\treturn errors.New(\"Error getting credential\")\n\t}\n\tif false == channelRepository.StoreChannel(sChannel) {\n\t\treturn errors.New(\"Error storing channel\")\n\t}\n\tif false == userRepository.StoreUser(sUser) {\n\t\treturn errors.New(\"Error storing user\")\n\t}\n\n\treturn nil\n}", "func (s *Standard) sendRequest(context service.Context, requestMethod string, requestURL string, responseObject interface{}) error {\n\n\tserverToken := s.serverToken()\n\tif serverToken == \"\" {\n\t\treturn errors.Newf(\"client\", \"unable to obtain server token for %s %s\", requestMethod, requestURL)\n\t}\n\n\trequest, err := http.NewRequest(requestMethod, requestURL, nil)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"client\", \"unable to create new request for %s %s\", requestMethod, requestURL)\n\t}\n\n\tif err = service.CopyRequestTrace(context.Request(), request); err != nil {\n\t\treturn errors.Wrapf(err, \"client\", \"unable to copy request trace\")\n\t}\n\n\trequest.Header.Add(TidepoolAuthenticationTokenHeaderName, serverToken)\n\n\tresponse, err := s.httpClient.Do(request)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"client\", \"unable to 
perform request %s %s\", requestMethod, requestURL)\n\t}\n\tdefer response.Body.Close()\n\n\tswitch response.StatusCode {\n\tcase http.StatusOK:\n\t\tif responseObject != nil {\n\t\t\tif err = json.NewDecoder(response.Body).Decode(responseObject); err != nil {\n\t\t\t\treturn errors.Wrapf(err, \"client\", \"error decoding JSON response from %s %s\", request.Method, request.URL.String())\n\t\t\t}\n\t\t}\n\t\treturn nil\n\tcase http.StatusUnauthorized:\n\t\treturn NewUnauthorizedError()\n\tdefault:\n\t\treturn NewUnexpectedResponseError(response, request)\n\t}\n}", "func newAuthenticatedRequest(t testing.TB, method, path string, body io.Reader, user, pass string) *http.Request {\n\treq := newRequest(t, method, path, body)\n\treq.SetBasicAuth(user, pass)\n\treq.Header.Add(\"Accept\", resticAPIV2)\n\treturn req\n}", "func (c *Client) SendRequest(req *Request, option *SignOption) (bceResponse *Response, err error) {\n\tif option == nil {\n\t\toption = &SignOption{}\n\t}\n\n\toption.AddHeader(\"User-Agent\", c.GetUserAgent())\n\toption.AddHeader(\"Content-Type\", \"application/json\")\n\tif c.RetryPolicy == nil {\n\t\tc.RetryPolicy = NewDefaultRetryPolicy(3, 20*time.Second)\n\t}\n\tvar buf []byte\n\tif req.Body != nil {\n\t\tbuf, _ = ioutil.ReadAll(req.Body)\n\t}\n\n\tfor i := 0; ; i++ {\n\t\tbceResponse, err = nil, nil\n\t\tif option.Credentials != nil {\n\t\t\tGenerateAuthorization(*option.Credentials, *req, option)\n\t\t} else {\n\t\t\tGenerateAuthorization(*c.Credentials, *req, option)\n\t\t}\n\t\tif c.debug {\n\t\t\tutil.Debug(\"\", fmt.Sprintf(\"Request: httpMethod = %s, requestUrl = %s, requestHeader = %v\",\n\t\t\t\treq.Method, req.URL.String(), req.Header))\n\t\t}\n\t\tt0 := time.Now()\n\t\treq.Body = ioutil.NopCloser(bytes.NewBuffer(buf))\n\t\tresp, httpError := c.httpClient.Do(req.raw())\n\t\tt1 := time.Now()\n\t\tbceResponse = NewResponse(resp)\n\t\tif c.debug {\n\t\t\tutil.Debug(\"\", fmt.Sprintf(\"http request: %s do use time: %v\", req.URL.String(), 
t1.Sub(t0)))\n\t\t\tstatusCode := -1\n\t\t\tresString := \"\"\n\t\t\tvar resHead http.Header\n\t\t\tif resp != nil {\n\t\t\t\tstatusCode = resp.StatusCode\n\t\t\t\tre, err := bceResponse.GetBodyContent()\n\t\t\t\tif err != nil {\n\t\t\t\t\tutil.Debug(\"\", fmt.Sprintf(\"getbodycontent error: %v\", err))\n\t\t\t\t}\n\t\t\t\tresString = string(re)\n\t\t\t\tresHead = resp.Header\n\t\t\t}\n\t\t\tutil.Debug(\"\", fmt.Sprintf(\"Response: status code = %d, httpMethod = %s, requestUrl = %s\",\n\t\t\t\tstatusCode, req.Method, req.URL.String()))\n\t\t\tutil.Debug(\"\", fmt.Sprintf(\"Response Header: = %v\", resHead))\n\t\t\tutil.Debug(\"\", fmt.Sprintf(\"Response body: = %s\", resString))\n\t\t}\n\n\t\tif httpError != nil {\n\t\t\tduration := c.RetryPolicy.GetDelayBeforeNextRetry(httpError, i+1)\n\t\t\tif duration <= 0 {\n\t\t\t\terr = httpError\n\t\t\t\treturn bceResponse, err\n\t\t\t}\n\t\t\ttime.Sleep(duration)\n\t\t\tcontinue\n\t\t}\n\t\tif resp.StatusCode >= http.StatusBadRequest {\n\t\t\terr = buildError(bceResponse)\n\t\t}\n\t\tif err == nil {\n\t\t\treturn bceResponse, err\n\t\t}\n\n\t\tduration := c.RetryPolicy.GetDelayBeforeNextRetry(err, i+1)\n\t\tif duration <= 0 {\n\t\t\treturn bceResponse, err\n\t\t}\n\n\t\ttime.Sleep(duration)\n\t}\n}", "func (r RespondToAuthChallengeRequest) Send(ctx context.Context) (*RespondToAuthChallengeResponse, error) {\n\tr.Request.SetContext(ctx)\n\terr := r.Request.Send()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp := &RespondToAuthChallengeResponse{\n\t\tRespondToAuthChallengeOutput: r.Request.Data.(*RespondToAuthChallengeOutput),\n\t\tresponse: &aws.Response{Request: r.Request},\n\t}\n\n\treturn resp, nil\n}", "func (j *Jusibe) doHTTPRequest(req *http.Request, body interface{}) (res *http.Response, err error) {\n\treq.URL.RawQuery = req.URL.Query().Encode()\n\tres, err = j.httpClient.Do(req)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tdefer func() {\n\t\tcloseErr := res.Body.Close()\n\t\tif closeErr != nil {\n\t\t\terr = 
fmt.Errorf(\"%s, %s\", err, closeErr)\n\t\t}\n\t}()\n\n\tif res.StatusCode > 299 || res.StatusCode < 200 {\n\t\terr = fmt.Errorf(\"unexpected %d http response code\", res.StatusCode)\n\t\treturn\n\t}\n\n\terr = json.NewDecoder(res.Body).Decode(body)\n\n\treturn\n}", "func addAuthentication(req *http.Request) {\n\tif *ghAuthKey != \"\" {\n\t\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"token %s\", *ghAuthKey))\n\t} else {\n\t\treq.SetBasicAuth(*ghUserFl, *ghPassFl)\n\t}\n}", "func AuthenticateRequest(req *http.Request, rw http.ResponseWriter) (acct *Account, err error) {\n\tif mockAccount != nil {\n\t\treturn mockAccount, nil\n\t}\n\tctx := appengine.NewContext(req)\n\n\tif slug := req.Header.Get(Headers[\"account\"]); slug != \"\" {\n\t\tapiKey := req.Header.Get(Headers[\"key\"])\n\t\tacct, err = authenticateAccount(ctx, slug, apiKey)\n\t\tif err == nil {\n\t\t\tsession, _ := GetSession(ctx)\n\t\t\tsendSession(req, rw, session)\n\t\t}\n\t\treturn\n\t} else if username := req.Header.Get(Headers[\"slug\"]); username != \"\" {\n\t\tpassword := req.Header.Get(Headers[\"password\"])\n\t\tacct, err = authenticateAccountByUser(ctx, username, password)\n\t\tif err == nil {\n\t\t\tsession, _ := GetSession(ctx)\n\t\t\tsendSession(req, rw, session)\n\t\t}\n\t\treturn\n\t} else {\n\t\tsessionKey := sessionKeyFromRequest(req)\n\t\tif sessionKey == \"\" {\n\t\t\treturn nil, Unauthenticated\n\t\t}\n\t\tacct, _, err := authenticateSession(ctx, sessionKey)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn acct, nil\n\t}\n}", "func (service BasicAuthHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif !service.Env.True(api.BasicEnabled) {\n\t\tservice.Log.Warn(api.BasicAuthNotImplemented, api.LogFields{})\n\t\tRespondWithStructuredError(w, api.BasicAuthNotImplemented, http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tvar respBody struct {\n\t\tUsername string\n\t\tPassword string\n\t}\n\n\tif err := DecodeJSON(r.Body, &respBody); err != nil 
{\n\t\tservice.Log.WarnError(api.BasicAuthError, err, api.LogFields{})\n\t\tRespondWithStructuredError(w, api.BasicAuthError, http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\terrors := structuredErrors{}\n\n\tif respBody.Username == \"\" {\n\t\tservice.Log.Warn(api.BasicAuthMissingUsername, api.LogFields{})\n\t\terrors.addError(\"USERNAME_MISSING\", api.BasicAuthMissingUsername)\n\t}\n\n\tif respBody.Password == \"\" {\n\t\tservice.Log.Warn(api.BasicAuthMissingPassword, api.LogFields{})\n\t\terrors.addError(\"PASSWORD_MISSING\", api.BasicAuthMissingPassword)\n\t}\n\n\tif errors.hasErrors() {\n\t\tRespondWithErrors(w, errors, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\taccount := &api.Account{\n\t\tUsername: respBody.Username,\n\t}\n\n\t// Associate with a database context.\n\tif _, err := account.Get(service.Database, 0); err != nil {\n\t\tservice.Log.WarnError(api.AccountUpdateError, err, api.LogFields{\"username\": account.Username})\n\t\tRespondWithStructuredError(w, api.AccountUpdateError, http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t// Validate the user name and password combination\n\tif err := account.BasicAuthentication(service.Database, respBody.Password); err != nil {\n\t\tservice.Log.WarnError(api.BasicAuthInvalid, err, api.LogFields{\"account\": account.ID})\n\t\tRespondWithStructuredError(w, api.BasicAuthInvalid, http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\tsessionKey, authErr := service.Session.UserDidAuthenticate(account.ID, simplestore.NullString())\n\tif authErr != nil {\n\t\tservice.Log.WarnError(\"bad session get\", authErr, api.LogFields{\"account\": account.ID})\n\t\tRespondWithStructuredError(w, \"bad session get\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tservice.Cookie.AddSessionKeyToResponse(w, sessionKey)\n\n\t// If we need to flush the storage first then do so now.\n\tif service.Env.True(api.FlushStorage) {\n\t\tservice.Log.Info(api.PurgeAccountData, api.LogFields{\"account\": account.ID})\n\t\tdelErr := 
service.Store.DeleteApplication(account.ID)\n\t\tif delErr != nil {\n\t\t\tservice.Log.Warn(\"Unable to purge the application data on login\", api.LogFields{\"account\": account.ID})\n\t\t}\n\t}\n\n\tservice.Log.Info(api.BasicAuthValid, api.LogFields{\"account\": account.ID})\n}", "func sendReq(req *http.Request, result interface{}) (resp *http.Response, err error) {\n\n\tswitch {\n\t// TODO: This wil dump the authorization token. Which it probably shouldn't.\n\tcase debug():\n\t\treqDump, dumpErr := httputil.DumpRequestOut(req, true)\n\t\treqStr := string(reqDump)\n\t\tif dumpErr != nil {\n\t\t\tfmt.Printf(\"Error dumping request (display as generic object): %v\\n\", dumpErr)\n\t\t\treqStr = fmt.Sprintf(\"%v\", req)\n\t\t}\n\t\tfmt.Printf(\"%s %s\\n\", t.Title(\"Request\"), t.Text(reqStr))\n\t\tfmt.Println()\n\tcase verbose():\n\t\tfmt.Printf(\"%s %s\\n\", t.Title(\"Request:\"), t.Text(\"%s %s\", req.Method, req.URL))\n\t\tfmt.Println()\n\t}\n\n\tresp, err = hubClient.Do(req)\n\tif err == nil {\n\n\t\tif debug() {\n\t\t\trespDump, dumpErr := httputil.DumpResponse(resp, true)\n\t\t\trespStr := string(respDump)\n\t\t\tif dumpErr != nil {\n\t\t\t\tfmt.Printf(\"Error dumping response (display as generic object): %v\\n\", dumpErr)\n\t\t\t\trespStr = fmt.Sprintf(\"%v\", resp)\n\t\t\t}\n\t\t\tfmt.Printf(\"%s\\n%s\\n\", t.Title(\"Respose:\"), t.Text(respStr))\n\t\t\tfmt.Println()\n\t\t}\n\n\t\t// Do this after the Dump, the dump reads out the response for reprting and\n\t\t// replaces the reader with anotherone that has the data.\n\t\terr = checkReturnCode(*resp)\n\t\tif result != nil {\n\t\t\tif err == nil {\n\t\t\t\terr = unmarshal(resp, result)\n\t\t\t}\n\t\t}\n\n\t}\n\treturn resp, err\n}", "func (req *Request) GenerateAuth(creds *Credentials) error {\r\n\thawkcreds := &hawk.Credentials{\r\n\t\tID: creds.UserID,\r\n\t\tKey: creds.APIKey,\r\n\t\tHash: sha256.New,\r\n\t}\r\n\r\n\tclientAuth := hawk.NewRequestAuth(req.request, hawkcreds, 0)\r\n\tclientAuth.Ext = 
creds.OrganizationID\r\n\r\n\tif req.request.Body != nil {\r\n\t\treqBody, err := ioutil.ReadAll(req.request.Body)\r\n\t\tif err != nil {\r\n\t\t\treturn err\r\n\t\t}\r\n\t\treq.request.Body = ioutil.NopCloser(bytes.NewBuffer(reqBody))\r\n\t\tif len(reqBody) > 0 {\r\n\t\t\tlog.Debugf(\"[DEBUG] Payload: %s\", string(reqBody))\r\n\t\t\tpayloadHash := clientAuth.PayloadHash(\"application/json\")\r\n\t\t\tpayloadHash.Write(reqBody)\r\n\t\t\tclientAuth.SetHash(payloadHash)\r\n\t\t\treq.request.Header.Set(\"Content-Type\", \"application/json\")\r\n\t\t}\r\n\t}\r\n\r\n\treq.request.Header.Set(\"Authorization\", clientAuth.RequestHeader())\r\n\treq.request.Header.Set(\"Accept\", \"application/json\")\r\n\r\n\treturn nil\r\n}", "func (ai *JWTAuthInteractor) HandleJWTAuth(w http.ResponseWriter, r *http.Request) {\n\tai.Logger.LogAccess(r)\n\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tai.Logger.LogError(err)\n\t\tai.JSONResponse.Error500(w)\n\t\treturn\n\t}\n\n\tvar req RequestJWTAuthHandleJWTAuth\n\terr = json.Unmarshal(body, &req)\n\tif err != nil {\n\t\tai.Logger.LogError(err)\n\t\tai.JSONResponse.Error500(w)\n\t\treturn\n\t}\n\n\tvar admin domain.Admin\n\tadmin, err = ai.AdminRepository.FindByJWTAuth(req)\n\tif err != nil {\n\t\tai.Logger.LogError(err)\n\t\tai.JSONResponse.Error500(w)\n\t\treturn\n\t}\n\n\tvar jwtAuth domain.JWTAuth\n\tif err = jwtAuth.SignIn(admin.Password, req.Password); err != nil {\n\t\tai.Logger.LogError(err)\n\t\tai.JSONResponse.Error403(w)\n\t\treturn\n\t}\n\n\tvar token string\n\ttoken, err = jwtAuth.NewJWT(admin)\n\tif err != nil {\n\t\tai.Logger.LogError(err)\n\t\tai.JSONResponse.Error500(w)\n\t\treturn\n\t}\n\n\trhja := &ResponseHandleJWTAuth{\n\t\tToken: token,\n\t}\n\tvar res []byte\n\tres, err = json.Marshal(rhja)\n\tif err != nil {\n\t\tai.Logger.LogError(err)\n\t\tai.JSONResponse.Error500(w)\n\t\treturn\n\t}\n\n\tai.JSONResponse.Success200(w, res)\n\treturn\n}", "func (emailHunter *EmailHunter) sendRequest(formValues 
url.Values, emailHunterURL string) (*http.Response, error) {\n\tformValues.Set(\"api_key\", emailHunter.APIKey)\n\treq, err := http.NewRequest(\"GET\", emailHunterURL, strings.NewReader(formValues.Encode()))\n\treq.Header.Add(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\tresp, err := emailHunter.HTTPClient.Do(req)\n\n\treturn resp, err\n}", "func (m *AuthMiddleware) Perform(w http.ResponseWriter, r *http.Request) error {\n\tstringIP, _, _ := net.SplitHostPort(r.RemoteAddr)\n\tip := net.ParseIP(stringIP)\n\tif !m.Context.Tournament.Public && !ip.IsLoopback() {\n\t\tw.Header().Set(\"WWW-Authenticate\", `Basic realm=\"Restricted\"`)\n\n\t\ts := strings.SplitN(r.Header.Get(\"Authorization\"), \" \", 2)\n\t\tif len(s) != 2 {\n\t\t\thttp.Error(w, \"Not authorized\", 401)\n\t\t\treturn errors.New(\"Not Authorized\")\n\t\t}\n\n\t\tb, err := base64.StdEncoding.DecodeString(s[1])\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), 401)\n\t\t\treturn errors.New(\"Not Authorized\")\n\t\t}\n\n\t\tpair := strings.SplitN(string(b), \":\", 2)\n\t\tif len(pair) != 2 {\n\t\t\thttp.Error(w, \"Not authorized\", 401)\n\t\t\treturn errors.New(\"Not Authorized\")\n\t\t}\n\n\t\tfor username, password := range m.Context.Tournament.Authorization {\n\t\t\tif pair[0] == username && pair[1] == password {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\thttp.Error(w, \"Not authorized\", 401)\n\t\treturn errors.New(\"Not Authorized\")\n\t}\n\treturn nil\n}", "func isAuthenticated(w http.ResponseWriter, r *http.Request) {\n\tisLoggedIn := isLoggedIn(r)\n\n\tresp := map[string]interface{}{\n\t\t\"success\": isLoggedIn,\n\t}\n\tapiResponse(resp, w)\n}", "func (r *ProtocolIncus) RequireAuthenticated(authenticated bool) {\n\tr.requireAuthenticated = authenticated\n}", "func (h jwtAuthHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\t// Let the top level caller handle if the requests should be\n\t// allowed, if there are no Authorization headers.\n\tif 
r.Header.Get(\"Authorization\") == \"\" {\n\t\th.handler.ServeHTTP(w, r)\n\t\treturn\n\t}\n\t// Validate Authorization header to be valid.\n\tjwt := InitJWT()\n\ttoken, e := jwtgo.ParseFromRequest(r, func(token *jwtgo.Token) (interface{}, error) {\n\t\tif _, ok := token.Method.(*jwtgo.SigningMethodHMAC); !ok {\n\t\t\treturn nil, fmt.Errorf(\"Unexpected signing method: %v\", token.Header[\"alg\"])\n\t\t}\n\t\treturn jwt.secretAccessKey, nil\n\t})\n\tif e != nil || !token.Valid {\n\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\treturn\n\t}\n\th.handler.ServeHTTP(w, r)\n}", "func (a anchoreClient) authenticatedResty() *resty.Request {\n\treturn resty.R().SetBasicAuth(a.userName, a.password).SetHeader(\"User-Agent\", \"Pipeline/go\")\n}", "func (h *AuthenticationHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif handler, _, _ := h.noAuthRouter.Lookup(r.Method, r.URL.Path); handler != nil {\n\t\th.Handler.ServeHTTP(w, r)\n\t\treturn\n\t}\n\n\tctx := r.Context()\n\tscheme, err := ProbeAuthScheme(r)\n\tif err != nil {\n\t\tUnauthorizedError(ctx, h, w)\n\t\treturn\n\t}\n\n\tswitch scheme {\n\tcase tokenAuthScheme:\n\t\tctx, err = h.extractAuthorization(ctx, r)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tr = r.WithContext(ctx)\n\t\th.Handler.ServeHTTP(w, r)\n\t\treturn\n\tcase sessionAuthScheme:\n\t\tctx, err = h.extractSession(ctx, r)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tr = r.WithContext(ctx)\n\t\th.Handler.ServeHTTP(w, r)\n\t\treturn\n\t}\n\n\tUnauthorizedError(ctx, h, w)\n}", "func (c *Client) SendRequest(method string, rawURL string, data url.Values,\n\theaders map[string]interface{}) (*http.Response, error) {\n\tu, err := url.Parse(rawURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvalueReader := &strings.Reader{}\n\tgoVersion := runtime.Version()\n\n\tif method == http.MethodGet {\n\t\tif data != nil {\n\t\t\tv, _ := form.EncodeToStringWith(data, delimiter, escapee, keepZeros)\n\t\t\tregex := regexp.MustCompile(`\\.\\d+`)\n\t\t\ts := 
regex.ReplaceAllString(v, \"\")\n\n\t\t\tu.RawQuery = s\n\t\t}\n\t}\n\n\tif method == http.MethodPost {\n\t\tvalueReader = strings.NewReader(data.Encode())\n\t}\n\n\treq, err := http.NewRequest(method, u.String(), valueReader)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.SetBasicAuth(c.basicAuth())\n\n\t// E.g. \"User-Agent\": \"twilio-go/1.0.0 (darwin amd64) go/go1.17.8\"\n\tuserAgent := fmt.Sprintf(\"twilio-go/%s (%s %s) go/%s\", LibraryVersion, runtime.GOOS, runtime.GOARCH, goVersion)\n\n\tif len(c.UserAgentExtensions) > 0 {\n\t\tuserAgent += \" \" + strings.Join(c.UserAgentExtensions, \" \")\n\t}\n\n\treq.Header.Add(\"User-Agent\", userAgent)\n\n\tif method == http.MethodPost {\n\t\treq.Header.Add(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\t}\n\n\tfor k, v := range headers {\n\t\treq.Header.Add(k, fmt.Sprint(v))\n\t}\n\n\treturn c.doWithErr(req)\n}", "func (b *basicAuth) set(r *http.Request) { r.SetBasicAuth(b.username, b.password) }", "func AuthenticateHandler(w http.ResponseWriter, r *http.Request) {\n\tbody, err := ioutil.ReadAll(r.Body)\n\tdefer r.Body.Close()\n\tif err != nil {\n\t\tw.Write(toJSON(err))\n\t}\n\n\tvar user User\n\tjson.Unmarshal(body, &user)\n\n\t_, err = models.Authenticate(user.Username, user.Password)\n\tif err != nil {\n\t\tlog.Println(err)\n\t} else {\n\t\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\n\t\t\t\"username\": user.Username,\n\t\t\t\"exp\": time.Now().Add(time.Hour * 72).Unix(),\n\t\t})\n\t\ttokenString, err := token.SignedString(Secret)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t\tlog.Println(tokenString)\n\t\tw.Write(toJSON(&Credentials{Username: user.Username, Token: tokenString}))\n\t}\n}", "func SendRequest(m, u string, h map[string]string) (*http.Response, []byte, error) {\n\treq, err := CreateRequest(m, u, h)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\thttpClient := &http.Client{Timeout: time.Second * 10}\n\n\tresp, err := httpClient.Do(req)\n\tif 
err != nil {\n\t\treturn nil, nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn resp, nil, err\n\t}\n\n\treturn resp, body, nil\n}", "func authEndpoint(rw http.ResponseWriter, req *http.Request) {\n\n\t// request has to be POST\n\tif req.Method != \"POST\" {\n\t\thttp.Error(rw, \"bad method, only post allowed\", http.StatusBadRequest)\n\t}\n\n\t// has to be authenticated, in a real we would use soemthing more\n\t// secure like certificates etc.\n\tuser, _, ok := req.BasicAuth()\n\n\tif !ok {\n\t\thttp.Error(rw, \"authentication required\", http.StatusForbidden)\n\t}\n\n\tlog.Println(\"basic authentication successful for \", user)\n\n\t// now we issue token and return it\n\n\t// This context will be passed to all methods.\n\tctx := req.Context()\n\n\t// Create an empty session object which will be passed to the request handlers\n\tmySessionData := newSession(\"\")\n\n\t// This will create an access request object and iterate through the registered TokenEndpointHandlers to validate the request.\n\taccessRequest, err := fositeInstance.NewAccessRequest(ctx, req, mySessionData)\n\n\t// Catch any errors, e.g.:\n\t// * unknown client\n\t// * invalid redirect\n\t// * ...\n\tif err != nil {\n\t\tlog.Printf(\"Error occurred in NewAccessRequest: %+v\", err)\n\t\tfositeInstance.WriteAccessError(rw, accessRequest, err)\n\t\treturn\n\t}\n\n\t// If this is a client_credentials grant, grant all requested scopes\n\t// NewAccessRequest validated that all requested scopes the client is allowed to perform\n\t// based on configured scope matching strategy.\n\tif accessRequest.GetGrantTypes().ExactOne(\"client_credentials\") {\n\t\tfor _, scope := range accessRequest.GetRequestedScopes() {\n\t\t\taccessRequest.GrantScope(scope)\n\t\t}\n\t}\n\n\t// Next we create a response for the access request. 
Again, we iterate through the TokenEndpointHandlers\n\t// and aggregate the result in response.\n\tresponse, err := fositeInstance.NewAccessResponse(ctx, accessRequest)\n\tif err != nil {\n\t\tlog.Printf(\"Error occurred in NewAccessResponse: %+v\", err)\n\t\tfositeInstance.WriteAccessError(rw, accessRequest, err)\n\t\treturn\n\t}\n\n\t// All done, send the response.\n\tfositeInstance.WriteAccessResponse(rw, accessRequest, response)\n\n}", "func (c *auth) request(ctx context.Context, method string, url string, data string, target interface{}) error {\n\tauthorization := buildBasicAuthorization(c.clientID, c.clientSecret)\n\n\treturn c.sC.Call(backendClientParams{\n\t\tctx: ctx,\n\t\tmethod: method,\n\t\turl: url,\n\t\tauthorization: authorization,\n\t\tbody: strings.NewReader(data),\n\t\ttarget: target,\n\t})\n}", "func (c *Component) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tlisteners := AuthListeners()\n\n\tif logout, err := url.Parse(com.GetString(\"logout_url\")); err == nil {\n\t\tif r.URL.Path == logout.Path {\n\t\t\tif returnTo := r.URL.Query().Get(\"return\"); returnTo != \"\" {\n\t\t\t\thttp.Redirect(w, r, returnTo, http.StatusFound)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfor _, listener := range listeners {\n\t\t\t\tif err := listener.WebAuthLogout(w, r); err != nil {\n\t\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tq := url.Values{}\n\t\t\tq.Set(\"return\", r.Referer())\n\t\t\treturnURL := &url.URL{\n\t\t\t\tScheme: logout.Scheme,\n\t\t\t\tHost: logout.Host,\n\t\t\t\tPath: logout.Path,\n\t\t\t\tRawQuery: q.Encode(),\n\t\t\t}\n\t\t\thttp.Redirect(w, r, auth0.DefaultClient().LogoutURL(returnURL.String()), http.StatusFound)\n\t\t\treturn\n\t\t}\n\t}\n\n\ttoken, err := auth0.DefaultClient().NewToken(r.URL.Query().Get(\"code\"))\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tfor _, listener := range listeners {\n\t\tif err := 
listener.WebAuthLogin(w, r, token); err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t}\n\n\tstate := r.URL.Query().Get(\"state\")\n\tif state != \"\" {\n\t\thttp.Redirect(w, r, state, http.StatusFound)\n\t\treturn\n\t}\n\thttp.Redirect(w, r, r.Referer(), http.StatusFound)\n}", "func (c *CORS) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\tw.Header().Set(\"Access-Control-Allow-Methods\", \"GET, PUT, POST, PATCH, DELETE\")\n\tw.Header().Set(\"Access-Control-Allow-Headers\", \"Content-Type, Authorization\")\n\tw.Header().Set(\"Access-Control-Expose-Headers\", \"Authorization\")\n\tw.Header().Set(\"Access-Control-Max-Age\", \"600\")\n\n\tif r.Method == \"OPTIONS\" {\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn\n\t}\n\n\t// Used to check if a sign in is being attempted, and logging the attempt in a table\n\t// Could be done with less code in a different function (such as SessionsHandler)\n\t// But it would cause tests to fail.\n\tif r.Method == \"POST\" && r.URL.Path == \"/v1/sessions\" {\n\t\tif r.Header.Get(\"Content-Type\") != \"application/json\" {\n\t\t\thttp.Error(w, \"Body must be in json\", http.StatusUnsupportedMediaType)\n\t\t\treturn\n\t\t}\n\t\tcred := &users.Credentials{}\n\t\tjsonResponseBody, _ := ioutil.ReadAll(r.Body)\n\t\tr.Body = ioutil.NopCloser(bytes.NewReader([]byte(jsonResponseBody)))\n\t\terr := json.Unmarshal(jsonResponseBody, cred)\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"Unable to unpack json into credentials\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tdsn := os.Getenv(\"DSN\")\n\t\tdb, err := sql.Open(\"mysql\", dsn)\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"Unable to open database\", http.StatusInternalServerError)\n\t\t}\n\t\tdefer db.Close()\n\t\trow, err := db.Query(\"SELECT id FROM user WHERE email=?\", cred.Email)\n\t\tif err == nil && row.Next() {\n\t\t\t// For updating the user stats table\n\t\t\tid := 
-1\n\t\t\terr = row.Scan(&id)\n\t\t\tif err == nil {\n\t\t\t\tinsert := \"INSERT INTO user_stats (id, date_time, client_ip) VALUES (?, ?, ?)\"\n\t\t\t\tip := r.Header.Get(\"X-Forwarded-For\")\n\t\t\t\tif len(ip) == 0 {\n\t\t\t\t\tip = r.RemoteAddr\n\t\t\t\t}\n\t\t\t\tt := time.Now()\n\t\t\t\tt.Format(time.RFC3339)\n\t\t\t\t_, err = db.Exec(insert, id, t, ip)\n\t\t\t}\n\t\t}\n\t}\n\n\tc.Handler.ServeHTTP(w, r)\n}", "func (contrl *MailController) SendAuthentication(c *gin.Context) (int, gin.H, error) {\n\tconst subject = \"請驗證您的信箱\"\n\tvar err error\n\tvar mailBody string\n\tvar out bytes.Buffer\n\tvar reqBody activationReqBody\n\n\tif failData, err := bindRequestJSONBody(c, &reqBody); err != nil {\n\t\treturn http.StatusBadRequest, gin.H{\"status\": \"fail\", \"data\": failData}, nil\n\t}\n\n\tif err = contrl.HTMLTemplate.ExecuteTemplate(&out, \"authenticate.tmpl\", struct {\n\t\tHref string\n\t}{\n\t\treqBody.ActivateLink,\n\t}); err != nil {\n\t\treturn http.StatusInternalServerError, gin.H{\"status\": \"error\", \"message\": \"can not create authenticate mail body\"}, errors.WithStack(err)\n\t}\n\n\tmailBody = out.String()\n\n\tif err = contrl.MailService.Send(reqBody.Email, subject, mailBody); err != nil {\n\t\treturn http.StatusInternalServerError, gin.H{\"status\": \"error\", \"message\": fmt.Sprintf(\"can not send authenticate mail to %s\", reqBody.Email)}, err\n\t}\n\n\treturn http.StatusNoContent, gin.H{}, nil\n}", "func Authentication(auths []Authenticator, opts ...Option) func(next http.Handler) http.Handler {\n\toptions := newOptions(opts...)\n\tconfigureSupportedChallenges(options)\n\ttracer := getTraceProvider(options).Tracer(\"proxy\")\n\n\tspanOpts := []trace.SpanStartOption{\n\t\ttrace.WithSpanKind(trace.SpanKindServer),\n\t}\n\n\treturn func(next http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tctx, span := tracer.Start(r.Context(), fmt.Sprintf(\"%s %v\", r.Method, r.URL.Path), 
spanOpts...)\n\t\t\tdefer span.End()\n\t\t\tr = r.WithContext(ctx)\n\n\t\t\tri := router.ContextRoutingInfo(ctx)\n\t\t\tif isOIDCTokenAuth(r) || ri.IsRouteUnprotected() || r.Method == \"OPTIONS\" {\n\t\t\t\t// Either this is a request that does not need any authentication or\n\t\t\t\t// the authentication for this request is handled by the IdP.\n\t\t\t\tnext.ServeHTTP(w, r)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tfor _, a := range auths {\n\t\t\t\tif req, ok := a.Authenticate(r); ok {\n\t\t\t\t\tnext.ServeHTTP(w, req)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !isPublicPath(r.URL.Path) {\n\t\t\t\t// Failed basic authentication attempts receive the Www-Authenticate header in the response\n\t\t\t\tvar touch bool\n\t\t\t\tcaser := cases.Title(language.Und)\n\t\t\t\tfor k, v := range options.CredentialsByUserAgent {\n\t\t\t\t\tif strings.Contains(k, r.UserAgent()) {\n\t\t\t\t\t\tremoveSuperfluousAuthenticate(w)\n\t\t\t\t\t\tw.Header().Add(\"Www-Authenticate\", fmt.Sprintf(\"%v realm=\\\"%s\\\", charset=\\\"UTF-8\\\"\", caser.String(v), r.Host))\n\t\t\t\t\t\ttouch = true\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// if the request is not bound to any user agent, write all available challenges\n\t\t\t\tif !touch &&\n\t\t\t\t\t// This is a temporary hack... Before the authentication middleware rewrite all\n\t\t\t\t\t// unauthenticated requests were still handled. The reva http services then did add\n\t\t\t\t\t// the supported authentication headers to the response. Since we are not allowing the\n\t\t\t\t\t// requests to continue so far we have to do it here. 
But we shouldn't do it for the graph service.\n\t\t\t\t\t// That's the reason for this hard check here.\n\t\t\t\t\t!strings.HasPrefix(r.URL.Path, \"/graph\") {\n\t\t\t\t\twriteSupportedAuthenticateHeader(w, r)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor _, s := range SupportedAuthStrategies {\n\t\t\t\tuserAgentAuthenticateLockIn(w, r, options.CredentialsByUserAgent, s)\n\t\t\t}\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\t// if the request is a PROPFIND return a WebDAV error code.\n\t\t\t// TODO: The proxy has to be smart enough to detect when a request is directed towards a webdav server\n\t\t\t// and react accordingly.\n\t\t\tif webdav.IsWebdavRequest(r) {\n\t\t\t\tb, err := webdav.Marshal(webdav.Exception{\n\t\t\t\t\tCode: webdav.SabredavPermissionDenied,\n\t\t\t\t\tMessage: \"Authentication error\",\n\t\t\t\t})\n\n\t\t\t\twebdav.HandleWebdavError(w, b, err)\n\t\t\t}\n\n\t\t\tif r.ProtoMajor == 1 {\n\t\t\t\t// https://github.com/owncloud/ocis/issues/5066\n\t\t\t\t// https://github.com/golang/go/blob/d5de62df152baf4de6e9fe81933319b86fd95ae4/src/net/http/server.go#L1357-L1417\n\t\t\t\t// https://github.com/golang/go/issues/15527\n\n\t\t\t\tdefer r.Body.Close()\n\t\t\t\t_, _ = io.Copy(io.Discard, r.Body)\n\t\t\t}\n\t\t})\n\t}\n}", "func setAuthorization(req *http.Request, apiKey string) {\n\treq.SetBasicAuth(apiKey, \"\")\n}", "func (a *API) Auth(req *http.Request) {\n\t//Supports unauthenticated access to confluence:\n\t//if username and token are not set, do not add authorization header\n\tif a.Username != \"\" && a.Token != \"\" {\n\t\treq.SetBasicAuth(a.Username, a.Token)\n\t}\n}", "func (a *Middleware) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif r.TLS == nil {\n\t\ttrace.WriteError(w, trace.AccessDenied(\"missing authentication\"))\n\t\treturn\n\t}\n\tuser, err := a.GetUser(*r.TLS)\n\tif err != nil {\n\t\ttrace.WriteError(w, err)\n\t\treturn\n\t}\n\n\tremoteAddr := r.RemoteAddr\n\t// If the request is coming from a trusted proxy and the proxy is 
sending a\n\t// TeleportImpersonateHeader, we will impersonate the user in the header\n\t// instead of the user in the TLS certificate.\n\t// This is used by the proxy to impersonate the end user when making requests\n\t// without re-signing the client certificate.\n\timpersonateUser := r.Header.Get(TeleportImpersonateUserHeader)\n\tif impersonateUser != \"\" {\n\t\tif !isProxyRole(user) {\n\t\t\ttrace.WriteError(w, trace.AccessDenied(\"Credentials forwarding is only permitted for Proxy\"))\n\t\t\treturn\n\t\t}\n\t\t// If the service is not configured to allow credentials forwarding, reject the request.\n\t\tif !a.EnableCredentialsForwarding {\n\t\t\ttrace.WriteError(w, trace.AccessDenied(\"Credentials forwarding is not permitted by this service\"))\n\t\t\treturn\n\t\t}\n\n\t\tif user, err = a.extractIdentityFromImpersonationHeader(impersonateUser); err != nil {\n\t\t\ttrace.WriteError(w, err)\n\t\t\treturn\n\t\t}\n\t\tremoteAddr = r.Header.Get(TeleportImpersonateIPHeader)\n\t}\n\n\t// If the request is coming from a trusted proxy, we already know the user\n\t// and we will impersonate him. 
At this point, we need to remove the\n\t// TeleportImpersonateHeader from the request, otherwise the proxy will\n\t// attempt sending the request to upstream servers with the impersonation\n\t// header from a fake user.\n\tr.Header.Del(TeleportImpersonateUserHeader)\n\tr.Header.Del(TeleportImpersonateIPHeader)\n\n\t// determine authenticated user based on the request parameters\n\tctx := r.Context()\n\tctx = authz.ContextWithUserCertificate(ctx, certFromConnState(r.TLS))\n\tclientSrcAddr, err := utils.ParseAddr(remoteAddr)\n\tif err == nil {\n\t\tctx = authz.ContextWithClientAddr(ctx, clientSrcAddr)\n\t}\n\tctx = authz.ContextWithUser(ctx, user)\n\ta.Handler.ServeHTTP(w, r.WithContext(ctx))\n}", "func HTTPRequest(user m.AuthUser, req *http.Request, verbose bool) ([]byte, string) {\n\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\tError(err, \"There was a problem in making the request\")\n\n\tdefer resp.Body.Close()\n\trespBody, err := ioutil.ReadAll(resp.Body)\n\tError(err, \"There was a problem reading the response body\")\n\n\t//b.CheckAuth(user, resp.Status)\n\n\tif verbose {\n\t\tfmt.Println(\"Response Headers:\", resp.Header)\n\t\tfmt.Println(\"Response Status:\", resp.Status)\n\t\tfmt.Println(\"Response Body:\", string(respBody))\n\t}\n\treturn respBody, resp.Status\n}", "func (c *KeycloakClient) do(req *http.Request) (*http.Response, error) {\n\tlog.Println(req.Method + \" \" + req.URL.String())\n\treq.Header.Set(\"Authorization\", \"Bearer \"+c.token)\n\treturn http.DefaultClient.Do(req)\n}", "func (handler *UserHandler) AuthenticatedUser(w http.ResponseWriter, r *http.Request) {\n\n\ttokenString := r.Header.Get(\"Authorization\")\n\tif strings.Contains(tokenString, \"Bearer\") || strings.Contains(tokenString, \"bearer\") {\n\t\ttokenSplitted := strings.SplitAfter(tokenString, \" \")\n\t\tif len(tokenSplitted) < 1 {\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\tjson.NewEncoder(w).Encode(entities.Error{\n\t\t\t\tError: \"invalid 
token\",\n\t\t\t})\n\t\t\treturn\n\t\t}\n\t\ttokenString = tokenSplitted[1]\n\t}\n\n\tauthenticated, err := handler.usecases.AuthenticatedUser(tokenString)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\tjson.NewEncoder(w).Encode(entities.Error{\n\t\t\tError: err.Error(),\n\t\t})\n\t\treturn\n\t}\n\n\tif authenticated.Authenticated {\n\t\tw.WriteHeader(http.StatusOK)\n\t\tjson.NewEncoder(w).Encode(&authenticated)\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusUnauthorized)\n}", "func Handle(m modules.Context, rc *modules.RequestContext, r *radius.Request, _ modules.Middleware) (*modules.Response, error) {\n\tctx, ok := m.(moduleContext)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"unable to obtain context\")\n\t}\n\n\t// Currently only handling authorization requests - we have roadmap tasks to support full v2 integration T64414814\n\t// When we'll have v2 support we can remove the hand crafted json packet\n\tjsonPacket := map[string]map[string]interface{}{\n\t\t\"Called-Station-Id\": {\"type\": \"string\", \"value\": []string{normalize(rfc2865.CalledStationID_GetString(r.Packet))}},\n\t\t\"Calling-Station-Id\": {\"type\": \"string\", \"value\": []string{normalize(rfc2865.CallingStationID_GetString(r.Packet))}},\n\t\t\"NAS-Identifier\": {\"type\": \"string\", \"value\": []string{rfc2865.NASIdentifier_GetString(r.Packet)}},\n\t}\n\t// If no nas ip address is specified then no field will be sent\n\tif rfc2865.NASIPAddress_Get(r.Packet) != nil {\n\t\tjsonPacket[\"NAS-IP-Address\"] =\n\t\t\tmap[string]interface{}{\"type\": \"string\", \"value\": []string{rfc2865.NASIPAddress_Get(r.Packet).String()}}\n\t}\n\tencodedMsg, err := json.Marshal(jsonPacket)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to marshal radius packet: %w\", err)\n\t}\n\treq, err := http.NewRequest(http.MethodPost, ctx.URI, bytes.NewReader(encodedMsg))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Add(\"Authorization\", \"Bearer 
\"+ctx.AccessToken)\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\n\tresp, err := ctx.HTTPClient.Do(req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error sending response to endpoint: %w\", err)\n\t}\n\tdefer resp.Body.Close()\n\trc.Logger.Debug(\"got response\", zap.String(\"status\", resp.Status),\n\t\tzap.String(\"url\", resp.Request.URL.String()),\n\t\tzap.Any(\"request\", r.Packet.Attributes),\n\t\tzap.String(\"Called-Station-Id\", rfc2865.CalledStationID_GetString(r.Packet)),\n\t\tzap.String(\"Calling-Station-Id\", rfc2865.CallingStationID_GetString(r.Packet)),\n\t\tzap.String(\"NAS-Identifier\", rfc2865.NASIdentifier_GetString(r.Packet)))\n\n\tif resp.StatusCode != http.StatusOK {\n\t\trc.Logger.Error(\"bad status code\",\n\t\t\tzap.Int(\"status\", resp.StatusCode),\n\t\t\tzap.String(\"url\", resp.Request.URL.String()))\n\t\treturn nil, fmt.Errorf(\"error processing message by endpoint. Response status %d\", resp.StatusCode)\n\t}\n\tvar endPointResponse EndpointResponse\n\tif err := json.NewDecoder(resp.Body).Decode(&endPointResponse); err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to decode endpoint response: %w\", err)\n\t}\n\n\tif len(endPointResponse.Auth) == 0 {\n\t\treturn nil, fmt.Errorf(\"malformed auth response: no acceptance code\")\n\t}\n\tvar p *radius.Packet\n\tswitch endPointResponse.Auth[0] {\n\tcase acceptCode:\n\t\tp = r.Response(radius.CodeAccessAccept)\n\tcase rejectCode:\n\t\tp = r.Response(radius.CodeAccessReject)\n\t}\n\n\tresponse := &modules.Response{\n\t\tCode: p.Code,\n\t\tAttributes: p.Attributes,\n\t}\n\treturn response, nil\n}", "func (h *HTTPHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {\n\tvar err error\n\n\tvar paramsPrefix string = http.CanonicalHeaderKey(\"X-UrlFetch-\")\n\tparams := http.Header{}\n\tfor key, values := range req.Header {\n\t\tif strings.HasPrefix(key, paramsPrefix) {\n\t\t\tparams[key] = values\n\t\t}\n\t}\n\n\tfor key := range params 
{\n\t\treq.Header.Del(key)\n\t}\n\n\tif h.Authenticator != nil {\n\t\tauth := req.Header.Get(\"Proxy-Authorization\")\n\t\tif auth == \"\" {\n\t\t\th.ProxyAuthorizationRequired(rw, req)\n\t\t\treturn\n\t\t}\n\n\t\tparts := strings.SplitN(auth, \" \", 2)\n\t\tif len(parts) == 2 {\n\t\t\tswitch parts[0] {\n\t\t\tcase \"Basic\":\n\t\t\t\tif auth, err := base64.StdEncoding.DecodeString(parts[1]); err == nil {\n\t\t\t\t\tparts := strings.Split(string(auth), \":\")\n\t\t\t\t\tusername := parts[0]\n\t\t\t\t\tpassword := parts[1]\n\n\t\t\t\t\tif err := h.Authenticator.Authenticate(username, password); err != nil {\n\t\t\t\t\t\thttp.Error(rw, \"403 Forbidden\", http.StatusForbidden)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tglog.Errorf(\"Unrecognized auth type: %#v\", parts[0])\n\t\t\t\thttp.Error(rw, \"403 Forbidden\", http.StatusForbidden)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\treq.Header.Del(\"Proxy-Authorization\")\n\t}\n\n\tif req.Method == http.MethodConnect {\n\t\thost, port, err := net.SplitHostPort(req.Host)\n\t\tif err != nil {\n\t\t\thost = req.Host\n\t\t\tport = \"443\"\n\t\t}\n\n\t\tglog.Infof(\"%s \\\"%s %s:%s %s\\\" - -\", req.RemoteAddr, req.Method, host, port, req.Proto)\n\n\t\tdial := h.Dial\n\t\tif dial == nil {\n\t\t\tdial = h.Transport.Dial\n\t\t}\n\n\t\tconn, err := dial(\"tcp\", net.JoinHostPort(host, port))\n\t\tif err != nil {\n\t\t\thttp.Error(rw, err.Error(), http.StatusBadGateway)\n\t\t\treturn\n\t\t}\n\n\t\thijacker, ok := rw.(http.Hijacker)\n\t\tif !ok {\n\t\t\thttp.Error(rw, fmt.Sprintf(\"%#v is not http.Hijacker\", rw), http.StatusBadGateway)\n\t\t\treturn\n\t\t}\n\t\tlconn, _, err := hijacker.Hijack()\n\t\tif err != nil {\n\t\t\thttp.Error(rw, err.Error(), http.StatusBadGateway)\n\t\t\treturn\n\t\t}\n\n\t\tio.WriteString(lconn, \"HTTP/1.1 200 OK\\r\\n\\r\\n\")\n\n\t\tdefer lconn.Close()\n\t\tdefer conn.Close()\n\n\t\tgo yaputil.IOCopy(conn, lconn)\n\t\tyaputil.IOCopy(lconn, conn)\n\n\t\treturn\n\t}\n\n\tif req.Host 
== \"\" {\n\t\thttp.Error(rw, \"400 Bad Request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif req.URL.Host == \"\" {\n\t\treq.URL.Host = req.Host\n\t}\n\n\tif req.ContentLength == 0 {\n\t\tio.Copy(ioutil.Discard, req.Body)\n\t\treq.Body.Close()\n\t\treq.Body = nil\n\t}\n\n\tglog.Infof(\"%s \\\"%s %s %s\\\" - -\", req.RemoteAddr, req.Method, req.URL.String(), req.Proto)\n\n\tif req.URL.Scheme == \"\" {\n\t\treq.URL.Scheme = \"http\"\n\t}\n\n\tresp, err := h.Transport.RoundTrip(req)\n\tif err != nil {\n\t\tmsg := err.Error()\n\t\tif strings.HasPrefix(msg, \"Invaid DNS Record: \") {\n\t\t\thttp.Error(rw, \"403 Forbidden\", http.StatusForbidden)\n\t\t} else {\n\t\t\thttp.Error(rw, err.Error(), http.StatusBadGateway)\n\t\t}\n\t\treturn\n\t}\n\n\tfor key, values := range resp.Header {\n\t\tfor _, value := range values {\n\t\t\trw.Header().Add(key, value)\n\t\t}\n\t}\n\trw.WriteHeader(resp.StatusCode)\n\n\tdefer resp.Body.Close()\n\n\tvar r io.Reader = resp.Body\n\tyaputil.IOCopy(rw, r)\n}", "func (h *ApiHandlerWithAuth) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tdefer r.Body.Close()\n\tcookie, err := r.Cookie(\"access_token\")\n\tif err != nil {\n\t\terr = fmt.Errorf(\"extract auth cookie: %v\", err)\n\t\treplyWithError(w, http.StatusForbidden, err)\n\t\treturn\n\t}\n\ttoken, err := jwt.Parse(cookie.Value, func(tok *jwt.Token) (interface{}, error) {\n\t\tif _, ok := tok.Method.(*jwt.SigningMethodHMAC); !ok {\n\t\t\treturn nil, fmt.Errorf(\"unexpected signature method\")\n\t\t}\n\t\treturn []byte(h.conf.JwtSignKey), nil\n\t})\n\tif err != nil {\n\t\treplyWithError(w, http.StatusForbidden, fmt.Errorf(\"parse jwt token: %v\", err))\n\t\treturn\n\t}\n\tclaimsMap, ok := token.Claims.(jwt.MapClaims)\n\tif !ok || !token.Valid {\n\t\terr = fmt.Errorf(\"invalid token\")\n\t\treplyWithError(w, http.StatusForbidden, err)\n\t\treturn\n\t}\n\tclaims := &tokenClaims{}\n\tif err := mapstructure.Decode(claimsMap, claims); err != nil {\n\t\terr = fmt.Errorf(\"decode auth 
token: %v\", err)\n\t\tlogE.Print(err)\n\t\treplyWithError(w, http.StatusForbidden, err)\n\t\treturn\n\t}\n\th.doHandle(w, r, claims.UserId)\n}", "func (r *requestHandler) Authenticate(domain, clientID, clientSecret string) Session {\n\n\tpayload := clientID + \":\" + clientSecret\n\tencodedToken := b64.StdEncoding.EncodeToString([]byte(payload))\n\tresp, err := r.client.R().\n\t\tEnableTrace().\n\t\tSetHeader(\"Accept\", \"application/json\").\n\t\tSetHeader(\"Content-Type\", \"application/x-www-form-urlencoded\").\n\t\tSetHeader(\"Accept\", \"application/json\").\n\t\tSetHeader(\"Authorization\", \"Basic \"+encodedToken).\n\t\tSetBody([]byte(`grant_type=client_credentials`)).\n\t\tPost(\"https://\" + domain + \"/oauth/token\")\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tr.session = NewSession(domain, resp.Body())\n\treturn r.session\n}", "func (c *Config) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tu := c.User\n\trequestOrigin := r.Header.Get(\"Origin\")\n\n\t// Add CORS headers before any operation so even on a 401 unauthorized status, CORS will work.\n\tif c.Cors.Enabled && requestOrigin != \"\" {\n\t\theaders := w.Header()\n\n\t\tallowedHeaders := strings.Join(c.Cors.AllowedHeaders, \", \")\n\t\tallowedMethods := strings.Join(c.Cors.AllowedMethods, \", \")\n\t\texposedHeaders := strings.Join(c.Cors.ExposedHeaders, \", \")\n\n\t\tallowAllHosts := len(c.Cors.AllowedHosts) == 1 && c.Cors.AllowedHosts[0] == \"*\"\n\t\tallowedHost := isAllowedHost(c.Cors.AllowedHosts, requestOrigin)\n\n\t\tif allowAllHosts {\n\t\t\theaders.Set(\"Access-Control-Allow-Origin\", \"*\")\n\t\t} else if allowedHost {\n\t\t\theaders.Set(\"Access-Control-Allow-Origin\", requestOrigin)\n\t\t}\n\n\t\tif allowAllHosts || allowedHost {\n\t\t\theaders.Set(\"Access-Control-Allow-Headers\", allowedHeaders)\n\t\t\theaders.Set(\"Access-Control-Allow-Methods\", allowedMethods)\n\n\t\t\tif c.Cors.Credentials {\n\t\t\t\theaders.Set(\"Access-Control-Allow-Credentials\", 
\"true\")\n\t\t\t}\n\n\t\t\tif len(c.Cors.ExposedHeaders) > 0 {\n\t\t\t\theaders.Set(\"Access-Control-Expose-Headers\", exposedHeaders)\n\t\t\t}\n\t\t}\n\t}\n\n\tif r.Method == \"OPTIONS\" && c.Cors.Enabled && requestOrigin != \"\" {\n\t\treturn\n\t}\n\n\t// Authentication\n\tif c.Auth {\n\t\tw.Header().Set(\"WWW-Authenticate\", `Basic realm=\"Restricted\"`)\n\n\t\t// Gets the correct user for this request.\n\t\tusername, password, ok := r.BasicAuth()\n\t\tzap.L().Info(\"login attempt\", zap.String(\"username\", username), zap.String(\"remote_address\", r.RemoteAddr))\n\t\tif !ok {\n\t\t\thttp.Error(w, \"Not authorized\", 401)\n\t\t\treturn\n\t\t}\n\n\t\tuser, ok := c.Users[username]\n\t\tif !ok {\n\t\t\thttp.Error(w, \"Not authorized\", 401)\n\t\t\treturn\n\t\t}\n\n\t\tif !checkPassword(user.Password, password) {\n\t\t\tzap.L().Info(\"invalid password\", zap.String(\"username\", username), zap.String(\"remote_address\", r.RemoteAddr))\n\t\t\thttp.Error(w, \"Not authorized\", 401)\n\t\t\treturn\n\t\t}\n\n\t\tu = user\n\t\tzap.L().Info(\"user authorized\", zap.String(\"username\", username))\n\t} else {\n\t\t// Even if Auth is disabled, we might want to get\n\t\t// the user from the Basic Auth header. 
Useful for Caddy\n\t\t// plugin implementation.\n\t\tusername, _, ok := r.BasicAuth()\n\t\tif ok {\n\t\t\tif user, ok := c.Users[username]; ok {\n\t\t\t\tu = user\n\t\t\t}\n\t\t}\n\t}\n\n\t// Checks for user permissions relatively to this PATH.\n\tnoModification := r.Method == \"GET\" || r.Method == \"HEAD\" ||\n\t\tr.Method == \"OPTIONS\" || r.Method == \"PROPFIND\"\n\n\tallowed := u.Allowed(r.URL.Path, noModification)\n\n\tzap.L().Debug(\"allowed & method & path\", zap.Bool(\"allowed\", allowed), zap.String(\"method\", r.Method), zap.String(\"path\", r.URL.Path))\n\n\tif !allowed {\n\t\tw.WriteHeader(http.StatusForbidden)\n\t\treturn\n\t}\n\n\tif r.Method == \"HEAD\" {\n\t\tw = newResponseWriterNoBody(w)\n\t}\n\n\t// Excerpt from RFC4918, section 9.4:\n\t//\n\t// \t\tGET, when applied to a collection, may return the contents of an\n\t//\t\t\"index.html\" resource, a human-readable view of the contents of\n\t//\t\tthe collection, or something else altogether.\n\t//\n\t// Get, when applied to collection, will return the same as PROPFIND method.\n\tif r.Method == \"GET\" && strings.HasPrefix(r.URL.Path, u.Handler.Prefix) {\n\t\tinfo, err := u.Handler.FileSystem.Stat(context.TODO(), strings.TrimPrefix(r.URL.Path, u.Handler.Prefix))\n\t\tif err == nil && info.IsDir() {\n\t\t\tr.Method = \"PROPFIND\"\n\n\t\t\tif r.Header.Get(\"Depth\") == \"\" {\n\t\t\t\tr.Header.Add(\"Depth\", \"1\")\n\t\t\t}\n\t\t}\n\t}\n\n\t// Runs the WebDAV.\n\t//u.Handler.LockSystem = webdav.NewMemLS()\n\tu.Handler.ServeHTTP(w, r)\n}", "func BasicAuthenticate(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {\n\t//TODO Instead of clear passwor I could stick with encoded or other crypted solution\n\t// Use base64 decoding to extract from http header user credentials\n\tusername, passwd, err := security.Credentials(req)\n\tif err != nil {\n\t\tHTTPAuthorizationError(resp, err)\n\t\treturn\n\t}\n\tlog.Infof(\"User %s trying to connect with %s\\n\", username, 
passwd)\n\n\tdebug := false\n\t//TODO Manage a way to plug whatever datastore you want, wherever it is\n\tok, err := security.EtcdCheckCredentials(username, passwd, debug)\n\tif err != nil {\n\t\tHTTPInternalError(resp, err)\n\t\treturn\n\t}\n\tif !ok {\n\t\tHTTPAuthorizationError(resp, fmt.Errorf(\"credentials refused\"))\n\t\treturn\n\t}\n\tlog.Infof(\"Authentification granted, processing (%s:%s)\", username, passwd)\n\tchain.ProcessFilter(req, resp)\n}", "func (l Headers) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\tw.Header().Set(\"Access-Control-Allow-Headers\", \"authorization,content-type,hawkular-tenant\")\n\tw.Header().Set(\"Access-Control-Allow-Methods\", \"GET, POST, DELETE, PUT\")\n\n\tif l.Verbose {\n\t\tlog.Printf(\"set http headers\")\n\t}\n\n\tl.next.ServeHTTP(w, r)\n}", "func (h *Helper) Authenticate(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {\n\th.authenticateWithErrHandler(w, r, next, HandleHttpError)\n}", "func Authenticate(clientID, clientIDTag, userID string) {\n\tapiURL = generateAPIURL(clientID)\n\tauth = &Auth{\n\t\tClientID: strings.Join([]string{clientID, clientIDTag}, \"-\"),\n\t\tUserID: userID}\n}", "func sendHTTPRequestToKTT(kttClient *KttClient, request *http.Request) KttResponse {\n\t//Perform POST request against KTT only in \"normal\" mode\n\tif applicationMode == \"normal\" {\n\n\t\tresponse, err := kttClient.Client.Do(request)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tdefer response.Body.Close()\n\n\t\tlog.Printf(\"Status code %v\", response.StatusCode)\n\n\t\tif response.StatusCode == http.StatusCreated {\n\t\t\tstatistics.TicketsCreated += 1\n\t\t\tbodyBytes, err := ioutil.ReadAll(response.Body)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\tbodyString := string(bodyBytes)\n\t\t\t//fmt.Println(bodyString)\n\t\t\treturn KttResponse(bodyString)\n\t\t} else 
{\n\t\t\tstatistics.Errors += 1\n\t\t}\n\n\t}\n\n\tif applicationMode == \"test\" {\n\t\tlog.Println(\"In test mode, pass ticket creation...\")\n\t}\n\n\treturn \"\"\n}", "func (h *Handler) ServeAPI(w http.ResponseWriter, r *http.Request) {\n\tif !common.Allowed(h.cfg.AllowRequest, r.RemoteAddr) {\n\t\terr := errors.New(r.RemoteAddr + \" not allowed\")\n\t\tlog.Print(err)\n\t\tcommon.ErrResp(w, err)\n\t\treturn\n\t}\n\n\tb, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tlog.Print(err)\n\t\tcommon.ErrResp(w, err)\n\t\treturn\n\t}\n\tvar c struct {\n\t\tCommand string `json:\"command\"`\n\t}\n\tif err := json.Unmarshal(b, &c); err != nil {\n\t\tlog.Print(err)\n\t\tlog.Print(string(b))\n\t\tcommon.ErrResp(w, err)\n\t\treturn\n\t}\n\tif c.Command != \"attachToTangle\" {\n\t\terr := common.Loop(func() error {\n\t\t\treturn bypass(h.cfg.IRIserver, w, b)\n\t\t})\n\t\tif err != nil {\n\t\t\tw.WriteHeader(400)\n\t\t}\n\t\treturn\n\t}\n\thdr := r.Header.Get(http.CanonicalHeaderKey(\"Authorization\"))\n\ttoken := common.ParseAuthorizationHeader(hdr)\n\tif !common.IsValid(token, h.cfg.Tokens) && h.cfg.Limit != 0 {\n\t\tlog.Print(\"not authed\")\n\t\tlimitReached := h.cmdLimiter.Limit(c.Command, r)\n\t\tif limitReached != nil {\n\t\t\tcommon.ErrResp(w, limitReached)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tlog.Print(\"authed\")\n\t}\n\tif err := h.attachToTangle(w, b); err != nil {\n\t\tlog.Print(err)\n\t\tcommon.ErrResp(w, err)\n\t}\n}", "func (c *HTTPClient) DoUnauthenticated(ctx context.Context, method string, path string, params map[string]string, data interface{}, result interface{}) (statusCode int, err error) {\n\tc.l.Lock()\n\tdefer c.l.Unlock()\n\n\treq, err := c.prepareRequest(method, path, params, data)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn c.do(ctx, req, result, true, false, false)\n}", "func (v4 Signer) SignHTTP(ctx context.Context, r *http.Request, payloadHash string, service string, region string, signingTime time.Time) error 
{\n\tcredentials, err := v4.Credentials.Retrieve(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsigner := &httpSigner{\n\t\tRequest: r,\n\t\tPayloadHash: payloadHash,\n\t\tServiceName: service,\n\t\tRegion: region,\n\t\tCredentials: credentials,\n\t\tTime: signingTime.UTC(),\n\t\tDisableHeaderHoisting: v4.DisableHeaderHoisting,\n\t\tDisableURIPathEscaping: v4.DisableURIPathEscaping,\n\t}\n\n\tsignedRequest, err := signer.Build()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tv4.logHTTPSigningInfo(signedRequest)\n\n\t*r = *signedRequest.Request\n\n\treturn nil\n}", "func (h AuthRefreshHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\ttoken, ok := getUserToken(r)\n\tif !ok {\n\t\twriteError(w, http.StatusUnauthorized, \"Missing or invalid Authorization header.\")\n\t\treturn\n\t}\n\n\tuser, err := h.UserRepo.FindUser(\"WHERE token = $1\", token)\n\tif err != nil {\n\t\tif err == easyalert.ErrRecordDoesNotExist {\n\t\t\twriteError(w, http.StatusUnauthorized, \"Invalid token.\")\n\t\t\treturn\n\t\t}\n\n\t\twriteError(w, http.StatusInternalServerError, \"an unknown error occured\")\n\t\treturn\n\t}\n\n\tnewToken, err := random.String(easyalert.UserTokenLength)\n\tif err != nil {\n\t\twriteError(w, http.StatusInternalServerError, \"could not generate token\")\n\t\treturn\n\t}\n\n\tuser.Token = newToken\n\n\tuser, err = h.UserRepo.UpdateUser(user)\n\tif err != nil {\n\t\twriteError(w, http.StatusInternalServerError, \"could not update token\")\n\t\treturn\n\t}\n\n\tvar responseBody authResponseBody\n\tresponseBody.Token = user.Token\n\n\tresponseBodyBytes, err := json.Marshal(responseBody)\n\tif err != nil {\n\t\twriteError(w, http.StatusInternalServerError, \"could not marshal response body\")\n\t\treturn\n\t}\n\n\tbody, err := prettifyJSON(string(responseBodyBytes))\n\tif err != nil {\n\t\twriteError(w, http.StatusInternalServerError, \"could not prettify json response\")\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json; 
charset=UTF-8\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write([]byte(body))\n}", "func (c *Client) SendWithAuth(req *http.Request, v interface{}) error {\n\tif (c.Token == nil) || (c.Token.ExpiresAt.Before(time.Now())) {\n\t\tresp, err := c.GetAccessToken()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tc.Token = resp\n\t}\n\treq.Header.Set(\"Authorization\", \"Bearer \"+c.Token.Token)\n\n\treturn c.Send(req, v)\n}", "func (t *TokenAuth) ServeHTTP(w http.ResponseWriter, req *http.Request, next http.HandlerFunc) {\n\ttoken, err := t.auth.Authenticate(req)\n\tif err != nil {\n\t\tt.auth.UnauthorizedHandler.ServeHTTP(w, req)\n\t\treturn\n\t}\n\tcontext.Set(req, \"token\", token)\n\tnext(w, req)\n\tcontext.Clear(req)\n}", "func TestApiKeyAuthenticationHandler_ServeHTTP_GetRequest(t *testing.T) {\n\treq, err := http.NewRequest(\"GET\", shared.SendPath, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\trr := httptest.NewRecorder()\n\treq.Header.Set(\"Cookie\", shared.AuthenticationCookieName+\"=\"+getValidIncomingApiKey())\n\n\tnextHandler := MockedNextHandler{}\n\n\ttestee := ApiKeyAuthenticationHandler{ApiKeyResolver: getValidIncomingApiKey, Next: &nextHandler,\n\t\tAllowedMethod: \"POST\", Logger: getTestLogger()}\n\thandler := http.HandlerFunc(testee.ServeHTTP)\n\n\thandler.ServeHTTP(rr, req)\n\tassert.Equal(t, 401, rr.Code, \"Status code 401 should be returned\")\n\tassert.False(t, nextHandler.hasBeenCalled, \"Child handler should not be called\")\n}", "func (t *AuthenticationTransport) RoundTrip(req *http.Request) (*http.Response, error) {\n\treq.Header.Add(AuthenticationHeader, t.authenticationToken)\n\treturn t.transport.RoundTrip(req)\n}", "func (auth Auth) ServeHTTP(res http.ResponseWriter, req *http.Request) {\n\tif req.Method != http.MethodPost {\n\t\thttp.Error(res, \"only POST allowed\", http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\n\temail := req.FormValue(\"email\")\n\tpassword := req.FormValue(\"password\")\n\n\tuser, err := 
auth.db.UserCheck(email, password)\n\tif err != nil {\n\t\thttp.Error(res, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif user == nil {\n\t\thttp.Error(res, \"invalid login\", http.StatusForbidden)\n\t\treturn\n\t}\n\n\ttoken, err := auth.key.NewToken(user.ID)\n\tif err != nil {\n\t\thttp.Error(res, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tencode(res, data.Auth{\n\t\tToken: token,\n\t\tEmail: email,\n\t})\n}", "func (t TabAuth) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif !t.isAuthenticated(r) {\n\t\thttp.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\tstatus, err := t.tabauthHandler(w, r)\n\tif err != nil {\n\t\tlog.Printf(\"HTTP %d: %s\", status, err.Error())\n\t\thttp.Error(w, http.StatusText(status), status)\n\t}\n}", "func (h *authHandler) Authenticate(ctx *fasthttp.RequestCtx) {\n\ttype successResponse struct {\n\t\tLogin string `json:\"login\"`\n\t\tToken string `json:\"token\"`\n\t}\n\n\ttype errorResponse struct {\n\t\tError string `json:\"error\"`\n\t}\n\n\tlogin := string(ctx.PostArgs().Peek(\"login\"))\n\tpassword := string(ctx.PostArgs().Peek(\"password\"))\n\n\tif isValidCredentials(login, password) {\n\t\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\n\t\t\t\"login\": login,\n\t\t\t\"exp\": time.Now().Add(time.Hour * 24).Unix(),\n\t\t})\n\n\t\ttokenString, _ := token.SignedString(MyJWTSigningKey)\n\n\t\trespondWithJSON(ctx, 200, successResponse{\n\t\t\tLogin: login,\n\t\t\tToken: tokenString,\n\t\t})\n\t} else {\n\t\trespondWithJSON(ctx, 401, errorResponse{\n\t\t\tError: \"NotAuthenticated - your credentials are incorrect\",\n\t\t})\n\t}\n}", "func (s *LoginServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif r.URL.Path == \"/\" {\n\t\ts.printServerKeys(w)\n\t\treturn\n\t}\n\n\tuser := r.Header.Get(s.userHeader)\n\n\tpath := r.URL.RawPath\n\tif path == \"\" {\n\t\tpath = r.URL.Path\n\t}\n\n\tresponse, err := 
s.loginParser.ParseURLResponse(path)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 400)\n\t\treturn\n\t}\n\n\ts.printToken(w, response, user)\n}", "func (h *httpCloud) sendHTTPRequest(requestType string, url string, requestBody io.Reader) ([]byte, error) {\n\treq, err := http.NewRequest(requestType, url, requestBody)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tclient := &http.Client{\n\t\tTransport: http.DefaultTransport,\n\t\tTimeout: HttpProviderTimeout,\n\t}\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t} else {\n\t\treturn body, nil\n\t}\n}", "func (ba *BasicAuthenticator) Wrap(h http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tuname, pw, ok := r.BasicAuth()\n\t\tif !ok {\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\tw.Write([]byte(\"no basic credentials in request header\"))\n\t\t\treturn\n\t\t}\n\t\tuname, err := ba.authenticate(uname, pw)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\tw.Write([]byte(fmt.Sprintf(\"authentication failed. 
%s\", err)))\n\t\t\treturn\n\t\t}\n\t\th.ServeHTTP(w, r.WithContext(context.WithValue(r.Context(), unameCtxKey, uname)))\n\t})\n}", "func AuthHandler(handler AuthorisedRequestHandler) httprouter.Handle {\n\treturn func(w http.ResponseWriter, r *http.Request, params httprouter.Params) {\n\t\t// Measure time spent executing shit\n\t\tstart := time.Now()\n\n\t\t// Authorize request\n\t\terr := authorize(r)\n\t\tif err != nil {\n\t\t\t// Logs [source IP] [request method] [request URL] [HTTP status] [time spent serving request]\n\t\t\tlog.Printf(\"%v\\t \\\"%v - %v\\\"\\t%v\\t%v\", sourceIP(r), r.Method, r.RequestURI, http.StatusUnauthorized, time.Since(start))\n\t\t\thttp.Error(w, err.Error(), http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\n\t\t// Pass to the real handler\n\t\tresponse, statusCode, err := handler(r, params)\n\n\t\t// Logs [source IP] [request method] [request URL] [HTTP status] [time spent serving request]\n\t\tlog.Printf(\"%v\\t \\\"%v - %v\\\"\\t%v\\t%v\", sourceIP(r), r.Method, r.RequestURI, statusCode, time.Since(start))\n\n\t\tif err != nil {\n\t\t\t// If we run into an error, throw it back to the client (as plain text)\n\t\t\thttp.Error(w, err.Error(), statusCode)\n\t\t\treturn\n\t\t}\n\n\t\tw.WriteHeader(statusCode)\n\t\tfmt.Fprintln(w, response)\n\t}\n}", "func WrapAuthenticate(hfn http.Handler) http.HandlerFunc {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\n\t\turlVars := mux.Vars(r)\n\t\turlValues := r.URL.Query()\n\n\t\t// if the url parameter 'key' is empty or absent, end the request with an unauthorized response\n\t\tif urlValues.Get(\"key\") == \"\" {\n\t\t\terr := APIErrorUnauthorized()\n\t\t\trespondErr(w, err)\n\t\t\treturn\n\t\t}\n\n\t\trefStr := gorillaContext.Get(r, \"str\").(stores.Store)\n\t\tserviceToken := gorillaContext.Get(r, \"auth_service_token\").(string)\n\n\t\tprojectName := urlVars[\"project\"]\n\t\tprojectUUID := projects.GetUUIDByName(urlVars[\"project\"], refStr)\n\n\t\t// In all cases 
instead of project create\n\t\tif \"projects:create\" != mux.CurrentRoute(r).GetName() {\n\t\t\t// Check if given a project name the project wasn't found\n\t\t\tif projectName != \"\" && projectUUID == \"\" {\n\t\t\t\tapiErr := APIErrorNotFound(\"project\")\n\t\t\t\trespondErr(w, apiErr)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t// Check first if service token is used\n\t\tif serviceToken != \"\" && serviceToken == urlValues.Get(\"key\") {\n\t\t\tgorillaContext.Set(r, \"auth_roles\", []string{\"service_admin\"})\n\t\t\tgorillaContext.Set(r, \"auth_user\", \"\")\n\t\t\tgorillaContext.Set(r, \"auth_user_uuid\", \"\")\n\t\t\tgorillaContext.Set(r, \"auth_project_uuid\", projectUUID)\n\t\t\thfn.ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\n\t\troles, user := auth.Authenticate(projectUUID, urlValues.Get(\"key\"), refStr)\n\n\t\tif len(roles) > 0 {\n\t\t\tuserUUID := auth.GetUUIDByName(user, refStr)\n\t\t\tgorillaContext.Set(r, \"auth_roles\", roles)\n\t\t\tgorillaContext.Set(r, \"auth_user\", user)\n\t\t\tgorillaContext.Set(r, \"auth_user_uuid\", userUUID)\n\t\t\tgorillaContext.Set(r, \"auth_project_uuid\", projectUUID)\n\t\t\thfn.ServeHTTP(w, r)\n\t\t} else {\n\t\t\terr := APIErrorUnauthorized()\n\t\t\trespondErr(w, err)\n\t\t}\n\n\t})\n}", "func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) {\n\n\tsign, err := SignRequest(t.mac.SecretKey, req)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tauth := \"Qiniu \" + t.mac.AccessKey + \":\" + base64.URLEncoding.EncodeToString(sign)\n\treq.Header.Set(\"Authorization\", auth)\n\treturn t.Transport.RoundTrip(req)\n}", "func RequestCredentials(w http.ResponseWriter, r *http.Request) {\n\tvar (\n\t\tidentity AuthIdentity\n\t\tcredentials AuthCredentials\n\t)\n\n\teaaCtx := r.Context().Value(contextKey(\"appliance-ctx\")).(*Context)\n\n\tconst fName = \"/Auth RequestCredentials \"\n\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\n\terr := json.NewDecoder(r.Body).Decode(&identity)\n\tif 
err != nil {\n\t\tlog.Errf(fName+\"decode failed: %v\", err.Error())\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\thost, port, err := net.SplitHostPort(r.RemoteAddr)\n\tif err != nil {\n\t\tlog.Errf(fName+\"Cannot retrieve IP from RemoteAddr: %v [%v:%v] %v\",\n\t\t\tr.RemoteAddr, host, port, err)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tcert, err := SignCSR(identity.Csr, eaaCtx)\n\tif err != nil {\n\t\tlog.Errf(fName+\"failed: %v\", err.Error())\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tsignedCertBlock := pem.EncodeToMemory(\n\t\t&pem.Block{Type: \"CERTIFICATE\", Bytes: cert.Raw})\n\tif signedCertBlock == nil {\n\t\tlog.Err(fName + \"/failed to enode signed cert\")\n\t\treturn\n\t}\n\trcaBlock := pem.EncodeToMemory(\n\t\t&pem.Block{Type: \"CERTIFICATE\",\n\t\t\tBytes: eaaCtx.certsEaaCa.rca.x509Cert.Raw})\n\tif rcaBlock == nil {\n\t\tlog.Err(fName + \"failed to enode rca cert\")\n\t\treturn\n\t}\n\n\tcredentials.ID = cert.Subject.CommonName\n\tcredentials.Certificate = string(signedCertBlock)\n\tcredentials.CaChain = []string{string(rcaBlock)}\n\tcredentials.CaPool = []string{string(rcaBlock)}\n\n\tencoder := json.NewEncoder(w)\n\terr = encoder.Encode(credentials)\n\tif err != nil {\n\t\tlog.Errf(fName+\"encoding output to JSON failed: %s\",\n\t\t\terr.Error())\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tlog.Info(fName + \" request from CN: \" + credentials.ID + \", from IP: \" +\n\t\thost + \" properly handled\")\n}", "func (h *asserterHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\n\tif r.Method == http.MethodGet {\n\t\ttoken := r.Header.Get(\"x-token\")\n\t\tsubj := &AssertResponse{}\n\n\t\tif len(token) == 0 {\n\t\t\tsubj.ErrMessage = \"token is empty\"\n\t\t\tsendResp(w, http.StatusBadRequest, subj)\n\t\t\treturn\n\t\t} else if token == \"test-token\" {\n\t\t\tsubj.ErrCode = http.StatusBadRequest\n\t\t\tsubj.ErrMessage = 
\"invalid token\"\n\n\t\t\tsendResp(w, http.StatusBadRequest, subj)\n\t\t} else {\n\t\t\tsubj.ErrCode = 0\n\t\t\tsubj.ErrMessage = \"\"\n\t\t\tsubj.Principals = []*adsapi.Principal{\n\t\t\t\t{\n\t\t\t\t\tType: adsapi.PRINCIPAL_TYPE_USER,\n\t\t\t\t\tName: \"testUser\",\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tsendResp(w, http.StatusOK, subj)\n\t\t}\n\t}\n\n}", "func (s *Handler) Handle(w http.ResponseWriter, r *http.Request, target security.Target) error {\n\tlog.Debugf(\"Handle JWT Request for %s\", net.RemoveURLParams(r.RequestURI))\n\tbasicAuth, ok := r.Header[HeaderAuthorization]\n\tif !ok {\n\t\treturn net.ErrUnauthorized.From(errors.New(\"Authorization is required\"))\n\t}\n\ttoken, err := s.verify(r.Context(), strings.TrimPrefix(basicAuth[0], BearerPreffix))\n\tif err != nil {\n\t\treturn err\n\t}\n\tsecurity.SetAuthType(r, ContextValue)\n\tclaims := token.Claims.(jwt.MapClaims)\n\tif username, exist := claims[UserNameField]; exist {\n\t\tsecurity.SetUserName(r, username.(string))\n\t}\n\tif userID, exist := claims[SubjectField]; exist {\n\t\tsecurity.SetUserID(r, userID.(string))\n\t}\n\tif rolesVal, exist := claims[RolesField]; exist {\n\t\tif iroles, ok := rolesVal.([]interface{}); ok {\n\t\t\troles := make([]string, 0, len(iroles))\n\t\t\tfor _, role := range iroles {\n\t\t\t\troles = append(roles, role.(string))\n\t\t\t}\n\t\t\tsecurity.SetUserRoles(r, roles)\n\t\t}\n\t}\n\treturn nil\n}", "func (h *HTTP2Handler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {\n\tvar err error\n\n\treqHostname := req.Host\n\tif host, _, err := net.SplitHostPort(req.Host); err == nil {\n\t\treqHostname = host\n\t}\n\n\tvar h2 bool = req.ProtoMajor == 2 && req.ProtoMinor == 0\n\tvar isProxyRequest bool = !yaputil.ContainsString(h.ServerNames, reqHostname)\n\n\tvar paramsPrefix string = http.CanonicalHeaderKey(\"X-UrlFetch-\")\n\tparams := http.Header{}\n\tfor key, values := range req.Header {\n\t\tif strings.HasPrefix(key, paramsPrefix) {\n\t\t\tparams[key] = 
values\n\t\t}\n\t}\n\n\tfor key := range params {\n\t\treq.Header.Del(key)\n\t}\n\n\tif isProxyRequest && h.DisableProxy {\n\t\thttp.Error(rw, \"403 Forbidden\", http.StatusForbidden)\n\t\treturn\n\t}\n\n\tvar username, password string\n\tif isProxyRequest && h.Authenticator != nil {\n\t\tauth := req.Header.Get(\"Proxy-Authorization\")\n\t\tif auth == \"\" {\n\t\t\th.ProxyAuthorizationRequired(rw, req)\n\t\t\treturn\n\t\t}\n\n\t\tparts := strings.SplitN(auth, \" \", 2)\n\t\tif len(parts) == 2 {\n\t\t\tswitch parts[0] {\n\t\t\tcase \"Basic\":\n\t\t\t\tif auth, err := base64.StdEncoding.DecodeString(parts[1]); err == nil {\n\t\t\t\t\tparts := strings.Split(string(auth), \":\")\n\t\t\t\t\tusername = parts[0]\n\t\t\t\t\tpassword = parts[1]\n\n\t\t\t\t\tif err := h.Authenticator.Authenticate(username, password); err != nil {\n\t\t\t\t\t\thttp.Error(rw, \"403 Forbidden\", http.StatusForbidden)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tglog.Errorf(\"Unrecognized auth type: %#v\", parts[0])\n\t\t\t\thttp.Error(rw, \"403 Forbidden\", http.StatusForbidden)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\treq.Header.Del(\"Proxy-Authorization\")\n\t}\n\n\tif req.Method == http.MethodConnect {\n\t\thost, port, err := net.SplitHostPort(req.Host)\n\t\tif err != nil {\n\t\t\thost = req.Host\n\t\t\tport = \"443\"\n\t\t}\n\n\t\tglog.Infof(\"[%v 0x%04x %s] %s \\\"%s %s %s\\\" - -\",\n\t\t\treq.TLS.ServerName, req.TLS.Version, username, req.RemoteAddr, req.Method, req.Host, req.Proto)\n\n\t\tdial := h.Dial\n\t\tif dial == nil {\n\t\t\tdial = h.Transport.Dial\n\t\t}\n\n\t\tconn, err := dial(\"tcp\", net.JoinHostPort(host, port))\n\t\tif err != nil {\n\t\t\thttp.Error(rw, err.Error(), http.StatusBadGateway)\n\t\t\treturn\n\t\t}\n\n\t\tvar w io.Writer\n\t\tvar r io.Reader\n\n\t\t// http2 only support Flusher, http1/1.1 support Hijacker\n\t\tif h2 {\n\t\t\tflusher, ok := rw.(http.Flusher)\n\t\t\tif !ok {\n\t\t\t\thttp.Error(rw, fmt.Sprintf(\"%#v is not http.Flusher\", 
rw), http.StatusBadGateway)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trw.WriteHeader(http.StatusOK)\n\t\t\tflusher.Flush()\n\n\t\t\tw = FlushWriter{rw}\n\t\t\tr = req.Body\n\t\t} else {\n\t\t\thijacker, ok := rw.(http.Hijacker)\n\t\t\tif !ok {\n\t\t\t\thttp.Error(rw, fmt.Sprintf(\"%#v is not http.Hijacker\", rw), http.StatusBadGateway)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlconn, _, err := hijacker.Hijack()\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(rw, err.Error(), http.StatusBadGateway)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdefer lconn.Close()\n\n\t\t\tw = lconn\n\t\t\tr = lconn\n\n\t\t\tio.WriteString(lconn, \"HTTP/1.1 200 OK\\r\\n\\r\\n\")\n\t\t}\n\n\t\tdefer conn.Close()\n\n\t\tgo yaputil.IOCopy(conn, r)\n\t\tyaputil.IOCopy(w, conn)\n\n\t\treturn\n\t}\n\n\tif req.Host == \"\" {\n\t\thttp.Error(rw, \"403 Forbidden\", http.StatusForbidden)\n\t\treturn\n\t}\n\n\tif req.URL.Host == \"\" {\n\t\treq.URL.Host = req.Host\n\t}\n\n\tif req.ContentLength == 0 {\n\t\tio.Copy(ioutil.Discard, req.Body)\n\t\treq.Body.Close()\n\t\treq.Body = nil\n\t}\n\n\tglog.Infof(\"[%v 0x%04x %s] %s \\\"%s %s %s\\\" - -\",\n\t\treq.TLS.ServerName, req.TLS.Version, username, req.RemoteAddr, req.Method, req.URL.String(), req.Proto)\n\n\tif req.URL.Scheme == \"\" {\n\t\treq.URL.Scheme = \"http\"\n\t}\n\n\tif h2 {\n\t\treq.ProtoMajor = 1\n\t\treq.ProtoMinor = 1\n\t\treq.Proto = \"HTTP/1.1\"\n\t}\n\n\tif !isProxyRequest && h.Fallback != nil {\n\t\tif h.Fallback.Scheme == \"file\" {\n\t\t\thttp.FileServer(http.Dir(h.Fallback.Path)).ServeHTTP(rw, req)\n\t\t\treturn\n\t\t}\n\t\treq.URL.Scheme = h.Fallback.Scheme\n\t\treq.URL.Host = h.Fallback.Host\n\t\tif ip, _, err := net.SplitHostPort(req.RemoteAddr); err == nil {\n\t\t\txff := req.Header.Get(\"X-Forwarded-For\")\n\t\t\tif xff == \"\" {\n\t\t\t\treq.Header.Set(\"X-Forwarded-For\", ip)\n\t\t\t} else {\n\t\t\t\treq.Header.Set(\"X-Forwarded-For\", xff+\", \"+ip)\n\t\t\t}\n\t\t\treq.Header.Set(\"X-Forwarded-Proto\", \"https\")\n\t\t\treq.Header.Set(\"X-Real-IP\", 
ip)\n\t\t}\n\t}\n\n\tresp, err := h.Transport.RoundTrip(req)\n\tglog.Infof(\"%+v\", req)\n\tif err != nil {\n\t\tmsg := err.Error()\n\t\tif strings.HasPrefix(msg, \"Invaid DNS Record: \") {\n\t\t\thttp.Error(rw, \"403 Forbidden\", http.StatusForbidden)\n\t\t} else {\n\t\t\thttp.Error(rw, err.Error(), http.StatusBadGateway)\n\t\t}\n\t\treturn\n\t}\n\n\tif h2 {\n\t\tresp.Header.Del(\"Connection\")\n\t\tresp.Header.Del(\"Keep-Alive\")\n\t}\n\n\tfor key, values := range resp.Header {\n\t\tfor _, value := range values {\n\t\t\trw.Header().Add(key, value)\n\t\t}\n\t}\n\trw.WriteHeader(resp.StatusCode)\n\n\tdefer resp.Body.Close()\n\n\tvar r io.Reader = resp.Body\n\tyaputil.IOCopy(rw, r)\n}" ]
[ "0.7415173", "0.7090839", "0.68118095", "0.67946285", "0.64851695", "0.6237729", "0.6094957", "0.60039806", "0.5994013", "0.5942896", "0.56280935", "0.55854416", "0.556091", "0.5519506", "0.5493864", "0.5493858", "0.546087", "0.54589224", "0.54501", "0.5440818", "0.5440324", "0.54073876", "0.5403203", "0.54009104", "0.54002213", "0.5383508", "0.5381112", "0.53398114", "0.53364867", "0.5316461", "0.52966696", "0.5269135", "0.525596", "0.52390504", "0.5224662", "0.5212992", "0.51900655", "0.51829636", "0.5180994", "0.51583654", "0.5135107", "0.5134389", "0.5133978", "0.51318765", "0.5131854", "0.5127259", "0.5118702", "0.51171076", "0.511487", "0.510745", "0.51074076", "0.5105609", "0.510292", "0.50992125", "0.509729", "0.5092107", "0.5089548", "0.50837433", "0.5078542", "0.5076889", "0.5072048", "0.5068079", "0.50672925", "0.50634134", "0.50603426", "0.50594246", "0.50467306", "0.504532", "0.5032014", "0.5028351", "0.5024498", "0.5023863", "0.50215447", "0.50212204", "0.50203305", "0.5017634", "0.50167453", "0.5009166", "0.50021064", "0.49988052", "0.49926865", "0.4990961", "0.49898484", "0.498758", "0.49811077", "0.49789357", "0.49785697", "0.49778834", "0.49585727", "0.49480718", "0.4947317", "0.49460414", "0.49434024", "0.49267465", "0.49244455", "0.49232435", "0.492279", "0.4920789", "0.49196887", "0.49189243" ]
0.76494217
0
GetFee returns an estimate of fee based on type of transaction
func (h *HUOBI) GetFee(feeBuilder *exchange.FeeBuilder) (float64, error) { var fee float64 if feeBuilder.FeeType == exchange.OfflineTradeFee || feeBuilder.FeeType == exchange.CryptocurrencyTradeFee { fee = calculateTradingFee(feeBuilder.Pair, feeBuilder.PurchasePrice, feeBuilder.Amount) } if fee < 0 { fee = 0 } return fee, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (h *HitBTC) GetFee(ctx context.Context, feeBuilder *exchange.FeeBuilder) (float64, error) {\n\tvar fee float64\n\tswitch feeBuilder.FeeType {\n\tcase exchange.CryptocurrencyTradeFee:\n\t\tfeeInfo, err := h.GetFeeInfo(ctx,\n\t\t\tfeeBuilder.Pair.Base.String()+\n\t\t\t\tfeeBuilder.Pair.Delimiter+\n\t\t\t\tfeeBuilder.Pair.Quote.String())\n\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tfee = calculateTradingFee(feeInfo, feeBuilder.PurchasePrice,\n\t\t\tfeeBuilder.Amount,\n\t\t\tfeeBuilder.IsMaker)\n\tcase exchange.CryptocurrencyWithdrawalFee:\n\t\tcurrencyInfo, err := h.GetCurrency(ctx, feeBuilder.Pair.Base.String())\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tfee, err = strconv.ParseFloat(currencyInfo.PayoutFee, 64)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\tcase exchange.CryptocurrencyDepositFee:\n\t\tfee = calculateCryptocurrencyDepositFee(feeBuilder.Pair.Base,\n\t\t\tfeeBuilder.Amount)\n\tcase exchange.OfflineTradeFee:\n\t\tfee = getOfflineTradeFee(feeBuilder.PurchasePrice, feeBuilder.Amount)\n\t}\n\tif fee < 0 {\n\t\tfee = 0\n\t}\n\n\treturn fee, nil\n}", "func (p *Poloniex) GetFee(ctx context.Context, feeBuilder *exchange.FeeBuilder) (float64, error) {\n\tvar fee float64\n\tswitch feeBuilder.FeeType {\n\tcase exchange.CryptocurrencyTradeFee:\n\t\tfeeInfo, err := p.GetFeeInfo(ctx)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tfee = calculateTradingFee(feeInfo,\n\t\t\tfeeBuilder.PurchasePrice,\n\t\t\tfeeBuilder.Amount,\n\t\t\tfeeBuilder.IsMaker)\n\n\tcase exchange.CryptocurrencyWithdrawalFee:\n\t\tfee = getWithdrawalFee(feeBuilder.Pair.Base)\n\tcase exchange.OfflineTradeFee:\n\t\tfee = getOfflineTradeFee(feeBuilder.PurchasePrice, feeBuilder.Amount)\n\t}\n\tif fee < 0 {\n\t\tfee = 0\n\t}\n\n\treturn fee, nil\n}", "func (f *feeCalculator) Fee(amountInSat int64, feeRateInSatsPerVByte float64, takeFeeFromAmount bool) int64 {\n\tif amountInSat == 0 {\n\t\treturn 0\n\t}\n\tif takeFeeFromAmount {\n\t\treturn 
f.feeFromAmount(amountInSat, feeRateInSatsPerVByte)\n\t} else {\n\t\treturn f.feeFromRemainingBalance(amountInSat, feeRateInSatsPerVByte)\n\t}\n}", "func (_ElvTradable *ElvTradableCaller) GetTransferFee(opts *bind.CallOpts, _tokenId *big.Int) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _ElvTradable.contract.Call(opts, &out, \"getTransferFee\", _tokenId)\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func (b *Bitmex) GetFeeByType(ctx context.Context, feeBuilder *exchange.FeeBuilder) (float64, error) {\n\tif feeBuilder == nil {\n\t\treturn 0, fmt.Errorf(\"%T %w\", feeBuilder, common.ErrNilPointer)\n\t}\n\tif !b.AreCredentialsValid(ctx) && // Todo check connection status\n\t\tfeeBuilder.FeeType == exchange.CryptocurrencyTradeFee {\n\t\tfeeBuilder.FeeType = exchange.OfflineTradeFee\n\t}\n\treturn b.GetFee(feeBuilder)\n}", "func CalculateFee(tx []byte, fees Fees) (uint64, error) {\n\tt := trace.New().Source(\"transaction.go\", \"\", \"CalculateFee\")\n\tsize := len(tx)\n\tlog.Println(trace.Info(\"TX size\").UTC().Add(\"bytes len\", fmt.Sprintf(\"%d\", size)).Append(t))\n\tstandardFee, err := fees.GetStandardFee()\n\tif err != nil {\n\t\tlog.Println(trace.Alert(\"no standard fee avaliable\").UTC().Error(err).Append(t))\n\t\treturn 0, fmt.Errorf(\"no standard fee available: %w\", err)\n\t}\n\tminingFee := (float64(size) / float64(standardFee.MiningFee.Bytes)) * float64(standardFee.MiningFee.Satoshis)\n\t// relayFee := (float64(size) / float64(standardFee.RelayFee.Bytes)) * float64(standardFee.RelayFee.Satoshis)\n\trelayFee := 0.0\n\ttotalFee := uint64(math.Ceil(miningFee + relayFee))\n\tlog.Println(trace.Info(\"calculating fee\").UTC().Add(\"size\", fmt.Sprintf(\"%d\", size)).Add(\"miningFee\", fmt.Sprintf(\"%.9f\", miningFee)).Add(\"relayFee\", fmt.Sprintf(\"%.9f\", relayFee)).Add(\"totalFee\", fmt.Sprintf(\"%d\", totalFee)).Append(t))\n\treturn 
uint64(totalFee), nil\n}", "func (msg MsgBeginUnstake) GetFee() sdk.Int {\n\treturn sdk.NewInt(PosFeeMap[msg.Type()])\n}", "func (_ElvTradableLocal *ElvTradableLocalCaller) GetTransferFee(opts *bind.CallOpts, _tokenId *big.Int) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _ElvTradableLocal.contract.Call(opts, &out, \"getTransferFee\", _tokenId)\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func (msg MsgUnjail) GetFee() sdk.Int {\n\treturn sdk.NewInt(PosFeeMap[msg.Type()])\n}", "func (msg MsgSend) GetFee() sdk.Int {\n\treturn sdk.NewInt(PosFeeMap[msg.Type()])\n}", "func (_Contract *ContractCaller) ProposalBurntFee(opts *bind.CallOpts) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _Contract.contract.Call(opts, &out, \"proposalBurntFee\")\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func (_TransferFeeProxy *TransferFeeProxyCaller) GetTransferFee(opts *bind.CallOpts, _tokenId *big.Int) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _TransferFeeProxy.contract.Call(opts, &out, \"getTransferFee\", _tokenId)\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func (msg MsgStake) GetFee() sdk.Int {\n\treturn sdk.NewInt(PosFeeMap[msg.Type()])\n}", "func (c *CoinbasePro) GetFeeByType(ctx context.Context, feeBuilder *exchange.FeeBuilder) (float64, error) {\n\tif feeBuilder == nil {\n\t\treturn 0, fmt.Errorf(\"%T %w\", feeBuilder, common.ErrNilPointer)\n\t}\n\tif !c.AreCredentialsValid(ctx) && // Todo check connection status\n\t\tfeeBuilder.FeeType == exchange.CryptocurrencyTradeFee {\n\t\tfeeBuilder.FeeType = exchange.OfflineTradeFee\n\t}\n\treturn c.GetFee(ctx, feeBuilder)\n}", "func (m *Payment) GetFee() int64 {\n\tif m != nil 
{\n\t\treturn m.Fee\n\t}\n\treturn 0\n}", "func (m *Payment) GetFee() int64 {\n\tif m != nil {\n\t\treturn m.Fee\n\t}\n\treturn 0\n}", "func (m *Payment) GetFee() int64 {\n\tif m != nil {\n\t\treturn m.Fee\n\t}\n\treturn 0\n}", "func (m *Payment) GetFee() int64 {\n\tif m != nil {\n\t\treturn m.Fee\n\t}\n\treturn 0\n}", "func (m *Payment) GetFee() int64 {\n\tif m != nil {\n\t\treturn m.Fee\n\t}\n\treturn 0\n}", "func (m *Payment) GetFee() int64 {\n\tif m != nil {\n\t\treturn m.Fee\n\t}\n\treturn 0\n}", "func (k *Kraken) GetFeeByType(feeBuilder *exchange.FeeBuilder) (float64, error) {\n\tif (k.APIKey == \"\" || k.APISecret == \"\") && // Todo check connection status\n\t\tfeeBuilder.FeeType == exchange.CryptocurrencyTradeFee {\n\t\tfeeBuilder.FeeType = exchange.OfflineTradeFee\n\t}\n\treturn k.GetFee(feeBuilder)\n}", "func (h *HUOBIHADAX) GetFee(feeBuilder *exchange.FeeBuilder) (float64, error) {\n\tvar fee float64\n\tif feeBuilder.FeeType == exchange.OfflineTradeFee || feeBuilder.FeeType == exchange.CryptocurrencyTradeFee {\n\t\tfee = calculateTradingFee(feeBuilder.Pair, feeBuilder.PurchasePrice, feeBuilder.Amount)\n\t}\n\tif fee < 0 {\n\t\tfee = 0\n\t}\n\n\treturn fee, nil\n}", "func (st *SignedTx) Fee() int64 {\n\treturn st.fee\n}", "func (k Keeper) getFeeRate(ctx sdk.Context, collateralType string) (fee sdk.Dec) {\n\tcollalateralParam, found := k.GetCollateral(ctx, collateralType)\n\tif !found {\n\t\tpanic(fmt.Sprintf(\"could not get fee rate for %s, collateral not found\", collateralType))\n\t}\n\treturn collalateralParam.StabilityFee\n}", "func (_Authority *AuthorityCaller) Fee(opts *bind.CallOpts) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _Authority.contract.Call(opts, out, \"fee\")\n\treturn *ret0, err\n}", "func (m *Hop) GetFee() int64 {\n\tif m != nil {\n\t\treturn m.Fee\n\t}\n\treturn 0\n}", "func (m *Hop) GetFee() int64 {\n\tif m != nil {\n\t\treturn m.Fee\n\t}\n\treturn 0\n}", "func (m *Hop) GetFee() int64 
{\n\tif m != nil {\n\t\treturn m.Fee\n\t}\n\treturn 0\n}", "func (m *Hop) GetFee() int64 {\n\tif m != nil {\n\t\treturn m.Fee\n\t}\n\treturn 0\n}", "func (m *Hop) GetFee() int64 {\n\tif m != nil {\n\t\treturn m.Fee\n\t}\n\treturn 0\n}", "func (m *Hop) GetFee() int64 {\n\tif m != nil {\n\t\treturn m.Fee\n\t}\n\treturn 0\n}", "func (m *Hop) GetFee() int64 {\n\tif m != nil {\n\t\treturn m.Fee\n\t}\n\treturn 0\n}", "func (m *Hop) GetFee() int64 {\n\tif m != nil {\n\t\treturn m.Fee\n\t}\n\treturn 0\n}", "func (m *Hop) GetFee() int64 {\n\tif m != nil {\n\t\treturn m.Fee\n\t}\n\treturn 0\n}", "func (_ElvTradable *ElvTradableCaller) BaseTransferFee(opts *bind.CallOpts) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _ElvTradable.contract.Call(opts, &out, \"baseTransferFee\")\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func (_L1Block *L1BlockCaller) Basefee(opts *bind.CallOpts) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _L1Block.contract.Call(opts, &out, \"basefee\")\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func GetFeePerByte() int {\n\treturn neogointernal.CallWithToken(Hash, \"getFeePerByte\", int(contract.ReadStates)).(int)\n}", "func (tx *Tx) FeeRate() uint64 {\n\treturn tx.feeRate\n}", "func (_ElvTradableLocal *ElvTradableLocalCaller) BaseTransferFee(opts *bind.CallOpts) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _ElvTradableLocal.contract.Call(opts, &out, \"baseTransferFee\")\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func (op *GenericOperation) TransactionFee() *big.Int {\n\tif op.Kind() != opKindTransaction {\n\t\treturn nil\n\t}\n\treturn op.parseSerializedNumberOffset(0)\n}", "func (s *BlocksService) Fee(ctx 
context.Context) (*BlocksFee, *http.Response, error) {\n\tvar responseStruct *BlocksFee\n\tresp, err := s.client.SendRequest(ctx, \"GET\", \"blocks/getFee\", nil, &responseStruct)\n\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn responseStruct, resp, err\n}", "func (p *Poloniex) GetFeeInfo(ctx context.Context) (Fee, error) {\n\tresult := Fee{}\n\n\treturn result, p.SendAuthenticatedHTTPRequest(ctx, exchange.RestSpot, http.MethodPost, poloniexFeeInfo, url.Values{}, &result)\n}", "func (_Cakevault *CakevaultCaller) CallFee(opts *bind.CallOpts) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _Cakevault.contract.Call(opts, &out, \"callFee\")\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func (_Contract *ContractCaller) ProposalFee(opts *bind.CallOpts) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _Contract.contract.Call(opts, &out, \"proposalFee\")\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func (_Cakevault *CakevaultCaller) PerformanceFee(opts *bind.CallOpts) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _Cakevault.contract.Call(opts, &out, \"performanceFee\")\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func (h *TxHeader) Fee() uint64 {\n\treturn h.MaxGas * h.GasPrice\n}", "func (tx *Tx) CalculateFee(f *FeeQuote) (uint64, error) {\r\n\ttotal := tx.TotalInputSatoshis() - tx.TotalOutputSatoshis()\r\n\tsats, _, err := tx.change(nil, f, false)\r\n\tif err != nil {\r\n\t\treturn 0, err\r\n\t}\r\n\treturn total - sats, nil\r\n}", "func (m *mTOServiceItem) FeeType() string {\n\treturn m.feeTypeField\n}", "func (_Contract *ContractCallerSession) ProposalBurntFee() (*big.Int, error) {\n\treturn 
_Contract.Contract.ProposalBurntFee(&_Contract.CallOpts)\n}", "func (serv *ExchangeServer) GetBtcFee() uint64 {\n\treturn uint64(serv.cfg.BtcFee)\n}", "func (t *Transaction) FeePerByte() int64 {\n\treturn t.NetworkFee / int64(t.Size())\n}", "func (client *Client) Fee() float64 {\n\treturn client.fee\n}", "func (_Contract *ContractSession) ProposalBurntFee() (*big.Int, error) {\n\treturn _Contract.Contract.ProposalBurntFee(&_Contract.CallOpts)\n}", "func (_L1Block *L1BlockCallerSession) Basefee() (*big.Int, error) {\n\treturn _L1Block.Contract.Basefee(&_L1Block.CallOpts)\n}", "func (_L1Block *L1BlockSession) Basefee() (*big.Int, error) {\n\treturn _L1Block.Contract.Basefee(&_L1Block.CallOpts)\n}", "func (_Authority *AuthoritySession) Fee() (*big.Int, error) {\n\treturn _Authority.Contract.Fee(&_Authority.CallOpts)\n}", "func (_Authority *AuthorityCallerSession) Fee() (*big.Int, error) {\n\treturn _Authority.Contract.Fee(&_Authority.CallOpts)\n}", "func EstimateFee(tx StdTx) StdFee {\n\treturn NewStdFee(txparam.DefaultMsgGas*uint64(len(tx.Msgs)), tx.Fee.GasPrice)\n}", "func CalculateFee(basic_op_fee int64, len_memo int64) int64 {\n\n\tvar basic_memo_fee int64 = 1\n\treturn basic_op_fee + len_memo*basic_memo_fee\n}", "func (_EtherDelta *EtherDeltaCaller) FeeTake(opts *bind.CallOpts) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _EtherDelta.contract.Call(opts, out, \"feeTake\")\n\treturn *ret0, err\n}", "func (_ElvTradable *ElvTradableCaller) PROXYTYPETRANSFERFEE(opts *bind.CallOpts) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _ElvTradable.contract.Call(opts, &out, \"PROXY_TYPE_TRANSFER_FEE\")\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func (_ElvTradableLocal *ElvTradableLocalCaller) PROXYTYPETRANSFERFEE(opts *bind.CallOpts) (*big.Int, error) {\n\tvar out []interface{}\n\terr := 
_ElvTradableLocal.contract.Call(opts, &out, \"PROXY_TYPE_TRANSFER_FEE\")\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func (_EtherDelta *EtherDeltaCaller) FeeMake(opts *bind.CallOpts) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _EtherDelta.contract.Call(opts, out, \"feeMake\")\n\treturn *ret0, err\n}", "func (tx *Hello) Fee(p types.Process, loader types.LoaderWrapper) *amount.Amount {\n\tsp := p.(*HelloWorld)\n\treturn sp.vault.GetDefaultFee(loader)\n}", "func (h *HitBTC) GetFeeInfo(ctx context.Context, currencyPair string) (Fee, error) {\n\tresult := Fee{}\n\terr := h.SendAuthenticatedHTTPRequest(ctx, exchange.RestSpot, http.MethodGet,\n\t\tapiV2FeeInfo+\"/\"+currencyPair,\n\t\turl.Values{},\n\t\ttradingRequests,\n\t\t&result)\n\n\treturn result, err\n}", "func (r Repository) Get(id string) (*entity.Fee, error) {\n\trecord := &entity.Fee{}\n\n\tresult := r.dbClient.\n\t\tModel(entity.Fee{}).\n\t\tWhere(\"transaction_id = ?\", id).\n\t\tFirst(record)\n\n\tif result.Error != nil {\n\t\tif errors.Is(result.Error, gorm.ErrRecordNotFound) {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, result.Error\n\t}\n\treturn record, nil\n}", "func (_EtherDelta *EtherDeltaCaller) FeeRebate(opts *bind.CallOpts) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _EtherDelta.contract.Call(opts, out, \"feeRebate\")\n\treturn *ret0, err\n}", "func CalculateFeeForTx(tx *bt.Tx, standardRate, dataRate *bt.Fee) uint64 {\n\n\t// Set the totals\n\tvar totalFee int\n\tvar totalDataBytes int\n\n\t// Set defaults if not found\n\tif standardRate == nil {\n\t\tstandardRate = bt.DefaultStandardFee()\n\t}\n\tif dataRate == nil {\n\t\tdataRate = bt.DefaultStandardFee()\n\t\t// todo: adjusted to 5/10 for now, since all miners accept that rate\n\t\tdataRate.FeeType = bt.FeeTypeData\n\t}\n\n\t// Set the total bytes of the 
tx\n\ttotalBytes := len(tx.ToBytes())\n\n\t// Loop all outputs and accumulate size (find data related outputs)\n\tfor _, out := range tx.GetOutputs() {\n\t\toutHexString := out.GetLockingScriptHexString()\n\t\tif strings.HasPrefix(outHexString, \"006a\") || strings.HasPrefix(outHexString, \"6a\") {\n\t\t\ttotalDataBytes += len(out.ToBytes())\n\t\t}\n\t}\n\n\t// Got some data bytes?\n\tif totalDataBytes > 0 {\n\t\ttotalBytes = totalBytes - totalDataBytes\n\t\ttotalFee += (dataRate.MiningFee.Satoshis * totalDataBytes) / dataRate.MiningFee.Bytes\n\t}\n\n\t// Still have regular standard bytes?\n\tif totalBytes > 0 {\n\t\ttotalFee += (standardRate.MiningFee.Satoshis * totalBytes) / standardRate.MiningFee.Bytes\n\t}\n\n\t// Safety check (possible division by zero?)\n\tif totalFee == 0 {\n\t\ttotalFee = 1\n\t}\n\n\t// Return the total fee as a uint (easier to use with satoshi values)\n\treturn uint64(totalFee)\n}", "func (transaction *FileCreateTransaction) GetMaxTransactionFee() Hbar {\n\treturn transaction.Transaction.GetMaxTransactionFee()\n}", "func GetFeeInfo(state kv.KVStoreReader, hname coretypes.Hname) (balance.Color, int64, int64) {\n\t//returns nil of contract not found\n\trec, err := FindContract(state, hname)\n\tif err != nil {\n\t\tif err != ErrContractNotFound {\n\t\t\tpanic(err)\n\t\t} else {\n\t\t\trec = nil\n\t\t}\n\t}\n\treturn GetFeeInfoByContractRecord(state, rec)\n}", "func totalFees(block *model.Block, receipts []*model.Receipt) *big.Float {\n\tfeesWei := new(big.Int)\n\tfor i, tx := range block.Transactions() {\n\t\tminerFee, _ := tx.EffectiveGasTip(block.BaseFee())\n\t\tfeesWei.Add(feesWei, new(big.Int).Mul(new(big.Int).SetUint64(receipts[i].GasUsed), minerFee))\n\t}\n\treturn new(big.Float).Quo(new(big.Float).SetInt(feesWei), new(big.Float).SetInt(big.NewInt(config.Ether)))\n}", "func (eth *Backend) FeeRate(ctx context.Context) (uint64, error) {\n\tbigGP, err := eth.node.suggestGasPrice(ctx)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn 
dexeth.ToGwei(bigGP)\n}", "func (a API) EstimateFeeGetRes() (out *float64, e error) {\n\tout, _ = a.Result.(*float64)\n\te, _ = a.Result.(error)\n\treturn \n}", "func (c *swapCoin) FeeRate() uint64 {\n\treturn c.gasPrice\n}", "func (c *DataCache) GetFeeInfo() (uint32, chainjson.FeeInfoMempool) {\n\tc.mtx.RLock()\n\tdefer c.mtx.RUnlock()\n\treturn c.height, c.ticketFeeInfo\n}", "func (_Cakevault *CakevaultCaller) WithdrawFee(opts *bind.CallOpts) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _Cakevault.contract.Call(opts, &out, \"withdrawFee\")\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func (tx *Transaction) MaxFee() int32 {\n\treturn int32(tx.Envelope.Tx.Fee)\n}", "func (transaction *AccountCreateTransaction) GetMaxTransactionFee() Hbar {\n\treturn transaction.Transaction.GetMaxTransactionFee()\n}", "func (p *Policy) getFeePerByte(ic *interop.Context, _ []stackitem.Item) stackitem.Item {\n\treturn stackitem.NewBigInteger(big.NewInt(p.GetFeePerByteInternal(ic.DAO)))\n}", "func (_EtherDelta *EtherDeltaSession) FeeTake() (*big.Int, error) {\n\treturn _EtherDelta.Contract.FeeTake(&_EtherDelta.CallOpts)\n}", "func (_EtherDelta *EtherDeltaCallerSession) FeeTake() (*big.Int, error) {\n\treturn _EtherDelta.Contract.FeeTake(&_EtherDelta.CallOpts)\n}", "func TestGetFeeByTypeOfflineTradeFee(t *testing.T) {\n\tt.Parallel()\n\tvar feeBuilder = setFeeBuilder()\n\t_, err := k.GetFeeByType(context.Background(), feeBuilder)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif !sharedtestvalues.AreAPICredentialsSet(k) {\n\t\tif feeBuilder.FeeType != exchange.OfflineTradeFee {\n\t\t\tt.Errorf(\"Expected %v, received %v\", exchange.OfflineTradeFee, feeBuilder.FeeType)\n\t\t}\n\t} else {\n\t\tif feeBuilder.FeeType != exchange.CryptocurrencyTradeFee {\n\t\t\tt.Errorf(\"Expected %v, received %v\", exchange.CryptocurrencyTradeFee, feeBuilder.FeeType)\n\t\t}\n\t}\n}", "func (b 
*Bitcoind) GetFeePerKb() (useFee, relayFee btcutil.Amount, err error) {\n\tvar netInfoResp struct {\n\t\tRelayFee float64 `json:\"relayfee\"`\n\t}\n\tvar walletInfoResp struct {\n\t\tPayTxFee float64 `json:\"paytxfee\"`\n\t}\n\tvar estimateResp struct {\n\t\tFeeRate float64 `json:\"feerate\"`\n\t}\n\n\tnetInfoRawResp, err := b.client.call(\"getnetworkinfo\", nil)\n\tif err == nil {\n\t\terr = json.Unmarshal(netInfoRawResp.Result, &netInfoResp)\n\t\tif err != nil {\n\t\t\treturn 0, 0, err\n\t\t}\n\t}\n\n\twalletInfoRawResp, err := b.client.call(\"getwalletinfo\", nil)\n\tif err == nil {\n\t\terr = json.Unmarshal(walletInfoRawResp.Result, &walletInfoResp)\n\t\tif err != nil {\n\t\t\treturn 0, 0, err\n\t\t}\n\t}\n\n\trelayFee, err = btcutil.NewAmount(netInfoResp.RelayFee)\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\tpayTxFee, err := btcutil.NewAmount(walletInfoResp.PayTxFee)\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\t// Use user-set wallet fee when set and not lower than the network relay\n\t// fee.\n\tif payTxFee != 0 {\n\t\tmaxFee := payTxFee\n\t\tif relayFee > maxFee {\n\t\t\tmaxFee = relayFee\n\t\t}\n\t\treturn maxFee, relayFee, nil\n\t}\n\n\tparams := []json.RawMessage{[]byte(\"6\")}\n\testimateRawResp, err := b.client.call(\"estimatesmartfee\", params)\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\terr = json.Unmarshal(estimateRawResp.Result, &estimateResp)\n\tif err == nil && estimateResp.FeeRate > 0 {\n\t\tuseFee, err = btcutil.NewAmount(estimateResp.FeeRate)\n\t\tif relayFee > useFee {\n\t\t\tuseFee = relayFee\n\t\t}\n\t\treturn useFee, relayFee, err\n\t}\n\n\tfmt.Println(\"warning: falling back to mempool relay fee policy\")\n\treturn relayFee, relayFee, nil\n}", "func TestPopulateTransaction_Fee(t *testing.T) {\n\tctx, _ := test.ContextWithLogBuffer()\n\n\tvar (\n\t\tdest Transaction\n\t\trow history.Transaction\n\t)\n\n\tdest = Transaction{}\n\trow = history.Transaction{\n\t\tTransactionWithoutLedger: 
history.TransactionWithoutLedger{\n\t\t\tMaxFee: 10000,\n\t\t\tFeeCharged: 100,\n\t\t},\n\t}\n\n\tassert.NoError(t, PopulateTransaction(ctx, row.TransactionHash, &dest, row))\n\tassert.Equal(t, int64(100), dest.FeeCharged)\n\tassert.Equal(t, int64(10000), dest.MaxFee)\n}", "func (gc *GovernanceContract) ProposalFee() (hexutil.Big, error) {\n\treturn gc.repo.GovernanceProposalFee(&gc.Address)\n}", "func fetchFeeFromOracle(ctx context.Context, net dex.Network, nb uint64) (float64, error) {\n\tvar url string\n\tif net == dex.Testnet {\n\t\turl = testnetExternalApiUrl\n\t} else { // mainnet and simnet\n\t\turl = externalApiUrl\n\t}\n\turl += \"/utils/estimatefee?nbBlocks=\" + strconv.FormatUint(nb, 10)\n\tctx, cancel := context.WithTimeout(ctx, 4*time.Second)\n\tdefer cancel()\n\tr, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\thttpResponse, err := client.Do(r)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tc := make(map[uint64]float64)\n\treader := io.LimitReader(httpResponse.Body, 1<<14)\n\terr = json.NewDecoder(reader).Decode(&c)\n\thttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdcrPerKB, ok := c[nb]\n\tif !ok {\n\t\treturn 0, errors.New(\"no fee rate for requested number of blocks\")\n\t}\n\treturn dcrPerKB, nil\n}", "func (_EtherDelta *EtherDeltaSession) FeeMake() (*big.Int, error) {\n\treturn _EtherDelta.Contract.FeeMake(&_EtherDelta.CallOpts)\n}", "func (transaction *ScheduleSignTransaction) GetMaxTransactionFee() Hbar {\n\treturn transaction.Transaction.GetMaxTransactionFee()\n}", "func (dcr *ExchangeWallet) feeRate(confTarget uint64) (uint64, error) {\n\tif feeEstimator, is := dcr.wallet.(FeeRateEstimator); is && !dcr.wallet.SpvMode() {\n\t\tdcrPerKB, err := feeEstimator.EstimateSmartFeeRate(dcr.ctx, int64(confTarget), chainjson.EstimateSmartFeeConservative)\n\t\tif err == nil && dcrPerKB > 0 {\n\t\t\treturn dcrPerKBToAtomsPerByte(dcrPerKB)\n\t\t}\n\t\tif err != nil 
{\n\t\t\tdcr.log.Warnf(\"Failed to get local fee rate estimate: %v\", err)\n\t\t} else { // dcrPerKB == 0\n\t\t\tdcr.log.Warnf(\"Local fee estimate is zero.\")\n\t\t}\n\t}\n\n\t// Either SPV wallet or EstimateSmartFeeRate failed.\n\tif !dcr.apiFeeFallback {\n\t\treturn 0, fmt.Errorf(\"fee rate estimation unavailable and external API is disabled\")\n\t}\n\n\tnow := time.Now()\n\n\tdcr.oracleFeesMtx.Lock()\n\tdefer dcr.oracleFeesMtx.Unlock()\n\toracleFee := dcr.oracleFees[confTarget]\n\tif now.Sub(oracleFee.stamp) < freshFeeAge {\n\t\treturn oracleFee.rate, nil\n\t}\n\tif dcr.oracleFailing {\n\t\treturn 0, errors.New(\"fee rate oracle is in a temporary failing state\")\n\t}\n\n\tdcr.log.Debugf(\"Retrieving fee rate from external fee oracle for %d target blocks\", confTarget)\n\tdcrPerKB, err := fetchFeeFromOracle(dcr.ctx, dcr.network, confTarget)\n\tif err != nil {\n\t\t// Flag the oracle as failing so subsequent requests don't also try and\n\t\t// fail after the request timeout. Remove the flag after a bit.\n\t\tdcr.oracleFailing = true\n\t\ttime.AfterFunc(freshFeeAge, func() {\n\t\t\tdcr.oracleFeesMtx.Lock()\n\t\t\tdcr.oracleFailing = false\n\t\t\tdcr.oracleFeesMtx.Unlock()\n\t\t})\n\t\treturn 0, fmt.Errorf(\"external fee rate API failure: %v\", err)\n\t}\n\tif dcrPerKB <= 0 {\n\t\treturn 0, fmt.Errorf(\"invalid fee rate %f from fee oracle\", dcrPerKB)\n\t}\n\t// Convert to atoms/B and error if it is greater than fee rate limit.\n\tatomsPerByte, err := dcrPerKBToAtomsPerByte(dcrPerKB)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tif atomsPerByte > dcr.feeRateLimit {\n\t\treturn 0, fmt.Errorf(\"fee rate from external API greater than fee rate limit: %v > %v\",\n\t\t\tatomsPerByte, dcr.feeRateLimit)\n\t}\n\tdcr.oracleFees[confTarget] = feeStamped{atomsPerByte, now}\n\treturn atomsPerByte, nil\n}", "func (a *RepoAPI) depositPropFee(params interface{}) (resp *rpc.Response) {\n\treturn rpc.Success(a.mods.Repo.DepositProposalFee(cast.ToStringMap(params)))\n}", "func 
(_EtherDelta *EtherDeltaCallerSession) FeeMake() (*big.Int, error) {\n\treturn _EtherDelta.Contract.FeeMake(&_EtherDelta.CallOpts)\n}", "func (_DogsOfRome *DogsOfRomeCaller) Usage_fee(opts *bind.CallOpts) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _DogsOfRome.contract.Call(opts, out, \"usage_fee\")\n\treturn *ret0, err\n}", "func getOfflineTradeFee(price, amount float64) float64 {\n\treturn 0.002 * price * amount\n}", "func getOfflineTradeFee(price, amount float64) float64 {\n\treturn 0.002 * price * amount\n}", "func (_IUniswapV2Factory *IUniswapV2FactorySession) FeeTo() (common.Address, error) {\r\n\treturn _IUniswapV2Factory.Contract.FeeTo(&_IUniswapV2Factory.CallOpts)\r\n}", "func (_Distributor *DistributorCaller) GetEarnedFeeAmountOf(opts *bind.CallOpts, validatorId *big.Int) (struct {\n\tEarned *big.Int\n\tEndMonth *big.Int\n}, error) {\n\tret := new(struct {\n\t\tEarned *big.Int\n\t\tEndMonth *big.Int\n\t})\n\tout := ret\n\terr := _Distributor.contract.Call(opts, out, \"getEarnedFeeAmountOf\", validatorId)\n\treturn *ret, err\n}", "func (api *DeprecatedApiService) quoteFee(params map[string]string) map[string]string {\n\tresult := make(map[string]string)\n\ttrsid, ok1 := params[\"txhash\"]\n\tif !ok1 {\n\t\tresult[\"err\"] = \"Param txhash must.\"\n\t\treturn result\n\t}\n\n\tvar trshx []byte\n\tif txhx, e := hex.DecodeString(trsid); e == nil && len(txhx) == 32 {\n\t\ttrshx = txhx\n\t} else {\n\t\tresult[\"err\"] = \"Transaction hash error.\"\n\t\treturn result\n\t}\n\n\t// Query transaction\n\ttx, ok := api.txpool.CheckTxExistByHash(trshx)\n\tif !ok || tx == nil {\n\t\tresult[\"err\"] = \"Not find transaction in txpool.\"\n\t\treturn result\n\t}\n\n\t// fee\n\tfeestr, ok2 := params[\"fee\"]\n\tif !ok2 {\n\t\tresult[\"err\"] = \"Param fee must.\"\n\t\treturn result\n\t}\n\n\tfeeamt, e2 := fields.NewAmountFromFinString(feestr)\n\tif e2 != nil {\n\t\tresult[\"err\"] = \"Param fee format error.\"\n\t\treturn 
result\n\t}\n\n\t// password\n\tpassword_or_privatekey, ok3 := params[\"password\"]\n\tif !ok3 {\n\t\tresult[\"err\"] = \"param password must.\"\n\t\treturn result\n\t}\n\n\tvar acc *account.Account = nil\n\tprivatekey, e2 := hex.DecodeString(password_or_privatekey)\n\tif len(password_or_privatekey) == 64 && e2 == nil {\n\t\tacc, e2 = account.GetAccountByPriviteKey(privatekey)\n\t\tif e2 != nil {\n\t\t\tresult[\"err\"] = \"Privite Key Error\"\n\t\t\treturn result\n\t\t}\n\t} else {\n\t\tacc = account.CreateAccountByPassword(password_or_privatekey)\n\t}\n\n\t// check\n\tif fields.Address(acc.Address).NotEqual(tx.GetAddress()) {\n\t\tresult[\"err\"] = \"Tx fee address password error.\"\n\t\treturn result\n\t}\n\n\t// change fee\n\ttx = tx.Clone()\n\ttx.SetFee(feeamt)\n\n\t// Private key\n\tallPrivateKeyBytes := make(map[string][]byte, 1)\n\tallPrivateKeyBytes[string(acc.Address)] = acc.PrivateKey\n\n\t// do sign\n\terr3 := tx.FillNeedSigns(allPrivateKeyBytes, nil)\n\tif err3 != nil {\n\t\tresult[\"err\"] = err3.Error()\n\t\treturn result\n\t}\n\n\t// add to pool\n\terr4 := api.txpool.AddTx(tx)\n\tif err4 != nil {\n\t\tresult[\"err\"] = err4.Error()\n\t\treturn result\n\t}\n\n\t// ok\n\tresult[\"status\"] = \"ok\"\n\treturn result\n}", "func (marketHandler *USDMarket) marketFee(position *Position) float64 {\n\t//If the close price is not zero it means the fee applyed is for closing the position\n\t//thus the price to apply the fee is the close price\n\tif position.ClosePrice > 0 {\n\t\treturn position.Size * position.ClosePrice * marketHandler.TakerFee\n\t}\n\treturn position.Size * position.EntryPrice * marketHandler.TakerFee\n}", "func (transaction *TokenUpdateTransaction) GetMaxTransactionFee() Hbar {\n\treturn transaction.Transaction.GetMaxTransactionFee()\n}", "func (_Cakevault *CakevaultSession) CallFee() (*big.Int, error) {\n\treturn _Cakevault.Contract.CallFee(&_Cakevault.CallOpts)\n}" ]
[ "0.7427633", "0.73902583", "0.72759", "0.72617024", "0.722174", "0.7202637", "0.7101204", "0.7060723", "0.7043434", "0.7031177", "0.702849", "0.7016284", "0.70002353", "0.6979829", "0.69199574", "0.69199574", "0.69199574", "0.69199574", "0.69199574", "0.69199574", "0.68892753", "0.68301445", "0.6811005", "0.679438", "0.6774688", "0.6769624", "0.6769624", "0.6769624", "0.6769624", "0.6769624", "0.6769624", "0.6769624", "0.6769624", "0.6769624", "0.66721505", "0.66550803", "0.6592949", "0.6559628", "0.6543061", "0.65411526", "0.65109944", "0.6506784", "0.6483156", "0.64458126", "0.64256245", "0.6364496", "0.6322591", "0.6306925", "0.6296313", "0.6246908", "0.6246254", "0.6242009", "0.6236799", "0.6227318", "0.6219423", "0.6200386", "0.61957955", "0.61925113", "0.617578", "0.61673987", "0.61595654", "0.61592597", "0.61546963", "0.6145053", "0.6116326", "0.60889006", "0.60818136", "0.6076026", "0.6070139", "0.6051348", "0.6035164", "0.603283", "0.6025267", "0.6024357", "0.6022529", "0.6009208", "0.5990947", "0.5980967", "0.5926152", "0.59131753", "0.59100866", "0.5901038", "0.5900411", "0.58940023", "0.58726496", "0.58588636", "0.58150434", "0.5797472", "0.5786603", "0.57792073", "0.57775545", "0.5768701", "0.576334", "0.576334", "0.5746073", "0.5726549", "0.57254064", "0.57018155", "0.5698348", "0.5697048" ]
0.6914461
20
SetGaugeMetric func(name string, help string, env string, envValue string, version string, versionValue string) (prometheusGauge Gauge)
func SetGaugeMetric(name string, help string, env string, envValue string, version string, versionValue string) (prometheusGauge prometheus.Gauge) { var ( gaugeMetric = prometheus.NewGauge(prometheus.GaugeOpts{ Name: name, Help: help, ConstLabels: prometheus.Labels{env: envValue, version: versionValue}, }) ) return gaugeMetric }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (cm *customMetrics) SetGauge(gauge string, value float64) {\n\n\tcm.gauges[gauge].Set(value)\n}", "func (r *Reporter) Gauge(name string, value float64, tags metrics.Tags) (err error) {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\terr = ErrPrometheusPanic\n\t\t}\n\t}()\n\n\tgauge := r.metrics.getGauge(r.stripUnsupportedCharacters(name), r.listTagKeys(tags))\n\tmetric, err := gauge.GetMetricWith(r.convertTags(tags))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmetric.Set(value)\n\treturn err\n}", "func gauge(name string, desc string) metric {\n\treturn metric{\n\t\ttyp: prometheus.GaugeValue,\n\t\taeroName: name,\n\t\tdesc: desc,\n\t}\n}", "func (m *CirconusMetrics) SetGauge(metric string, val interface{}) {\n\tm.gm.Lock()\n\tdefer m.gm.Unlock()\n\tm.gauges[metric] = m.gaugeValString(val)\n}", "func (r *Reporter) Gauge(metricName string, value float64, tags metrics.Tags) error {\n\treturn nil\n}", "func SetMetric(name string, labels map[string]string, value float64) error {\n\t// set vector value\n\tgv, ok := MetricVecs[name]\n\tif ok {\n\t\tgv.With(labels).Set(value)\n\t\treturn nil\n\t}\n\n\t// create label array for vector creation\n\tkeys := make([]string, 0, len(labels))\n\tfor k := range labels {\n\t\tkeys = append(keys, k)\n\t}\n\n\t// the vector does not exist, create it, register and then add this gauge metric to the gauge vector\n\tgv = *prometheus.NewGaugeVec(prometheus.GaugeOpts{Name: name}, keys)\n\tcustomMetrics.Registry.MustRegister(gv)\n\tMetricVecs[name] = gv\n\n\treturn nil\n}", "func (d TestSink) Gauge(c *telemetry.Context, stat string, value float64) {\n\td[stat] = TestMetric{\"Gauge\", value, c.Tags()}\n}", "func NewGaugeMetric(name, unit, sourceID string, opts ...MetricOption) GaugeMetric {\n\tg := &gaugeMetric{\n\t\tname: name,\n\t\tunit: unit,\n\t\tsourceID: sourceID,\n\t\ttags: make(map[string]string),\n\t}\n\n\tfor _, opt := range opts {\n\t\topt(g.tags)\n\t}\n\n\treturn g\n}", "func (_m *Reporter) Gauge(name string, 
value float64, tags ...monitoring.Tag) {\n\t_va := make([]interface{}, len(tags))\n\tfor _i := range tags {\n\t\t_va[_i] = tags[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, name, value)\n\t_ca = append(_ca, _va...)\n\t_m.Called(_ca...)\n}", "func NewGauge(metric string, dimensions map[string]string, value int64) *Gauge {\n\treturn &Gauge{metric: metric, dimensions: dimensions, value: value}\n}", "func (m *CirconusMetrics) Gauge(metric string, val interface{}) {\n\tm.SetGauge(metric, val)\n}", "func Gauge(path string, val float32) {\n\tmetrics.SetGauge(strings.Split(path, \".\"), val)\n}", "func Gauge(ref string, metricType string, options ...Option) *Stackdriver {\n\treturn newMetric(ref, \"GAUGE\", metricType, options...)\n}", "func (c *Client) Gauge(name string, value float64, tags []string, common bool) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tif !c.started {\n\t\treturn\n\t}\n\tkey := metricKey(name, tags)\n\tm, ok := c.metrics[key]\n\tif !ok {\n\t\tm = newmetric(name, metricKindGauge, tags, common)\n\t\tc.metrics[key] = m\n\t}\n\tm.value = value\n\tm.ts = float64(time.Now().Unix())\n\tc.newMetrics = true\n}", "func (p *Profiler) GaugeMetric(name string) IGauge {\n\tp.gauges[name] = NewGauge(p.namespace, name)\n\treturn p.gauges[name]\n}", "func (c *LoggerClient) Gauge(name string, value float64) {\n\tc.print(\"Gauge\", name, value, value)\n}", "func (m *Metrics) Gauge(name, help string) prometheus.Gauge {\n\tif gauge, ok := m.gauges[name]; !ok {\n\t\tgauge := prometheus.NewGauge(\n\t\t\tprometheus.GaugeOpts{\n\t\t\t\tNamespace: m.config.Namespace,\n\t\t\t\tName: name,\n\t\t\t\tHelp: help,\n\t\t\t})\n\n\t\tprometheus.MustRegister(gauge)\n\t\treturn gauge\n\t} else {\n\t\treturn gauge\n\t}\n}", "func NewGauge(namespace, subsystem, name, help string, labelMap map[string]string) prometheus.Gauge {\n\treturn prometheus.NewGauge(prometheus.GaugeOpts{\n\t\tNamespace: namespace,\n\t\tSubsystem: subsystem,\n\t\tName: name,\n\t\tHelp: help,\n\t\tConstLabels: 
labelMap,\n\t})\n}", "func Gauge(metric_in, m1Prefix string) (metric_out string) {\n\tif IsMetric20(metric_in) {\n\t\treturn metric_in\n\t}\n\treturn m1Prefix + metric_in\n}", "func WrapGauge(\n\tmetric string,\n\tdimensions map[string]string,\n\tvalue Getter,\n) *WrappedGauge {\n\treturn &WrappedGauge{\n\t\tmetric: metric,\n\t\tdimensions: dimensions,\n\t\tvalue: value,\n\t}\n}", "func (w *Gauge) Set(value float64, labelValues ...string) {\n\tw.watcher.WithLabelValues(labelValues...).Set(value)\n\tmetaLabel := w.prefix + \".\" + strings.Join(labelValues, \".\")\n\t(*w.client).Gauge(metaLabel, int64(value), 1.0)\n}", "func (c *Stats) Gauge(name string, value float64, tags []string) {\n\tselect {\n\tcase c.jobs <- &job{metric: &metric{\n\t\tname: name,\n\t\tclass: client.Gauge,\n\t\tvalue: value,\n\t\ttags: tags,\n\t}}:\n\tdefault:\n\t\tatomic.AddUint64(&c.dropped, 1)\n\t}\n}", "func (cm *customMetrics) AddGauge(namespace, subsystem, name, help, internalKey string) {\n\n\tcm.gauges[internalKey] = promauto.NewGauge(prometheus.GaugeOpts{\n\t\tNamespace: namespace,\n\t\tSubsystem: subsystem,\n\t\tName: name,\n\t\tHelp: help,\n\t})\n}", "func (datadog *Datadog) Gauge(name string, value float64, tags []string, rate float64) error {\n\terr := datadog.client.Gauge(name, value, tags, rate)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (s *DogStatsdSink) SetGauge(key []string, val float32, tags []Tag) {\n\tflatKey, t := s.getFlatkeyAndCombinedLabels(key, tags)\n\trate := 1.0\n\ts.client.Gauge(flatKey, float64(val), t, rate)\n}", "func (c *Client) Gauge(name string, value int) error {\n\treturn c.DataDog.Gauge(name, float64(value), nil, 1)\n}", "func (p *Provider) Gauge(name string, value float64, tags map[string]string) error {\n\treturn p.client.Gauge(name, value, p.formatTags(tags), p.rate)\n}", "func SetMetric() {\n\tmetricsstore.DefaultMetricsStore.VersionInfo.WithLabelValues(Version, BuildDate, GitCommit).Set(1)\n}", "func (r *ResettableMetrics) 
NewGauge(name string, help string) metrics.Gauge {\n\tgauge := metricsmon.NewGauge(name, help, []string{})\n\tgauge.Set(0)\n\tr.resettableMetricsMap[name] = &gauge\n\treturn gauge\n}", "func Metric(metric string) InfluxReporterFunc {\n\treturn func(c *InfluxReporter) {\n\t\tc.metric = metric\n\t}\n}", "func (h *HTTP) Gauge(stat string, value int64) error {\n\th.Lock()\n\th.json.SetP(value, stat)\n\th.Unlock()\n\treturn nil\n}", "func WithGaugeValue(name string, value float64, unit string) EmitGaugeOption {\n\treturn func(e *loggregator_v2.Envelope) {\n\t\te.GetGauge().Metrics[name] = &loggregator_v2.GaugeValue{Value: value, Unit: unit}\n\t}\n}", "func (c *collector) Gauge(name string, value float64) {\n\t// our buffer is full, log an error but continue\n\tif len(c.buffer) >= cap(c.buffer) {\n\t\tlogrus.Error(\"unable to add new gauges, buffer full, you may want to increase your buffer size or decrease your timeout\")\n\t\treturn\n\t}\n\n\tc.buffer <- gauge{Name: strings.ToLower(name), Value: value, MeasureTime: time.Now().Unix()}\n}", "func (c *Client) Gauge(stat string, value int, rate float64) error {\n\treturn c.send(stat, rate, \"%d|g\", value)\n}", "func (c *StatsClient) Gauge(name string, value float64) {\n\tif err := c.client.Gauge(name, value, c.tags, Rate); err != nil {\n\t\tc.logger().Printf(\"datadog.StatsClient.Gauge error: %s\", err)\n\t}\n}", "func (s *StatsdClient) Gauge(name string, value float64, tags []string, rate float64) error {\n\ts.lock.Lock()\n\tdefer s.lock.Unlock()\n\n\tif len(tags) == 0 {\n\t\ts.counts[name] = int64(value)\n\t}\n\n\tfor _, tag := range tags {\n\t\ts.counts[name+\":\"+tag] = int64(value)\n\t}\n\treturn nil\n}", "func (s Broker) Gauge(name string, value int) {\n\ts.Send(&gauge{Name: name, Value: value})\n}", "func NewGauge(name string, opts ...MOption) *GaugeUint64 {\n\treturn default_client.NewGauge(name, opts...)\n}", "func (m *Metric) SetGaugeValue(labelValues []string, value float64) error {\n\tif m.Type == None 
{\n\t\treturn errors.Errorf(\"metric '%s' not existed.\", m.Name)\n\t}\n\n\tif m.Type != Gauge {\n\t\treturn errors.Errorf(\"metric '%s' not Gauge type\", m.Name)\n\t}\n\tm.vec.(*prometheus.GaugeVec).WithLabelValues(labelValues...).Set(value)\n\treturn nil\n}", "func newGauge(namespace, subsystem, name string, labelNames []string, client *statsd.Statter, isPrometheusEnabled bool) *Gauge {\n\topts := prometheus.GaugeOpts{\n\t\tNamespace: namespace,\n\t\tSubsystem: subsystem,\n\t\tName: name,\n\t}\n\tvec := prometheus.NewGaugeVec(opts, labelNames)\n\tif isPrometheusEnabled {\n\t\tprometheus.MustRegister(vec)\n\t}\n\n\treturn &Gauge{\n\t\twatcher: vec,\n\t\tlabels: labelNames,\n\t\tclient: client,\n\t\tprefix: strings.Join([]string{namespace, subsystem, name}, \".\"),\n\t}\n}", "func Gauge(name string, value float64, tags []string, rate float64) {\n\tif ddog == nil {\n\t\tlog.Error(\"datadog client is not initialized\")\n\t\treturn\n\t}\n\n\terr := ddog.Client.Gauge(name, value, tags, rate)\n\tif err != nil {\n\t\tlog.WithFields(logrus.Fields{\n\t\t\t\"error\": err,\n\t\t\t\"name\": name,\n\t\t}).Error(\"Failed to send gauge data to datadog\")\n\t}\n}", "func (m *MetricUnion) Gauge() Gauge { return Gauge{ID: m.ID, Value: m.GaugeVal} }", "func setTelemetryMetric(val string, metric telemetry.Gauge) error {\n\tvalFloat, err := strconv.Atoi(val)\n\tif err == nil {\n\t\tmetric.Set(float64(valFloat), queryEndpoint, le.JoinLeaderValue)\n\t}\n\treturn err\n}", "func (p *Profiler) GaugeMetricWithLabel(name string, labels map[string]string) IGauge {\n\tp.gauges[name] = NewGaugeWithLabel(p.namespace, name, labels)\n\treturn p.gauges[name]\n}", "func (m *MockMetrics) SetGauge(arg0 string, arg1 float64, arg2 ...string) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{arg0, arg1}\n\tfor _, a := range arg2 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tm.ctrl.Call(m, \"SetGauge\", varargs...)\n}", "func (plugin *ExamplePlugin) Init() error {\n\n\t// add new metric to default registry 
(accessible at the path /metrics)\n\t//\n\t// the current value is returned by provided callback\n\t// created gauge is identified by tuple(namespace, subsystem, name) only the name field is mandatory\n\t// additional properties can be defined using labels - key-value pairs. They do not change over time for the given gauge.\n\terr := plugin.Prometheus.RegisterGaugeFunc(prom.DefaultRegistry, \"ns\", \"sub\", \"gaugeOne\",\n\t\t\"this metrics represents randomly generated numbers\", prometheus.Labels{\"Property1\": \"ABC\", \"Property2\": \"DEF\"}, func() float64 {\n\t\t\treturn rand.Float64()\n\t\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// create new registry that will be exposed at /custom path\n\terr = plugin.Prometheus.NewRegistry(customRegistry, promhttp.HandlerOpts{ErrorHandling: promhttp.ContinueOnError})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// create gauge using prometheus API\n\tplugin.temporaryCounter = prometheus.NewGauge(prometheus.GaugeOpts{\n\t\tName: \"Countdown\",\n\t\tHelp: \"This gauge is decremented by 1 each second, once it reaches 0 the gauge is removed.\",\n\t})\n\tplugin.counterVal = 60\n\tplugin.temporaryCounter.Set(float64(plugin.counterVal))\n\n\t// register created gauge to the custom registry\n\terr = plugin.Prometheus.Register(customRegistry, plugin.temporaryCounter)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// create gauge vector and register it\n\tplugin.gaugeVec = prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\tName: \"Vector\",\n\t\tHelp: \"This gauge groups multiple similar metrics.\",\n\t\tConstLabels: prometheus.Labels{\"type\": \"vector\", \"answer\": \"42\"},\n\t}, []string{orderLabel})\n\terr = plugin.Prometheus.Register(customRegistry, plugin.gaugeVec)\n\n\treturn err\n\n}", "func (m *Metrics) NewGauge(name string, description string) prometheus.Gauge {\n\n\tvar item prometheus.Gauge\n\n\tmc := m.Get(name)\n\n\tif mc == nil {\n\n\t\toptions := prometheus.GaugeOpts{\n\t\t\tName: name,\n\t\t\tHelp: 
description,\n\t\t}\n\n\t\titem = prometheus.NewGauge(options)\n\t\tprometheus.Register(item)\n\n\t\tmc = &metric{Name: name, Description: description, Type: Gauge, Inner: item}\n\n\t\tm.Set(name, mc)\n\n\t} else {\n\t\titem = mc.Inner.(prometheus.Gauge)\n\t}\n\n\treturn item\n}", "func (e *Exporter) NewGauge(name string, help string) *stats.Gauge {\n\tif e.name == \"\" || name == \"\" {\n\t\tv := stats.NewGauge(name, help)\n\t\taddUnnamedExport(name, v)\n\t\treturn v\n\t}\n\tlvar := stats.NewGauge(\"\", help)\n\tif exists := e.createCountTracker(name, help, lvar, reuseOnDup, typeCounter); exists != nil {\n\t\treturn exists.(*stats.Gauge)\n\t}\n\treturn lvar\n}", "func ProvideGauge(o prometheus.GaugeOpts, labelNames ...string) fx.Annotated {\n\treturn fx.Annotated{\n\t\tName: o.Name,\n\t\tTarget: func(f Factory) (metrics.Gauge, error) {\n\t\t\treturn f.NewGauge(o, labelNames)\n\t\t},\n\t}\n}", "func NewGauge(name, desc string, labelKeys []string) *Gauge {\n\t// new GaugeVec\n\tgaugeVec := prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\tName: name,\n\t\tHelp: desc,\n\t}, labelKeys)\n\n\t// register\n\tprometheus.MustRegister(gaugeVec)\n\n\treturn &Gauge{\n\t\tgaugeVec: gaugeVec,\n\t}\n}", "func (c *Client) NewGauge(name string, opts ...MOption) *GaugeUint64 {\n\treturn c.NewGaugeUint64(name, opts...)\n}", "func (r *Registry) Gauge(name string) *Gauge {\n\tr.mtx.RLock()\n\tgauge := r.gauges[name]\n\tr.mtx.RUnlock()\n\treturn gauge\n}", "func (m *MockMetrics) SetGauge(arg0 []string, arg1 float32) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"SetGauge\", arg0, arg1)\n}", "func (g *gaugeMetric) Set(n float64) {\n\tatomic.StoreUint64(&g.value, toUint64(n, 2))\n}", "func (s *StressReport) AddMetric(name string, value float64, unit string) {\n\tif s.Extras == nil {\n\t\ts.Extras = map[string]struct {\n\t\t\tValue float64\n\t\t\tUnit string\n\t\t}{}\n\t}\n\ts.Extras[name] = struct {\n\t\tValue float64\n\t\tUnit string\n\t}{\n\t\tValue: value,\n\t\tUnit: unit,\n\t}\n}", "func 
(m *CirconusMetrics) SetGaugeFunc(metric string, fn func() int64) {\n\tm.gfm.Lock()\n\tdefer m.gfm.Unlock()\n\tm.gaugeFuncs[metric] = fn\n}", "func NewGauge(name string, options ...Option) Gauge {\n\treturn newGauge(name, options...)\n}", "func (rg *StaticGauge) Set(value float64) {\n\trg.Base.watcher.WithLabelValues(rg.values...).Set(value)\n}", "func (e *Exporter) NewGaugeFunc(name string, help string, f func() int64) *stats.GaugeFunc {\n\tif e.name == \"\" || name == \"\" {\n\t\tv := stats.NewGaugeFunc(name, help, f)\n\t\taddUnnamedExport(name, v)\n\t\treturn v\n\t}\n\tlvar := stats.NewGaugeFunc(\"\", help, f)\n\t_ = e.createCountTracker(name, help, lvar, replaceOnDup, typeGauge)\n\treturn lvar\n}", "func NewGauge(w io.Writer, key string, interval time.Duration) metrics.Gauge {\n\tg := make(chan string)\n\tgo fwd(w, key, interval, g)\n\treturn statsdGauge(g)\n}", "func MetricName(name string) Option {\n\treturn func(opts *Options) {\n\t\topts.MetricName = name\n\t}\n}", "func (p *influxProvider) NewGauge(name string) metrics.Gauge {\n\treturn p.in.NewGauge(name)\n}", "func NewGauge(subsystem, name, help string, labels []string) *prometheus.GaugeVec {\n\treturn promauto.NewGaugeVec(\n\t\tprometheus.GaugeOpts{\n\t\t\tNamespace: namespace,\n\t\t\tSubsystem: subsystem,\n\t\t\tName: name,\n\t\t\tHelp: help,\n\t\t},\n\t\tlabels,\n\t)\n}", "func NewGauge() *Gauge {\n\tg := &Gauge{\n\t\tBlock: *NewBlock(),\n\t\tPercentColor: theme.GaugePercent,\n\t\tBarColor: theme.GaugeBar,\n\t\tLabel: \"{{percent}}%\",\n\t\tLabelAlign: AlignCenter,\n\t}\n\n\tg.Width = 12\n\tg.Height = 5\n\treturn g\n}", "func (f *Factory) Gauge(name string, tags map[string]string) metrics.Gauge {\n\tgauge := &gauge{\n\t\tgauges: make([]metrics.Gauge, len(f.factories)),\n\t}\n\tfor i, factory := range f.factories {\n\t\tgauge.gauges[i] = factory.Gauge(name, tags)\n\t}\n\treturn gauge\n}", "func (c *Client) Metric(v interface{}, metric string) error {\n\tPUrl := fmt.Sprintf(\"metrics/mbean/%s\", 
metric)\n\terr := c.Get(&v, PUrl, nil)\n\treturn err\n}", "func TestMetric(value interface{}, name ...string) telegraf.Metric {\n\tif value == nil {\n\t\tpanic(\"Cannot use a nil value\")\n\t}\n\tmeasurement := \"test1\"\n\tif len(name) > 0 {\n\t\tmeasurement = name[0]\n\t}\n\ttags := map[string]string{\"tag1\": \"value1\"}\n\tpt := metric.New(\n\t\tmeasurement,\n\t\ttags,\n\t\tmap[string]interface{}{\"value\": value},\n\t\ttime.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),\n\t)\n\treturn pt\n}", "func (g *PCPGauge) Set(val float64) error {\n\tg.mutex.Lock()\n\tdefer g.mutex.Unlock()\n\treturn g.set(val)\n}", "func (c *Context) Gauge(stat string, value float64) {\n\tfor _, sink := range c.sinks {\n\t\tsink.Gauge(c, stat, value)\n\t}\n}", "func (m *MockClient) Gauge(arg0 string, arg1 float64, arg2 []string, arg3 float64) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Gauge\", arg0, arg1, arg2, arg3)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func newGaugeOperator(name, help string) m.Gauge {\n\treturn m.NewGauge(appName, \"operator\", name, \"operator transaction logs \"+help)\n}", "func (m *Metrics) GaugeVec(name, help string, labels ...string) *prometheus.GaugeVec {\n\tif gauge, ok := m.gaugeVecs[name]; !ok {\n\t\tgauge := prometheus.NewGaugeVec(\n\t\t\tprometheus.GaugeOpts{\n\t\t\t\tNamespace: m.config.Namespace,\n\t\t\t\tName: name,\n\t\t\t\tHelp: help,\n\t\t\t}, labels)\n\n\t\tprometheus.MustRegister(gauge)\n\t\treturn gauge\n\t} else {\n\t\treturn gauge\n\t}\n}", "func (mr *MockMetricsMockRecorder) SetGauge(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\tvarargs := append([]interface{}{arg0, arg1}, arg2...)\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"SetGauge\", reflect.TypeOf((*MockMetrics)(nil).SetGauge), varargs...)\n}", "func (p *PromGauge) Set(value int64) error {\n\tp.ctr.Set(float64(value))\n\treturn nil\n}", "func (r *Registry) NewGauge(name string, reader GaugeReader) 
*Gauge {\n\treturn r.NewGaugeWithUnit(name, \"\", reader)\n}", "func (p *Profiler) GetGaugeMetric(name string) (gauge IGauge) {\n\tif v, ok := p.gauges[name]; ok {\n\t\tgauge = v\n\t}\n\treturn\n}", "func (g *PCPGaugeVector) Set(val float64, instance string) error {\n\tg.mutex.Lock()\n\tdefer g.mutex.Unlock()\n\treturn g.setInstance(val, instance)\n}", "func NewGauge(gv *prometheus.GaugeVec) metrics.Gauge {\n\treturn &gauge{\n\t\tgv: gv,\n\t}\n}", "func (g *Float64Gauge) Set(ctx context.Context, value float64, labels LabelSet) {\n\tg.recordOne(ctx, NewFloat64MeasurementValue(value), labels)\n}", "func NewRegisteredUGaugeT(name string, tagsMap map[string]string, r Registry) UGauge {\n\tc := NewUGauge()\n\tif nil == r {\n\t\tr = DefaultRegistry\n\t}\n\tr.RegisterT(name, tagsMap, c)\n\treturn c\n}", "func (ac *Accumulator) AddGauge(measurement string, fields map[string]interface{},\n\ttags map[string]string, t ...time.Time) {\n\t// as of right now metric always returns a nil error\n\tm, _ := metric.New(measurement, tags, fields, getTime(t), telegraf.Gauge)\n\tac.AddMetric(m)\n}", "func (b *B) ReportMetric(n float64, unit string) {}", "func IncrementGauge(ctx context.Context, metric string) {\n\t// The field name we use is the specified metric name prepended with FieldnamePrefixGauge to designate that it is a Prometheus gauge metric\n\t// The collector will replace that prefix with \"fn_\" and use the result as the Prometheus metric name.\n\tfieldname := FieldnamePrefixGauge + metric\n\n\t// Spans are not processed by the collector until the span ends, so to prevent any delay\n\t// in processing the stats when the current span is long-lived we create a new span for every call\n\t// suffix the span name with SpannameSuffixDummy to denote that it is used only to hold a metric and isn't itself of any interest\n\tspan, ctx := opentracing.StartSpanFromContext(ctx, fieldname+SpannameSuffixDummy)\n\tdefer span.Finish()\n\n\t// gauge metrics are actually float64; here we log 
that it should be increased by +1\n\tspan.LogFields(log.Float64(fieldname, 1.))\n}", "func newGaugePixels(name, help string) m.Gauge {\n\treturn m.NewGauge(appName, \"pixel\", name, \"pixel \"+help)\n}", "func NewGauge(db DB.DBI, g *Gauge) *Gauge {\n\tg.db = db\n\tg.G = ui.NewGauge()\n\tmerge.Merge(g, g.G, \"G\", \"db\")\n\treturn g\n}", "func pushMetric(addr string, interval time.Duration) {\n\tif interval == zeroDuration || len(addr) == 0 {\n\t\tlog.Info(\"disable Prometheus push client\")\n\t\treturn\n\t}\n\tlog.Info(\"start prometheus push client\", zap.String(\"server addr\", addr), zap.String(\"interval\", interval.String()))\n\tgo prometheusPushClient(addr, interval)\n}", "func NewRegisteredFunctionalUGaugeT(name string, tagsMap map[string]string, r Registry, f func() uint64) UGauge {\n\tc := NewFunctionalUGauge(f)\n\tif nil == r {\n\t\tr = DefaultRegistry\n\t}\n\tr.RegisterT(name, tagsMap, c)\n\treturn c\n}", "func (g *gaugeMetric) Emit(c LogClient) {\n\toptions := []loggregator.EmitGaugeOption{\n\t\tloggregator.WithGaugeValue(\n\t\t\tg.name,\n\t\t\ttoFloat64(atomic.LoadUint64(&g.value), 2),\n\t\t\tg.unit,\n\t\t),\n\t\tg.sourceIDOption,\n\t}\n\n\tfor k, v := range g.tags {\n\t\toptions = append(options, loggregator.WithEnvelopeTag(k, v))\n\t}\n\n\tc.EmitGauge(options...)\n}", "func NewGauge(max int, writer io.Writer) *Gauge {\n\treturn &Gauge{\n\t\tmax: max,\n\t\tcur: 0,\n\t\twriter: writer,\n\t}\n}", "func (w *PrometheusWriter) Write(metric model.Metric) error {\n\tduration := float64(metric.Duration / time.Millisecond)\n\treason := \"\"\n\tif metric.Error != \"\" {\n\t\treason = strings.SplitN(metric.Error, \":\", 2)[0]\n\t\treason = strings.ToLower(reason)\n\t\thealthCheckStatusGauge.With(prometheus.Labels{\n\t\t\t\"name\": metric.Name,\n\t\t}).Set(0)\n\t\thealthCheckErrorCounter.With(prometheus.Labels{\n\t\t\t\"name\": metric.Name,\n\t\t\t\"reason\": reason,\n\t\t}).Inc()\n\t} else {\n\t\thealthCheckStatusGauge.With(prometheus.Labels{\n\t\t\t\"name\": 
metric.Name,\n\t\t}).Set(1)\n\t}\n\thealthCheckResponseTimeGauge.With(prometheus.Labels{\n\t\t\"name\": metric.Name,\n\t}).Set(duration)\n\n\treturn nil\n}", "func (NoopProvider) Gauge(_ string, _ ...string) Gauge {\n\treturn noopInstrument{}\n}", "func (c *Client) IncrementGauge(stat string, value int, rate float64) error {\n\treturn c.send(stat, rate, \"+%d|g\", value)\n}", "func TestMetric(value interface{}, name ...string) optic.Metric {\n\tif value == nil {\n\t\tpanic(\"Cannot use a nil value\")\n\t}\n\tnamespace := \"test1\"\n\tif len(name) > 0 {\n\t\tnamespace = name[0]\n\t}\n\ttags := map[string]string{\"tag1\": \"value1\"}\n\tm, _ := metric.New(\n\t\tnamespace,\n\t\ttags,\n\t\tmap[string]interface{}{\"value\": value},\n\t\ttime.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),\n\t)\n\treturn m\n}", "func (m *UserExperienceAnalyticsMetricHistory) SetMetricType(value *string)() {\n err := m.GetBackingStore().Set(\"metricType\", value)\n if err != nil {\n panic(err)\n }\n}", "func AddGauge(gaugeVec *prometheus.GaugeVec, v float64) {\n\tif gaugeVec == nil {\n\t\treturn\n\t}\n\tgaugeVec.With(nil).Add(v)\n}", "func (h *Float64GaugeHandle) Set(ctx context.Context, value float64) {\n\th.RecordOne(ctx, NewFloat64MeasurementValue(value))\n}", "func (mr *MockMetricsMockRecorder) SetGauge(arg0, arg1 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"SetGauge\", reflect.TypeOf((*MockMetrics)(nil).SetGauge), arg0, arg1)\n}", "func (l *Librato) AddGauge(g Gauge) {\n\tselect {\n\tcase l.publisher.measures <- g:\n\tdefault:\n\t\tl.publisher.reportError(fmt.Errorf(\"gauge could not be added to the metrics queue\"))\n\t}\n}", "func (r *GoMetricsRegistry) Register(name string, metric interface{}) error {\n\tr.mutex.Lock()\n\tdefer r.mutex.Unlock()\n\n\tif r.get(name) != nil {\n\t\treturn fmt.Errorf(\"metric '%v' already registered\", name)\n\t}\n\n\tr.doRegister(name, metric)\n\treturn nil\n}", "func 
heartbeatGauge(m *Metrics, s heartbeatState) (g *metric.Gauge) {\n\tswitch s {\n\tcase heartbeatInitializing:\n\t\tg = m.HeartbeatsInitializing\n\tcase heartbeatNominal:\n\t\tg = m.HeartbeatsNominal\n\tcase heartbeatFailed:\n\t\tg = m.HeartbeatsFailed\n\t}\n\treturn g\n}" ]
[ "0.6838521", "0.6675083", "0.66375196", "0.6615935", "0.65820676", "0.6513595", "0.65016466", "0.64129496", "0.63703287", "0.63678575", "0.6307274", "0.62895155", "0.6248275", "0.6245103", "0.623869", "0.6179788", "0.61300707", "0.6121112", "0.61141324", "0.60184526", "0.59965587", "0.5986529", "0.59642714", "0.59632754", "0.59117275", "0.5909272", "0.5898975", "0.5870922", "0.5870361", "0.5826785", "0.58132577", "0.5737406", "0.5734413", "0.5701746", "0.5700612", "0.5693804", "0.56561834", "0.5650884", "0.5592289", "0.55863106", "0.5564334", "0.55584157", "0.55564415", "0.55272824", "0.5507992", "0.5487559", "0.5486788", "0.5472195", "0.5467651", "0.5448487", "0.54319596", "0.5389062", "0.53851515", "0.5356809", "0.53389674", "0.53389263", "0.5331457", "0.5300694", "0.5287269", "0.5255241", "0.52454895", "0.52337784", "0.52223104", "0.5217662", "0.5163151", "0.5160226", "0.5140848", "0.51340044", "0.51263714", "0.5103054", "0.5068544", "0.5062514", "0.505306", "0.50526595", "0.5030322", "0.5024923", "0.5012643", "0.5006594", "0.4982494", "0.49820685", "0.49775368", "0.49707535", "0.4969695", "0.49542794", "0.49334002", "0.49178436", "0.49075064", "0.48929632", "0.48915827", "0.48888737", "0.48873228", "0.48845622", "0.48839492", "0.48784626", "0.48777816", "0.4875654", "0.48462582", "0.482323", "0.48128527", "0.4810939" ]
0.8258457
0
/ import "github.com/01edu/z01" / import ( "fmt" )
func AlphaCount(str string) int { counter := 0 list := []byte(str) for _, letter := range list { if letter >= 65 && letter <= 90 || letter <= 122 && letter >= 97 { counter++ } } return counter }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func main() {\n\t_ := \"gitlab.com/username/library19\"\n\tfmt.Println(\"import \\\"gitlab.com/username/library20\\\"\")\n\tfmt.Println(\"import gitlab.com/username/library20)\n\n}", "func Imports() {\n\tfmt.Printf(\"Now you have %g problems.\\n\", math.Sqrt(7))\n}", "func main() {\n\tc, _ := d.BuildDigDependencies()\n\n\tdig.Visualize(c, os.Stdout)\n}", "func main() {\n\tfmt.Println(\"GitHub is for everyone\")\n}", "func version() {\n fmt.Printf(\"v%s\\ncommit=%s\\n\", versionNumber, commitId)\n}", "func printVersion() {\n\tfmt.Printf(`Version: %s\nGo version: %s\nGit commit: %s\nBuilt: %s\n`, Version, GoVersion, BuildCommit, BuildTime)\n}", "func main() {\n\n\t// (Print - Println) - Printf\n\n\t/* \tfmt.Println(\"Merhaba\")\n\t \tfmt.Print(\"Merhaba\")\n\t \tfmt.Println(\"\")\n\t \tfmt.Printf(\"Merhaba\")\t */\n\n\t/* name := \"Arin\" */\n\n\t/* \tfmt.Print(name)\n\t \tfmt.Println(name)\n\t \tfmt.Printf(name) */\n\n\t/* \tfmt.Print(\"Benim Adım\", name)\n\t \tfmt.Println(\"\")\n\t \tfmt.Println(\"Benim Adım \", name)\n\t \tfmt.Printf(\"Benim Adım %X %T\", name, name)\n\n\t\t// the value in a default format\n\t\t// %T\ta Go-syntax representation of the type of the value */\n\n\t/* \tx := 100\n\t \ty := 20\n\t \tz := 30\n\n\t \tfmt.Printf(\"%b %d %o\", x, y, z) */\n\n\t// %b\tbase 2\n\t// %d\tbase 10\n\t// %o\tbase 8\n\n\t/* name, age := \"Alperen\", 23 */\n\n\t// fmt.Print(\"Benim Adım \", name, \", ve ben \", age, \" yaşındayım.\")\n\t// fmt.Println(\"Benim Adım\", name, \"ve ben\", age, \"yaşındayım.\")\n\t// fmt.Printf(\"Benim Adım %v, ve ben %v yaşındayım\", name, age)\n\n\t//VISIBILITY\n\n\t/* \tfmt.Println(x)\n\n\t \tmyFunc() */\n\n\t/* \tvar coin string // count - customer - coin\n\t \t// Go camel case isimlendirme kullanılır.\n\t \tvar coinType string\n\t \tvar custName string\n\t \t// kısaltmalar büyük harflerle yazılır\n\t \tvar URL // Url değil\n\t \tvar HTTP // http değil \"xyzHTTP\" */\n\n}", "func DisplayVersion() {\n\tfmt.Printf(`package: 
%s\nversion: %s\nrevision: %s\n`, Package, Version, Revision)\n}", "func main() {\n\t// Get a greeting message and print it.\n\t//Access the Hello function in the greetings package\n\tmessage := greetings.Hello(\"Gladys\")\n\tfmt.Println(message)\n}", "func main() {\n\n\ttrygo.PrintTryGo()\n\ttrygo.PrintMoreGo()\n\n}", "func printOutputHeader() {\n\tfmt.Println(\"\")\n\tfmt.Println(\"FreeTAXII TestLab - Basic Connectivity Tests\")\n\tfmt.Println(\"Copyright: Bret Jordan\")\n\tfmt.Println(\"Version:\", Version)\n\tif Build != \"\" {\n\t\tfmt.Println(\"Build:\", Build)\n\t}\n\tfmt.Println(\"\")\n}", "func main() {\n\tfmt.Println(\"Program_1: including some mongodb packages into header which are successful but golang will complain if packages are imported and not used so commented them out. :D\")\n}", "func main() {\n\tfmt.Println(\"Hello, World\") //formatting text and displaying it to console\n\n\tfmt.Println(quote.Go())\n}", "func mkzversion(dir, file string) {\n\tout := fmt.Sprintf(\n\t\t\"// auto generated by go tool dist\\n\"+\n\t\t\t\"\\n\"+\n\t\t\t\"package runtime\\n\"+\n\t\t\t\"\\n\"+\n\t\t\t\"const defaultGoroot = `%s`\\n\"+\n\t\t\t\"const theVersion = `%s`\\n\"+\n\t\t\t\"const goexperiment = `%s`\\n\"+\n\t\t\t\"const stackGuardMultiplier = %d\\n\"+\n\t\t\t\"var buildVersion = theVersion\\n\", goroot_final, findgoversion(), os.Getenv(\"GOEXPERIMENT\"), stackGuardMultiplier())\n\n\twritefile(out, file, writeSkipSame)\n}", "func main() {\n\tvar output, _ = exec.Command(\"go\", \"version\").Output()\n\tfmt.Printf(string(output))\n}", "func printOutputHeader() {\n\tfmt.Println(\"\")\n\tfmt.Println(\"FreeTAXII Server\")\n\tfmt.Println(\"Copyright, Bret Jordan\")\n\tfmt.Println(\"Version:\", sVersion)\n\tfmt.Println(\"\")\n}", "func main() {\n\tvar err error\n\tvar buffer bytes.Buffer\n\n\t// Generate the User struct\n\tuserStruct, userImport, err := Generate(User{})\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\t// Generate the UserStatus 
struct\n\tuserStatus, statusImport, err := Generate(UserStatus{})\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\t// Write the package declaration\n\tbuffer.Write([]byte(\"package model\\n\"))\n\n\t// Write the imports\n\tbuffer.Write([]byte(\"import (\\n\"))\n\tfor _, i := range userImport {\n\t\tbuffer.Write([]byte(fmt.Sprintf(`\"%s\"`, i) + \"\\n\"))\n\t}\n\tfor _, i := range statusImport {\n\t\tbuffer.Write([]byte(fmt.Sprintf(`\"%s\"`, i) + \"\\n\"))\n\t}\n\tbuffer.Write([]byte(\")\\n\"))\n\n\t// Wrote the structs\n\tbuffer.Write(userStruct)\n\tbuffer.Write([]byte(\"\\n\\n\"))\n\tbuffer.Write(userStatus)\n\tbuffer.Write([]byte(\"\\n\\n\"))\n\n\t// Format the buffer using go fmt\n\tout, err := format.Source(buffer.Bytes())\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\t// Write the file\n\terr = ioutil.WriteFile(\"model.go\", out, 0644)\n\tif err != nil {\n\t\tlog.Println(\"File save error\")\n\t\treturn\n\t}\n}", "func main(){\n\n\t// print hello world using fmt packages' Printf function\n\tfmt.Printf(\"Hello world!\\n\")\n\n\t// use custom package and call Sqrt on it\n\tfmt.Printf(\"Sqrt(2) = %v\\n\", newmath.Sqrt(2))\n\n\t// use the google newmath \n\tfmt.Printf(\"Sqrt(2) = %v\\n\", gmath.Sqrt(2))\n}", "func printVersion() {\n\tfmt.Printf(\"Amore version: %v\", amoreVersion)\n}", "func printHeader() {\n\tfmt.Fprintf(os.Stdout, \"sqlite gobroem, v%s\\n\", version)\n}", "func (g *Generator) format(outputFilename string) []byte {\n\tsrc, err := format.Source(g.buf.Bytes())\n\tif err != nil {\n\t\t// Should never happen, but can arise when developing this code.\n\t\t// The user can compile the output to see the error.\n\t\tlog.Printf(\"warning: internal error: invalid Go generated: %s\", err)\n\t\tlog.Printf(\"warning: compile the package to analyze the error\")\n\t\treturn g.buf.Bytes()\n\t}\n\n\timportedSrc, err := imports.Process(outputFilename, src, nil)\n\tif err != nil {\n\t\tlog.Printf(\"warning: internal error: invalid Go 
generated: %s\", err)\n\t}\n\n\treturn importedSrc\n}", "func (g *Gonerator) format() ([]byte, error) {\n\toriginal := g.buf.Bytes()\n\n\tresult, err := gotools.GoFmt(original)\n\tif err != nil {\n\t\t// Should never happen, but can arise when developing this code.\n\t\t// The user can compile the output to see the error.\n\t\tlog.Printf(\"[%s] warning: internal error: invalid Go gonerated: %s\", g.data.OutputFile, err)\n\t\tlog.Printf(\"[%s] warning: compile the package to analyze the error\", g.data.OutputFile)\n\t\treturn original, err\n\t}\n\n\tresult, err = gotools.GoImports(g.data.OutputFile, result)\n\tif err != nil {\n\t\t// Should never happen, but can arise when developing this code.\n\t\t// The user can compile the output to see the error.\n\t\tlog.Printf(\"[%s] warning: internal error: invalid Go gonerated: %s\", g.data.OutputFile, err)\n\t\tlog.Printf(\"[%s] warning: compile the package to analyze the error\", g.data.OutputFile)\n\t\treturn original, err\n\t}\n\n\treturn result, err\n}", "func main() {\n\tx := 10\n\n\tfmt.Printf(\"i said %v %v times\\n\", y, x)\n\n\tfmt.Println(z)\n\n}", "func main() {\n\tgitRepo := git.NewDefaultVersionGitRepo()\n\tbumper := version.NewConventionalCommitBumpStrategy(gitRepo)\n\tversion, err := bumper.Bump()\n\n\tif err != nil {\n\t\tlog.Error(\"Cannot bump version caused by: %v\", err)\n\t\tos.Exit(1)\n\t}\n\tfmt.Println(version)\n}", "func main() {\n\t// plain\n\tfmt.Println(\"Just print \" + \"something\")\n\n\t// quote\n\tfmt.Println(quote.Hello())\n\n\t// rand number -- cheat sheet for formatting https://yourbasic.org/golang/fmt-printf-reference-cheat-sheet/#printf\n\trand.Seed(time.Now().Unix()) // seed the generator with current time\n\tfmt.Printf(\"The random number of this unique moment is %.3g\\n\", float64(rand.Intn(10))*rand.Float64()*math.E)\n\n\t// looped print in function\n\tfmt.Println(repeat(3, \"hello\\n\"))\n\n\t// You were the choosen one!\n\tfmt.Println(\"Imperium strikes 
back:\")\n\tfmt.Println(matrix_repeat(5, 3, \"|-o-|\"))\n\tfmt.Printf(\" \\t|-@@-|\\n\\n\")\n\tfmt.Println(matrix_repeat(5, 3, \"|-o-|\"))\n\n\t// go also has multiple results returned, niice\n\tlooking_for := \"spark\"\n\thost, port := zookeeper(looking_for)\n\tif port > 0 {\n\t\tfmt.Println(host, port)\n\t} else {\n\t\tfmt.Println(\"service not found\")\n\t}\n\n}", "func main() {\n\tfirst, last := greet(\"Luis \", \"Benavides \")\n\tfmt.Printf(\"%s %s \\n\", first, last)\n}", "func PrintNum(x int) {\n\tfmt.Println(\"this is a method in another package\")\n\tfmt.Println(x)\n}", "func main() {\n\tversion = \"k/g\"\n\tMain(os.Args[1:])\n}", "func main() {\n\tfmt.Printf(\"%T\\n%T\\n%T\\n\", x, y, z)\n\tfmt.Println(x)\n\tfmt.Println(y)\n\tfmt.Println(z)\n}", "func main() {\n\terr := highLevelExample()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func mkzbootstrap(file string) {\n\tout := fmt.Sprintf(\n\t\t\"// auto generated by go tool dist\\n\"+\n\t\t\t\"\\n\"+\n\t\t\t\"package obj\\n\"+\n\t\t\t\"\\n\"+\n\t\t\t\"import \\\"runtime\\\"\\n\"+\n\t\t\t\"\\n\"+\n\t\t\t\"const defaultGOROOT = `%s`\\n\"+\n\t\t\t\"const defaultGO386 = `%s`\\n\"+\n\t\t\t\"const defaultGOARM = `%s`\\n\"+\n\t\t\t\"const defaultGOOS = runtime.GOOS\\n\"+\n\t\t\t\"const defaultGOARCH = runtime.GOARCH\\n\"+\n\t\t\t\"const defaultGO_EXTLINK_ENABLED = `%s`\\n\"+\n\t\t\t\"const version = `%s`\\n\"+\n\t\t\t\"const stackGuardMultiplier = %d\\n\"+\n\t\t\t\"const goexperiment = `%s`\\n\",\n\t\tgoroot_final, go386, goarm, goextlinkenabled, findgoversion(), stackGuardMultiplier(), os.Getenv(\"GOEXPERIMENT\"))\n\n\twritefile(out, file, writeSkipSame)\n}", "func main() {\n\t// default writer is os.Stdout\n\tif err := cli.Root(root,\n\t\tcli.Tree(jfDef),\n\t\tcli.Tree(fjDef)).Run(os.Args[1:]); err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t}\n\tfmt.Println(\"\")\n}", "func main() {\n\tapp := kingpin.New(\"gg\", \"a git commit & push\")\n\tautoCommit(app) //\tgit add\n\tgetMessage(app) //\tgit commit 
-m\n\tpushRemote(app) //\tgit push -u\n\tgetCommit(app) //\tgit log --date=short --no-merges --pretty=format:\"%cd (@%cn) %h %s\"\n\tfullCommand(app)\n\tkingpin.MustParse(app.Parse(os.Args[1:]))\n}", "func main() {\n\tx := 42\n\ty := \"James Bond\"\n\tz := true\n\n\tfmt.Println(x, y, z)\n\tfmt.Println(x)\n\tfmt.Println(y)\n\tfmt.Println(z)\n}", "func printUsage() {\n\tfmt.Println(\"----------------------------------------------\")\n\tfmt.Println(\"Usage: Converts the .db file into a .sql file.\")\n\tfmt.Println(\"Usage Example: ./db_convert -file default.db\")\n}", "func main() {\n\tfmt.Println(a)\n\tfmt.Println(b)\n\tfmt.Println(c)\n}", "func main() {\n fmt.println(\"Hello Programmer!\")\n fmt.println(\"Here's what you have in store for today\")\n fmt.println(\"I can't predict your day, I'm a computer. But you can tell me\")\n}", "func PrintVersion() {\n\tfmt.Println(`gpress version ` + version + `🐣 GCOMPRESSION lib version 0.0.1/2\n\nCopyright (C) 2016 by Jakub Kułak.\ngpress comes with ABSOLUTELY NO WARRANTY. This is free software, and you are welcome to redistribute it under certain conditions. 
See the GNU General Public Licence for details.`)\n}", "func main() {\n\t// Println prints the phrase on the standard output and also break a line.\n\tfmt.Println(\"Hello Gophers!!\")\n}", "func main() {\n\n\t// (Print - Println) - Printf\n\n\t/* \t{\n\t\tvar c\n\t} */\n\n\t/* \tfmt.Println(\"Merhaba\")\n\t \tfmt.Print(\"Merhaba\")\n\t \tfmt.Println(\"\")\n\t \tfmt.Printf(\"Merhaba\") */\n\n\t/* \tname := \"Arin\" */\n\n\t/* \tfmt.Print(name)\n\t \tfmt.Println(name)\n\t \tfmt.Printf(name) */\n\n\t/* \tfmt.Print(\"Benim Adım\", name)\n\t \tfmt.Println(\"\")\n\t \tfmt.Println(\"Benim Adım\", name)\n\t \tfmt.Printf(\"Benim Adım %X %T\", name, name) */\n\n\t/* \tx := 100\n\t \ty := 20\n\t \tz := 30\n\n\t \tfmt.Printf(\"%b %d %o\", x, y, z) */\n\n\t/* name, age := \"Arin\", 5 */\n\n\t// fmt.Print(\"Benim Adım \", name, \", ve ben \", age, \" yaşındayım.\")\n\t// fmt.Println(\"Benim Adım\", name, \"ve ben\", age, \"yaşındayım.\")\n\t// fmt.Printf(\"Benim Adım %v, ve ben %v yaşındayım\", name, age)\n\n\t// VISIBILITY\n\n\t/* \tfmt.Println(x)\n\n\t \tmyFunc() */\n\n\t/* \tvar coin string // count - customer - coin\n\t \t// Go camel case isimlendirme kullanılır\n\t \tvar coinType string\n\t \tvar custName string\n\t \t// kısaltmalar büyük harflerle yazılır\n\t \tvar URL // Url değil\n\t \tvar HTTP // http değil \"xyzHTTP\"\n\n\t \ti , j , k */\n\n\t// benForDongusuDegiskeninİsmiyim\n\n}", "func showHeader() {\n\theader := fmt.Sprintf(\"pivot version %s\", version)\n\t// If we have a commit hash then add it to the program header\n\tif commitHash != \"\" {\n\t\theader = fmt.Sprintf(\"%s (%s)\", header, commitHash)\n\t}\n\tfmt.Println(header)\n}", "func mkzversion(dir, file string) {\n\tvar buf bytes.Buffer\n\tfmt.Fprintf(&buf, \"// Code generated by go tool dist; DO NOT EDIT.\\n\")\n\tfmt.Fprintln(&buf)\n\tfmt.Fprintf(&buf, \"package sys\\n\")\n\tfmt.Fprintln(&buf)\n\tfmt.Fprintf(&buf, \"const StackGuardMultiplierDefault = %d\\n\", 
stackGuardMultiplierDefault())\n\n\twritefile(buf.String(), file, writeSkipSame)\n}", "func generateByThirdPackage() {\n\n}", "func main() {\n\ts := fmt.Sprintf(\"%v %v %v\\n\", x, y, z)\n\n\tfmt.Println(s)\n}", "func direct(branch discord_version.Branch) {\n\td := discord_version.NewDirect(branch)\n\tv, e := d.GetVersion()\n\tif e != nil {\n\t\tfmt.Printf(\"Error fetching version: %v\\nDetail: %#v\\n\", e, e)\n\t\treturn\n\t}\n\tfmt.Printf(\"Currently %s\\n\", display(v))\n}", "func printVersion() {\n\tfmt.Printf(\"Azure Container Network Monitoring Service\\n\")\n\tfmt.Printf(\"Version %v\\n\", version)\n}", "func main() {\n\t// quote.Go() function, printing a clever message about communication.\n\tfmt.Println(quote.Go())\n}", "func showVersion() {\n\tfmt.Print(versionString())\n\tfmt.Print(releaseString())\n\tif devBuild && gitShortStat != \"\" {\n\t\tfmt.Printf(\"%s\\n%s\\n\", gitShortStat, gitFilesModified)\n\t}\n}", "func main() {\n\tfmt.Println(\"Hello, my name is Jordan\")\n}", "func main() {\n\t// Print in console \"Hello world\"\n\tfmt.Println(\"Hello, World!\")\n\n\t// Print in console the function called of rsc.io/quote\n\tfmt.Println(quote.Go())\n\tfmt.Println(quote.Glass())\n\tfmt.Println(quote.Hello())\n}", "func printOutputHeader() {\n\tfmt.Println(\"\")\n\tfmt.Println(\"FreeTAXII - STIX Table Creator\")\n\tfmt.Println(\"Copyright: Bret Jordan\")\n\tfmt.Println(\"Version:\", Version)\n\tif Build != \"\" {\n\t\tfmt.Println(\"Build:\", Build)\n\t}\n\tfmt.Println(\"\")\n}", "func main() {\n\tvar gitPath = \"/home/pi/project/test\"\n\trepo, err := gogit.PlainOpen(gitPath)\n\tb, e := repo.Branch(\"home\")\n\tfmt.Println(b, e)\n\tpanicError(err)\n\tbranches, err := repo.Branches()\n\tpanicError(err)\n\t//fmt.Println(branches)\n\tbranches.ForEach(func(ref *plumbing.Reference) error {\n\t\tfmt.Println(ref)\n\t\treturn nil\n\t})\n}", "func format(commits []string) string {\n\tvar hasDeps bool\n\tvar gapic, bazel, gencli, chore, samples, other 
[]string\n\n\tfor _, msg := range commits {\n\t\tsep := strings.Index(msg, \":\")\n\t\tif sep == -1 {\n\t\t\tother = append(other, \"* \"+msg)\n\t\t\tcontinue\n\t\t}\n\n\t\tcomp := msg[:sep]\n\t\tcontent := strings.TrimSpace(msg[sep+1:])\n\n\t\tswitch comp {\n\t\tcase \"gapic\":\n\t\t\tgapic = append(gapic, \"* \"+content)\n\t\tcase \"bazel\":\n\t\t\tbazel = append(bazel, \"* \"+content)\n\t\tcase \"gencli\":\n\t\t\tgencli = append(gencli, \"* \"+content)\n\t\tcase \"chore(deps)\":\n\t\t\thasDeps = true\n\t\tcase \"chore\":\n\t\t\tchore = append(chore, \"* \"+content)\n\t\tcase \"samples\":\n\t\t\tsamples = append(samples, \"* \"+content)\n\t\tdefault:\n\t\t\tother = append(other, \"* \"+msg)\n\t\t}\n\t}\n\n\tif hasDeps {\n\t\tchore = append(chore, \"* update dependencies (see history)\")\n\t}\n\n\tvar notes strings.Builder\n\tif len(gapic) > 0 {\n\t\tnotes.WriteString(\"# gapic\\n\\n\")\n\t\tnotes.WriteString(strings.Join(gapic, \"\\n\"))\n\t\tnotes.WriteString(\"\\n\\n\")\n\t}\n\n\tif len(bazel) > 0 {\n\t\tnotes.WriteString(\"# bazel\\n\\n\")\n\t\tnotes.WriteString(strings.Join(bazel, \"\\n\"))\n\t\tnotes.WriteString(\"\\n\\n\")\n\t}\n\n\tif len(gencli) > 0 {\n\t\tnotes.WriteString(\"# gencli\\n\\n\")\n\t\tnotes.WriteString(strings.Join(gencli, \"\\n\"))\n\t\tnotes.WriteString(\"\\n\\n\")\n\t}\n\n\tif len(samples) > 0 {\n\t\tnotes.WriteString(\"# samples\\n\\n\")\n\t\tnotes.WriteString(strings.Join(samples, \"\\n\"))\n\t\tnotes.WriteString(\"\\n\\n\")\n\t}\n\n\tif len(chore) > 0 {\n\t\tnotes.WriteString(\"# chores\\n\\n\")\n\t\tnotes.WriteString(strings.Join(chore, \"\\n\"))\n\t\tnotes.WriteString(\"\\n\\n\")\n\t}\n\n\tif len(other) > 0 {\n\t\tnotes.WriteString(\"# other\\n\\n\")\n\t\tnotes.WriteString(strings.Join(other, \"\\n\"))\n\t}\n\n\treturn notes.String()\n}", "func main() {\n\tfmt.Println(quote.Go())\n\n\tuseFprintf()\n}", "func main() {\n\tlog.SetFlags(0)\n\tpgs.Init(\n\t\tpgs.DebugEnv(\"DEBUG\"),\n\t).RegisterModule(\n\t\t&protoModule{ModuleBase: 
&pgs.ModuleBase{}},\n\t).RegisterPostProcessor(\n\t\tpgsgo.GoFmt(),\n\t).Render()\n}", "func display(v discord_version.Version) string {\n\tvar bn string\n\tif len(v.BuildNumber) == 0 {\n\t\tbn = \"unknown\"\n\t} else {\n\t\tbn = v.BuildNumber\n\t}\n\tvar date string\n\tif v.Date.IsZero() {\n\t\tdate = \"unknown date\"\n\t} else {\n\t\tdate = v.Date.String()\n\t}\n\treturn fmt.Sprintf(\"%s branches's Build ID is %s with hash of %s and released at %s\", v.Branch, bn, v.VersionHash, date)\n}", "func Print() {\n\tfmt.Printf(\"hierarchy, version %v (branch: %v, revision: %v), build date: %v, go version: %v\\n\", Version, Branch, GitSHA1, BuildDate, runtime.Version())\n}", "func main() {\n\tx = 42\n\ty = 42.34546354235234523523\n\tfmt.Println(x)\n\tfmt.Printf(\"%T\\n\", x)\n\tfmt.Println(y)\n\tfmt.Printf(\"%T\\n\", y)\n\n\tfmt.Println(z)\n\n\t// GO Operating System\n\tfmt.Println(runtime.GOOS)\n\tfmt.Println(runtime.GOARCH)\n}", "func main() {\n\tfmt.Print(\"My weight on the surface of Mars is \")\n\tfmt.Print(230.4 * 0.3783)\n\tfmt.Print(\" lbs, and I would be \")\n\tfmt.Print(63 * 365 / 687)\n\tfmt.Print(\" years old.\\n\")\n\t// Using Printf gives better control over output\n\tfmt.Printf(\"My weight on the surface of Mars is %v lbs,\", 230.4*0.3783)\n\tfmt.Printf(\" and I would be %v years old.\\n\", 63*365/687)\n\t// Using Println puts a return on the line\n\tfmt.Println(\"If used Print I would need a return character. 
With Println I don't\")\n\t// Multiple format verbs can be used in the with Print f\n\tfmt.Printf(\"My weight on the surface of %v is %v lbs.\\n\", \"Earth\", 154.0)\n\t// Printf can also help aligning text\n\tfmt.Printf(\"%-15v $%4v\\n\", \"SpaceX\", 94)\n\tfmt.Printf(\"%-15v $%4v\\n\", \"Virgin Galactic\", 100)\n\n}", "func main() {\n\tfmt.Println(A)\n\tfmt.Println(B)\n\tfmt.Println(C)\n}", "func Println() {\n\tfmt.Println(\"Hello world from test.go\")\n}", "func importdot(opkg *types.Pkg, pack *Node) {\n\tn := 0\n\tfor _, s := range opkg.Syms {\n\t\tif s.Def == nil {\n\t\t\tcontinue\n\t\t}\n\t\tif !types.IsExported(s.Name) || strings.ContainsRune(s.Name, 0xb7) { // 0xb7 = center dot\n\t\t\tcontinue\n\t\t}\n\t\ts1 := lookup(s.Name)\n\t\tif s1.Def != nil {\n\t\t\tpkgerror := fmt.Sprintf(\"during import %q\", opkg.Path)\n\t\t\tredeclare(lineno, s1, pkgerror)\n\t\t\tcontinue\n\t\t}\n\n\t\ts1.Def = s.Def\n\t\ts1.Block = s.Block\n\t\tif asNode(s1.Def).Name == nil {\n\t\t\tDump(\"s1def\", asNode(s1.Def))\n\t\t\tFatalf(\"missing Name\")\n\t\t}\n\t\tasNode(s1.Def).Name.Pack = pack\n\t\ts1.Origpkg = opkg\n\t\tn++\n\t}\n\n\tif n == 0 {\n\t\t// can't possibly be used - there were no symbols\n\t\tyyerrorl(pack.Pos, \"imported and not used: %q\", opkg.Path)\n\t}\n}", "func main() {\n\tvar start, stop int\n\n\tif args := os.Args[1:]; len(args) == 2 {\n\t\tstart, _ = strconv.Atoi(args[0])\n\t\tstop, _ = strconv.Atoi(args[1])\n\t}\n\n\tif stop == 0 || start == 0 {\n\t\tstop, start = 'Z', 'A'\n\t}\n\tfmt.Printf(\"%-10s %-10s %-10s %-10s\\n%s\\n\", \"literal\", \"dec\", \"hex\", \"encoded\",\n\t\tstrings.Repeat(\"-\", 45))\n\n\tfor n := start; n <= stop; n++ {\n\t\t// space denotes utf8 encoding % -12[1]x\\n\n\t\tfmt.Printf(\"%-10c %-10[1]d %-10[1]x % -12x\\n\", n, string(n))\n\t}\n}", "func printVersionAndLicense(file io.Writer) {\n\tfmt.Fprintf(file, \"vidx2pidx version %v\\n\", Version)\n\tfmt.Fprintf(file, \"%v\\n\", License)\n}", "func main() {\n\n\tfmt.Println(\"Testing Git 
Access\")\n\n\tfmt.Println(\"Hello World!!\")\n\n}", "func Print() string {\n\treturn \"Current version is 2.0.0\"\n}", "func PrintVersion() {\n\tfmt.Println(assets.BuildInfo)\n}", "func main() {\n\tflag.Parse()\n\tfmt.Println(\"version => \", version)\n}", "func main() {\n\tfmt.Println(\"Welcome to origourls!\")\n}", "func main(){\n\tfmt.Println(\"Welcome to Kumarsparkz Golang Library\")\n\thangmangame.Playhangman()\n}", "func main() {\n\tdemo3()\n}", "func main() {\n\tfmt.Println(\"Hola berracos!\")\n}", "func main() {\n\tf,err := os.Create(\"Grace_kid.go\")\n\tif (err != nil) {\n\t\tpanic(err)\n\t}\n\tw := bufio.NewWriter(f)\n\ts := `package main\n\nimport(\n\t\"fmt\"\n\t\"os\"\n\t\"bufio\"\n)\n\n/*\n Just a random comment wandering in this Quine\n*/\n\nfunc main() {\n\tf,err := os.Create(\"Grace_kid.go\")\n\tif (err != nil) {\n\t\tpanic(err)\n\t}\n\tw := bufio.NewWriter(f)\n\ts := %s\n\tfmt.Fprintf(w, s, \"%c\"+s+\"%c\", 96, 96, 10)\n\tw.Flush()\n}%c`\n\tfmt.Fprintf(w, s, \"`\"+s+\"`\", 96, 96, 10)\n\tw.Flush()\n}", "func (g *Generator) format() []byte {\n\tvar buf bytes.Buffer\n\tbuf.Write(g.header.Bytes())\n\tbuf.Write(g.buf.Bytes())\n\tsrc, err := format.Source(buf.Bytes())\n\tif err != nil {\n\t\t// Should never happen, but can arise when developing this code.\n\t\t// The user can compile the output to see the error.\n\t\tlog.Fatalf(\"invalid Go generated: %s\\n%s\", err, buf.Bytes())\n\t}\n\treturn src\n}", "func printLicense() {\n\tfmt.Printf(color.GreenString(\"\\n\"+appName+\" v\"+semverInfo()) + color.WhiteString(\" by \"+appDev))\n\tcolor.Set(color.FgGreen)\n\tfmt.Println(\"\\n\" + appRepository + \"\\n\" + appURL + \"\\n\")\n\n\tcolor.Set(color.FgHiWhite)\n\tfmt.Println(\"\\nMIT License\\nCopyright (c) 2020-2021 RockSteady, TurtleCoin Developers\")\n\tcolor.Set(color.FgHiBlack)\n\tfmt.Println(\"\\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal 
in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\\n\\nThe above copyright notice and this permission notice shall be included in allcopies or substantial portions of the Software.\\n\\nTHE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\")\n\tfmt.Println()\n}", "func generateImports(imports []string) string {\n\tvar b bytes.Buffer\n\t// generate import of base package\n\tb.WriteString(\"import _base \\\"github.com/SphereSoftware/go-annotations/registry\\\"\\n\")\n\t// generate other imports\n\tfor i, imp := range imports {\n\t\tb.WriteString(\"import \")\n\t\tb.WriteString(genPackageAlias(i))\n\t\tb.WriteString(\" \")\n\t\tb.WriteString(strconv.Quote(imp))\n\t\tb.WriteString(\"\\n\")\n\t}\n\tb.WriteString(\"\\n\")\n\treturn b.String()\n}", "func main() {\n\tfmt.Println(\"Hello World\") // Print a line with hello world\n}", "func versionExample() string {\n\treturn \"\"\n}", "func printMan() {\n\tfmt.Println(\n\t\tman.Generate(\n\t\t\tgenUsage(),\n\t\t\tgenAbout(\"\"),\n\t\t),\n\t)\n}", "func printMan() {\n\tfmt.Println(\n\t\tman.Generate(\n\t\t\tgenUsage(),\n\t\t\tgenAbout(\"\"),\n\t\t),\n\t)\n}", "func main() {\n\tprintln(getBoo(3))\n}", "func main() {\n // The fmt package provides functions for formatting data with io streams, strings, and the console.\n fmt.Println(\"Hello,\", \"gopher!\") // Arguments will be converted to strings and 
concatenated together with spaces in between.\n fmt.Printf(\"Goodbye, %s.\\n\", \"gopher\") // Does what you would think if you are familiar with printf in other languages.\n\n // Go's Printf() is really friendly.\n fmt.Printf(\"%f is %s\", \"fred\")\n // Unused variables will not compile\n //var i int = 0\n}", "func Example() {\n\tprintln(main.Simple())\n\t// Output:\n\t// hello world\n}", "func Utils() string {\n\treturn \"Hi there, this is an example code file!\"\n}", "func printHeader(num int) {\n\ts := fmt.Sprint(num)\n\tfmt.Print(s)\n\tfor i := 0; i < 8-len(s); i++ {\n\t\tfmt.Print(\" \")\n\t}\n}", "func main() {\n\tprint(\"test ok! (print)\\n\")\n\tfmt.Println(\"test ok! (fmt)\")\n}", "func TestImports(t *testing.T) {\n\tt.Parallel()\n\n\ttree := writeTree(t, `\n-- a.go --\npackage a\nfunc _() {\n\tfmt.Println()\n}\n`)\n\n\twant := `\npackage a\n\nimport \"fmt\"\nfunc _() {\n\tfmt.Println()\n}\n`[1:]\n\n\t// no arguments\n\t{\n\t\tres := gopls(t, tree, \"imports\")\n\t\tres.checkExit(false)\n\t\tres.checkStderr(\"expects 1 argument\")\n\t}\n\t// default: print with imports\n\t{\n\t\tres := gopls(t, tree, \"imports\", \"a.go\")\n\t\tres.checkExit(true)\n\t\tif res.stdout != want {\n\t\t\tt.Errorf(\"format: got <<%s>>, want <<%s>>\", res.stdout, want)\n\t\t}\n\t}\n\t// -diff: show a unified diff\n\t{\n\t\tres := gopls(t, tree, \"imports\", \"-diff\", \"a.go\")\n\t\tres.checkExit(true)\n\t\tres.checkStdout(regexp.QuoteMeta(`+import \"fmt\"`))\n\t}\n\t// -write: update file\n\t{\n\t\tres := gopls(t, tree, \"imports\", \"-write\", \"a.go\")\n\t\tres.checkExit(true)\n\t\tcheckContent(t, filepath.Join(tree, \"a.go\"), want)\n\t}\n}", "func printBanner() {\n\tfmt.Fprintf(os.Stderr, \"biblint %s (c) 2017-2018 Carl Kingsford. 
See LICENSE.txt.\\n\", version)\n}", "func Haha1() {\r\n\tfmt.Print(\"Ha ha ebitut\\n\")\r\n}", "func main() {\n\tbasics(i)\n\tbitShifting()\n\tarrayAndSlices()\n\tstructs()\n\tmaps()\n\tswitchCases()\n\tlooping()\n\tloopingOverCollections()\n\tpointers()\n\tfunctions()\n\tinterfaces()\n\tgoRoutines()\n\tchannelBasics()\n\tloggerExample()\n}", "func main() {\n\tparseFlags()\n\n\tfmt.Printf(\"\\033[32mIpe %s@%s (built: %s)\\033[0m\\n\", version, githash, buildtime)\n\tfmt.Println(\"\\033[32mhttps://github.com/dimiro1/ipe\\033[0m\")\n\n\tif isShowVersion {\n\t\treturn\n\t}\n\n\tprintBanner()\n\n\tipe.Start(configFilename)\n}", "func main() {\n\tfmt.Println(str)\n}", "func (g *Generator) format() []byte {\n\t//DEBUG: fmt.Print(g.Buf.String())\n\tsrc, err := format.Source(g.Buf.Bytes())\n\tif err != nil {\n\t\t// Should never happen, but can arise when developing this code.\n\t\t// The user can compile the output to see the error.\n\t\tlog.Printf(\"warning: internal error: invalid Go generated: %s\", err)\n\t\tlog.Printf(\"warning: compile the package to analyze the error\")\n\t\treturn g.Buf.Bytes()\n\t}\n\treturn src\n}", "func main() {\n\tpp.Println(\"=========================================\")\n\tpp.Println()\n\tpp.Println()\n\tpp.Println(\"=========================================\")\n\tpp.Println()\n\tpp.Println()\n\tpp.Println(\"=========================================\")\n\tpp.Println()\n\tpp.Println()\n\tpp.Println(\"=========================================\")\n\tpp.Println()\n\tpp.Println()\n\tpp.Println(\"=========================================\")\n}", "func main() 
{\n\tpp.Println(\"=========================================\")\n\tpp.Println()\n\tpp.Println()\n\tpp.Println(\"=========================================\")\n\tpp.Println()\n\tpp.Println()\n\tpp.Println(\"=========================================\")\n\tpp.Println()\n\tpp.Println()\n\tpp.Println(\"=========================================\")\n\tpp.Println()\n\tpp.Println()\n\tpp.Println(\"=========================================\")\n}", "func main() {\n\turl := \"https://www8.cao.go.jp/chosei/shukujitsu/syukujitsu.csv\"\n\timporter := jptime.NewHTTPCSVImporter(url)\n\tresp, err := importer.Import()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer resp.Body.Close()\n\treader := csv.NewReader(transform.NewReader(resp.Body, japanese.ShiftJIS.NewDecoder()))\n\tholidays, err := jptime.Parse(reader)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdst, err := os.Create(\"../../holidays2.go\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\texporter := jptime.NewGoFileExporter(dst)\n\tif err := exporter.Export(holidays); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func ExampleGist() {\n\tx := NewGist(\"<your token here>\")\n\tfmt.Printf(\"%v\", x)\n}", "func PrintUsageExamples() {\n\tprintln(\"Examples:\")\n\tprintln()\n\tprintln(\" vanieth -n 3 'ABC'\")\n\tprintln(\" Find 3 addresses that have `ABC` at the beginning.\")\n\tprintln()\n\tprintln(\" vanieth -t 5 'ABC'\")\n\tprintln(\" Find as many address that have `ABC` at the beginning as possible within 5 seconds.\")\n\tprintln()\n\tprintln(\" vanieth -c 'ABC'\")\n\tprintln(\" Find any address that has `ABC` at the beginning of any of the first 10 contract addresses.\")\n\tprintln()\n\tprintln(\" vanieth -cd1 '00+AB'\")\n\tprintln(\" Find any address that has `AB` after 2 or more `0` chars in the first contract address.\")\n\tprintln()\n\tprintln(\" vanieth '.*ABC'\")\n\tprintln(\" Find a single address that contains `ABC` anywhere.\")\n\tprintln()\n\tprintln(\" vanieth '.*DEF$'\")\n\tprintln(\" Find a single address 
that contains `DEF` at the end.\")\n\tprintln()\n\tprintln(\" vanieth -i 'A.*A$'\")\n\tprintln(\" Find a single address that contains either `A` or `a` at both the start and end.\")\n\tprintln()\n\tprintln(\" vanieth -ld1 '.*ABC'\")\n\tprintln(\" Find a single address that contains `ABC` anywhere, and also list the first contract address.\")\n\tprintln()\n\tprintln(\" vanieth -ld5 --key=0x349fbc254ff918305ae51967acc1e17cfbd1b7c7e84ef8fa670b26f3be6146ba\")\n\tprintln(\" List the details and first five contract address for the supplied private key.\")\n\tprintln()\n\tprintln(\" vanieth -l --scan=0x950024ae4d9934c65c9fd04249e0f383910d27f2\")\n\tprintln(\" Show the first 10 contract addresses of the supplied address.\")\n\tprintln()\n}", "func testPackages() {\n\tarrayOfNumbers := []int{2 ,6, 9, 8, 20, 15, 3, 5, 6, 9}\n\tsort.Ints(arrayOfNumbers)\n min := arrayOfNumbers[0]\n max := arrayOfNumbers[len(arrayOfNumbers)-1]\n fmt.Println(\"\\nBuilt in functions:\")\n fmt.Println(\"Min = \", min)\n fmt.Println(\"Max = \", max)\n fmt.Print(arrayOfNumbers)\n}", "func main() {\n\t// TODO:\n\tfmt.Println(\"NOT IMPLEMENTED\")\n}", "func main() {\n\tfmt.Println(math.Pi)\n}" ]
[ "0.62175906", "0.6216425", "0.58148956", "0.5741799", "0.565565", "0.5636288", "0.5596986", "0.55647355", "0.5549735", "0.55264395", "0.550503", "0.549206", "0.545041", "0.54121274", "0.54022735", "0.53764904", "0.5369192", "0.53366244", "0.53361833", "0.53281826", "0.5312524", "0.52912265", "0.5266675", "0.52621573", "0.52579343", "0.52527046", "0.5249653", "0.52374554", "0.5235062", "0.52181196", "0.51983386", "0.51830864", "0.5178489", "0.5178394", "0.51716197", "0.51712066", "0.5170919", "0.5168574", "0.51664907", "0.5149699", "0.51468307", "0.5142352", "0.5140218", "0.51333654", "0.5129394", "0.51252097", "0.51197904", "0.511514", "0.51148933", "0.5113547", "0.51102847", "0.5102186", "0.50891006", "0.5088525", "0.5080102", "0.5062342", "0.5060242", "0.50598526", "0.5051021", "0.5038258", "0.5037077", "0.5035267", "0.50309026", "0.50205743", "0.5008689", "0.500711", "0.49997672", "0.49977902", "0.49928164", "0.49905708", "0.49785143", "0.49755508", "0.4975036", "0.49745342", "0.49573594", "0.495541", "0.49550465", "0.4948355", "0.49319896", "0.49319896", "0.49317572", "0.492797", "0.4927509", "0.4923781", "0.49193254", "0.4916764", "0.49166715", "0.49118343", "0.49115494", "0.4906787", "0.489962", "0.4884548", "0.4882267", "0.48715213", "0.48715213", "0.48697534", "0.48626244", "0.48611388", "0.4859444", "0.48540583", "0.4851865" ]
0.0
-1
Sending information into channel. If we create large group the program will close before all information is sent. Using just 10 sends
func foo(c chan<- int) { // high := 100 // for i := 0; i < high; i++ { // c <- i // } c <- 10 }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func sendAll(count int, c chan int64) {\n\tfor i :=0; i < count; i++ {\n\t\tc <- 1\n\t}\n}", "func send(c chan int) {\n\tc <- 100\n\tfmt.Println(\"Sending data completed\")\n}", "func (c *connection) sendLoop() {\n\tc.group.Add(1)\n\tvar id int\n\tfor msg := range c.out {\n\t\ttime.Sleep(0)\n\t\tid = int(msg[0])\n\t\tif id == c.myId {\n\t\t\tc.in <- msg\n\t\t} else {\n\t\t\tif id >= len(c.peers) {\n\t\t\t\tgo func() {\n\t\t\t\t\ttime.Sleep(time.Millisecond * 500)\n\t\t\t\t\tc.out <- msg\n\t\t\t\t}()\n\t\t\t} else {\n\t\t\t\tmsg[0] = 1\n\n\t\t\t\twrite(c.peers[id].conn, msg)\n\t\t\t}\n\t\t}\n\t}\n\tc.running = false\n\tc.group.Done()\n\tc.group.Wait()\n\tclose(c.in)\n}", "func (rf *Raft) send() {\n\n\tfor !rf.killed() {\n\n\t\trf.mu.Lock()\n\t\tif rf.IsLeader() && rf.canSend {\n\t\t\trf.canSend = false\n\t\t\trf.broadcast()\n\t\t}\n\t\trf.mu.Unlock()\n\n\t\ttime.Sleep(time.Millisecond * 5)\n\t}\n}", "func main() {\n\tflag.Parse()\n\taddr, err := net.ResolveUDPAddr(\"udp\", *host+\":\"+*port)\n\tif err != nil {\n\t\tfmt.Println(\"Can't resolve address: \", err)\n\t\tos.Exit(1)\n\t}\n\twaitGroup := sync.WaitGroup{}\n\tfor i := 0; i < *number; i++ {\n\t\t// waitGroup.Add(i)\n\t\t// fmt.Printf(\"Send gourp %v\\n\", i)\n\t\t// go func(i int) {\n\t\t// \tfor j := 0; j < 1000; j++ {\n\t\t// \t\tsend(i, j, addr)\n\t\t// \t}\n\t\t// \twaitGroup.Done()\n\t\t// }(i)\n\t\tfmt.Printf(\"Sending %v\\n\", i)\n\t\tsend(i, 0, addr)\n\t\ttime.Sleep(time.Second)\n\t}\n\twaitGroup.Wait()\n}", "func write(chnl chan<- int){\n\t\n\tfor i := 0; i < 10; i++ {\n\t\tchnl <- i\n\t\tfmt.Println(\"writer : \", i)\n\t\ttime.Sleep(time.Second )\n\t}\n\n\tclose(chnl)\n}", "func wsChanSend() {\n\tlog.Println(\"wschan running...\")\n\ti := 1\n\tfor {\n\t\t// send stuff to clients\n\t\t// TODO: solve multiple clients connecting\n\t\twsChan <- \"test: \" + strconv.Itoa(i)\n\t\ti++\n\t}\n}", "func send(channel chan <- string, data, prefix string) {\n\t// <-channel // \"It'll not work\"\n\n\tfor _, 
symbol := range data {\n\t\tfmt.Printf(writeFormat, prefix, string(symbol))\n\t\tchannel <- string(symbol)\n\t}\n}", "func sendInfo(channel chan<- string, info string) {\n\tchannel <- info\n}", "func ChMessageSend(textChannelID, message string) {\n\tfor i := 0; i < 10; i++ {\n\t\t_, err := dg.ChannelMessageSend(textChannelID, message)\n\t\tif err != nil {\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n}", "func (c clientImpl) Send(msgs ...Message) (int, error) {\r\n\tvar k int\r\n\tvar msg Message\r\n\tfor k, msg = range msgs {\r\n\t\terr := c.Write(msg)\r\n\t\tif err != nil {\r\n\t\t\treturn k - 1, err\r\n\t\t}\r\n\t}\r\n\treturn k, nil\r\n}", "func (p *Process) send() {\n\t// Only send on every fourth iteration\n\tshouldSend := p.timestamp%4 == 0\n\tif !shouldSend {\n\t\treturn\n\t}\n\n\tp.timestamp++\n\n\tp.outChan <- p.timestamp\n\tfmt.Printf(\"%d: sent value %d\\n\", p.pid, p.timestamp)\n}", "func (c Channel) Send(cmd wit.Command, clientIDs ...string) {\n\tc.mutex.Lock()\n\tdefer c.mutex.Unlock()\n\n\tfor _, clientID := range clientIDs {\n\t\tif cmap, ok := c.clients[clientID]; ok {\n\t\t\tfor _, op := range cmap {\n\t\t\t\tselect {\n\t\t\t\tcase <-op.Done():\n\t\t\t\tcase op.CmdCh() <- cmd:\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (q *TestGroupQueue) Send(ctx context.Context, receivers chan<- *configpb.TestGroup, frequency time.Duration) error {\n\tch := make(chan string)\n\tvar err error\n\tgo func() {\n\t\terr = q.Queue.Send(ctx, ch, frequency)\n\t\tclose(ch)\n\t}()\n\n\tfor who := range ch {\n\t\tq.lock.RLock()\n\t\ttg := q.groups[who]\n\t\tq.lock.RUnlock()\n\t\tif tg == nil {\n\t\t\tcontinue\n\t\t}\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\t\tcase receivers <- tg:\n\t\t}\n\t}\n\treturn err\n}", "func (s *session) send(buf []byte) error {\n\tselect {\n\tcase s.clientActivityC <- true:\n\tdefault:\n\t}\n\n\t_, err := s.backendConn.Write(buf)\n\tif err != nil {\n\t\treturn 
err\n\t}\n\n\ts.scheduler.IncrementTx(*s.backend, uint(len(buf)))\n\n\tif s.maxRequests > 0 {\n\t\tif atomic.AddUint64(&s._sentRequests, 1) >= s.maxRequests {\n\t\t\ts.stop()\n\t\t}\n\t}\n\n\treturn nil\n}", "func send(c chan ocr2keepers.BlockKey, k ocr2keepers.BlockKey) {\n\tselect {\n\tcase c <- k:\n\tdefault:\n\t}\n}", "func (tester TestEventScheduler) sendEvents(numEvents int, port string, waitGroup *sync.WaitGroup) {\n\tconn, _ := net.Dial(\"tcp\", \"localhost:\"+port)\n\twriter := bufio.NewWriter(conn)\n\tfor i:=numEvents; i >=1; i-- {\n \t\twriter.WriteString(strconv.Itoa(i)+\"|B\\n\")\n \twriter.Flush()\n\t}\n waitGroup.Done()\n}", "func Send(node *noise.Node, from kad.ID, code byte, data []byte, minBucketID int, maxBucketID int, seqNum byte, incrementSeqNum bool) {\n\terrChan := make(chan error)\n\t// TODO: maybe do a self node lookup here\n\tpeers, prefixLens := kad.Table(node).GetBroadcastPeers(minBucketID, maxBucketID)\n\tfor i, id := range peers {\n\t\t// fmt.Println(\"Peers ID: \", id)\n\t\tmsg := NewMessage(from, prefixLens[i], code, data)\n\t\t// If incrementSeqNum is true, then seqNum is ignored and broadcastSeqNum is used and incremented instead. incrementSeqNum should only be set to true when Send is Send is called by the \"from\" node (i.e. 
not an intermediate broadcast node).\n\t\tif incrementSeqNum {\n\t\t\tmsg.ChangeSeqNum(broadcastSeqNum)\n\t\t} else {\n\t\t\tmsg.ChangeSeqNum(seqNum)\n\t\t}\n\t\tgo broadcastThroughPeer(node, id.(kad.ID), msg, errChan)\n\t}\n\tif incrementSeqNum {\n\t\tbroadcastSeqNum++\n\t}\n\n\tnumPeers := uint32(len(peers))\n\tresponseCount := uint32(0)\n\tfor atomic.LoadUint32(&responseCount) < numPeers {\n\t\tselect {\n\t\tcase err := <-errChan:\n\t\t\tif err != nil {\n\t\t\t\tlog.Warn().Err(err)\n\t\t\t}\n\t\t\tatomic.AddUint32(&responseCount, 1)\n\t\t}\n\t}\n}", "func main() {\r\n\r\n\t//Creating a Buffered channel of size 3, holding any type of values\r\n\tc := make(chan interface{}, 3)\r\n\tfmt.Println(c)\r\n\r\n\t//Creating a Structure\r\n\ttype stud struct{\r\n\r\n\t\tName string\r\n\t}\r\n\t//Sender Go Routine(Anonymous) to send an integer, two structure objects and a string\r\n\t//If the size exceeds 3, for every item after 3rd, once an item is sniffed, nnext item will be sent\r\n\tgo func(){\r\n\r\n\t\tc <- 1\r\n\t\tc <- stud{\"Kailash\"}\r\n\t\tc <- stud{\"SK7\"}\r\n\t\tc <- \"End of Channel items\"\r\n\r\n\t\t//\"close\" is used to indicate end of items in channel when iterating over items in buffered channel\r\n\t\t//If not used, leads to deadlock, as iterator will forever be expecting items in channel\r\n\t\tclose(c)\r\n\t}()\r\n\r\n\t//When using 2 or more Go Routines, give a sleep time of 1 second after every Go Routine to compensate for concurrency\r\n\ttime.Sleep(time.Second * 1)\r\n\r\n\t//Sniff Go Routine(Anonymous) to receive the channel items\r\n\tgo func(){\r\n\r\n\t\tfor i := range c{\r\n\r\n\t\t\tfmt.Println(i)\r\n\t\t}\r\n\t}()\r\n\r\n\ttime.Sleep(time.Second * 1)\r\n}", "func sendInt(c chan <- int){\n\tfor i:=1; i <= 5; i++ {\n\t\tc <- i\n\t}\n\tclose(c)\n}", "func (nc *NetClient) send() {\n\terrored := false\n\tfor {\n\t\tselect {\n\t\tcase protoMessage := <-nc.Composer.sendChan:\n\t\t\terr := nc.ReadWriteCloser.Write(protoMessage)\n\t\t\tif !errored 
&& err != nil {\n\t\t\t\terrored = true\n\t\t\t\tlog.Println(\"Error writing to server:\", err)\n\t\t\t\tgo nc.Disconnect()\n\t\t\t} else if errored {\n\t\t\t\tcontinue\n\t\t\t}\n\t\tcase <-nc.pingServer:\n\t\t\t// query for the root message\n\t\t\troot, _ := nc.Archive.Root()\n\t\t\tgo nc.Composer.Query(root)\n\t\t\tgo nc.Composer.AskWho()\n\t\tcase <-nc.stopSending:\n\t\t\treturn\n\t\t}\n\t}\n}", "func producer(wg *sync.WaitGroup, ch chan int) {\r\n\ttick := time.Tick(time.Second)\r\n\tfor i := 0; i < 7; i++ {\r\n\t\t<-tick\r\n\t\tfmt.Println(\"\\nPushing to Stack(Producer): \", i)\r\n\t\tpush(i)\r\n\t\t//print()\r\n\t\tch <- 1\r\n\t\t//time.Sleep(1 * time.Second)\r\n\t}\r\n\tclose(ch)\r\n\twg.Done()\r\n}", "func (c *Channel) SendLoop() {\n\tfor msg := range c.Messages {\n\t\tif !c.Alive {\n\t\t\treturn\n\t\t}\n\t\tc.LogF(\"Sending msg `%s` to `%d` members in `%s`\", msg, len(c.Members), c.GetName())\n\t\tfor _, v := range c.Members {\n\t\t\tv.Write(msg)\n\t\t}\n\t}\n}", "func (s PushAllSender) Send(sender, data string, counter int) (int, error) {\n\tvar (\n\t\terr error\n\t\tpushers []pusherLib.Pusher\n\t\ttotal int\n\t\tfrom = 0\n\t\tsize = 10\n\t\tquery = make(map[string]interface{})\n\t\tworkdata map[string]string\n\t)\n\tif err = json.Unmarshal([]byte(data), &workdata); err != nil {\n\t\tlog.Printf(\"json.Unmarshal() failed (%s)\", err)\n\t\treturn 0, nil\n\t}\n\n\tif tag, ok := workdata[\"tag\"]; ok && len(tag) > 0 {\n\t\tquery[\"conjuncts\"] = []interface{}{\n\t\t\tmap[string]string{\"query\": \"tags:\" + tag},\n\t\t\tmap[string]string{\"query\": \"senders:\" + sender},\n\t\t}\n\t} else {\n\t\tquery[\"query\"] = \"senders:\" + sender\n\t}\n\n\tq, _ := json.Marshal(query)\n\n\tapi := s.w.GetAPI()\n\tif total, pushers, err = api.SearchPusher(string(q), from, size); err != nil {\n\t\treturn 10 * counter, nil\n\t}\n\ts.pushs(sender, pushers, workdata[\"data\"])\n\tfor from = size; from < total; from = from + size {\n\t\t_, pushers, _ = 
api.SearchPusher(string(q), from, size)\n\t\ts.pushs(sender, pushers, workdata[\"data\"])\n\t}\n\treturn 0, nil\n}", "func SendWorker(ch chan RemoteCommandMessage, broadlink broadlinkrm.Broadlink, wg *sync.WaitGroup) {\n\tfor msg := range ch {\n\t\tfor _, cmd := range msg.commands {\n\t\t\tswitch cmd.commandType {\n\t\t\tcase SendCommand:\n\t\t\t\terr := broadlink.Execute(cmd.target, cmd.data)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"Error executing command: %v\", err)\n\t\t\t\t}\n\t\t\tcase Pause:\n\t\t\t\tinterval, err := strconv.Atoi(cmd.data)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"Error processing pause interval (%v): %v\", cmd.data, err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\ttime.Sleep(time.Duration(interval) * time.Millisecond)\n\t\t\tcase shutdown:\n\t\t\t\twg.Done()\n\t\t\t\tlog.Print(\"SendWorker terminated\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}", "func (h *hub) run() {\n for {\n select{\n case s := <- h.register:\n // fmt.Println(\"wild client has appeared in the brush!\")\n clients := h.channels[s.channel]\n if clients == nil {\n clients = make(map[*client]bool)\n h.channels[s.channel] = clients\n }\n h.channels[s.channel][s.client] = true\n //send the latest data for room (empty string if new room)\n //s.client.send <- []byte(contents[s.channel])\n case s := <- h.unregister:\n clients := h.channels[s.channel]\n if clients != nil {\n if _, ok := clients[s.client]; ok{\n delete(clients, s.client)\n close(s.client.send)\n if len(clients) == 0 {\n delete(h.channels, s.channel)\n if len(contents[s.channel]) != 0 {\n //delete contents for channel if no more clients using it.\n delete(contents, s.channel)\n }\n }\n }\n }\n case m := <- h.broadcast:\n clients := h.channels[m.channel]\n // fmt.Println(\"broadcasting message to \", clients, \"data is: \", string(m.data))\n for c := range clients {\n fmt.Println(\"broadcasting message to \", c, \"data is: \", string(m.data))\n select {\n case c.send <- m.data:\n contents[m.channel] = 
string(m.data)\n default:\n close(c.send)\n delete(clients, c)\n if len(clients) == 0 {\n delete(h.channels, m.channel)\n if len(contents[m.channel]) != 0 {\n //delete contents for channel if no more clients using it.\n delete(contents, m.channel)\n }\n }\n }\n }\n }\n }\n}", "func SendMany(number int, pkt Generator) error {\n\tfor i := 0; i < number; i++ {\n\t\tif err := pkt.Send(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (c Client) Send(bucket string, aggCnt int64, metrics ...Metric) {\n\tif !c.opened {\n\t\treturn\n\t}\n\tif aggCnt == 0 {\n\t\treturn // nothing aggregated\n\t}\n\tsgl := smm.NewSGL(int64(msize))\n\tdefer sgl.Free()\n\n\tbucket = strings.ReplaceAll(bucket, \":\", \"_\")\n\tfor _, m := range metrics {\n\t\tc.appendMetric(m, sgl, bucket, aggCnt)\n\t}\n\tif sgl.Len() > 0 {\n\t\tbytes := sgl.Bytes()\n\t\tmsize = cos.Max(msize, len(bytes))\n\t\t_, err := c.conn.Write(bytes)\n\t\tif err != nil {\n\t\t\tif cnt := errcnt.Inc(); cnt > maxNumErrs {\n\t\t\t\tglog.Errorf(\"Sending to StatsD failed: %v (%d)\", err, cnt)\n\t\t\t\tc.conn.Close()\n\t\t\t\tc.opened = false\n\t\t\t}\n\t\t}\n\t}\n}", "func send_msg_per_time(number int, duration int) {\n\tsleep := time.Duration(int64(float64(duration) / float64(number) * 1000)) * time.Millisecond\n\n\tvar count = 0\n\n\tfor count < number {\n\t\tcount++\n\t\tfmt.Printf(\"Message number %s of %s (%s)\\n\", strconv.Itoa(count), strconv.Itoa(number), time.Now().Format(\"15:04:05.00000\"))\n\t\ttime.Sleep(sleep)\n\t}\n}", "func (sock *Server) sendroutine() {\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-sock.kill:\n\t\t\t\treturn\n\t\t\tcase smsg := <-sock.sendqueue:\n\t\t\t\tif smsg != \"\" {\n\t\t\t\t\tio.WriteString(sock.conn, smsg)\n\t\t\t\t\tdebug(\"<- \", smsg)\n\t\t\t\t\ttime.Sleep(sock.throttle)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n}", "func (c *client) send() {\nLoop:\n\tfor {\n\t\tmsg, err := readInput(c.Conn, \"\")\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tif 
msg == \"/quit\" {\n\t\t\tc.close()\n\t\t\tlog.Printf(\"%v has left..\", c.Name)\n\t\t\tbreak Loop\n\t\t}\n\n\t\t// Check if the message is a command.\n\t\tif c.command(msg) {\n\t\t\t// If the client is currently not connected to a room\n\t\t\tif c.Room == \"\"{\n\t\t\t\tlog.Printf(\"Error: \" + c.Name + \" is not in a room\")\n\t\t\t\twriteFormattedMsg(c.Conn, \"You are currently not in a room, please join a room.\")\n\t\t\t}else {\n\t\t\t\t// Client is connected to a room and the input is a regular message.\n\t\t\t\tlog.Printf(\"send: msg: %v from: %s\", msg, c.Name)\n\t\t\t\tsend := time.Now().Format(customTime) + \" * (\" + c.Name + \"): \\\"\" + msg + \"\\\"\"\n\n\t\t\t\t// Add message to the chat rooms history.\n\t\t\t\troomList[c.Room].history = append(roomList[c.Room].history, send)\n\n\t\t\t\t// Send out message to all clients connected to the current room.\n\t\t\t\tfor _, v := range roomList {\n\t\t\t\t\tfor k := range v.members {\n\t\t\t\t\t\tif k == c.Conn.RemoteAddr().String() {\n\t\t\t\t\t\t\tv.messages <- send\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func send(enc json.Encoder){\n\tif testing {log.Println(\"send\")}\n\tmsg:=new(Message)\n\tfor {\n\t\tmessage:= <-output\n\t\twhatever:=strings.Split(message,\"*\")\n\t\tmsg.Username=myName\n\t\tif message==\"disconnect\"{\n\t\t\tmsg.Kind=\"DISCONNECT\"\n\t\t\tenc.Encode(msg)\n\t\t\tbreak\n\t\t} else if len(whatever)>1 {\n\t\t\tmsg.Kind=\"PRIVATE\"\n\t\t\tmsg.Receiver=whatever[1]\n\t\t\tmsg.MSG=whatever[0]\n\t\t} else {\n\t\t\tmsg.Kind=\"PUBLIC\"\n\t\t\tmsg.MSG=whatever[0]\n\t\t}\n\t\tenc.Encode(msg)\n\t}\n\tos.Exit(1)\n}", "func (b *Benchttp) sendRequests() {\n\tdefer b.wg.Wait()\n\tfor n := uint64(0); (b.targetNumber == 0 || b.targetNumber > n) && !b.isDurationOver(); n++ {\n\t\tb.wg.Add(1)\n\t\tgo b.sendOne(<-b.idleClients)\n\t}\n}", "func (c *ControlConsumer) Send(message *ControlMessage) error {\n\n\tfor _, channel := range c.Channels {\n\t\tchannel <- 
message\n\t}\n\n\treturn nil\n}", "func (c app) sendLoop() {\n\tfor {\n\t\tif b, open := <-c.out; !open {\n\t\t\tInfo(\"Send loop closed\")\n\t\t\tbreak\n\t\t} else {\n\t\t\tc.Connection.Send(b)\n\t\t}\n\t}\n}", "func (p *kping) send(index int, addrBatchChan chan addrBatch) {\n\tstime := time.Now()\n\t// create ICMP Echo packet\n\tt := make([]byte, p.size)\n\tb := icmp.Echo{ID: icmpIDSeqInitNum + index, Data: t}\n\tm := icmp.Message{\n\t\tType: ipv4.ICMPTypeEcho,\n\t\tCode: 0,\n\t\tBody: &b,\n\t}\n\t// message cache\n\twms := make([]message, 0, p.sendOpts.BatchSize)\nL:\n\tfor {\n\t\tvar ab addrBatch\n\t\tvar ok bool\n\t\tselect {\n\t\tcase <-p.context.Done():\n\t\t\tbreak L // send timeout\n\t\tcase ab, ok = <-addrBatchChan:\n\t\t\tif !ok {\n\t\t\t\tbreak L // send done\n\t\t\t}\n\t\t}\n\t\t// get lock, at most one sent goroutine working\n\t\tp.sendLock.Lock()\n\t\tstime2 := time.Now()\n\t\tb.Seq = icmpIDSeqInitNum + ab.seq\n\t\t// fill icmp payload with current timestamp\n\t\tnsec := time.Now().UnixNano()\n\t\tfor i := uint64(0); i < uint64(p.size); i++ {\n\t\t\tif i < timeSliceLength {\n\t\t\t\tt[i] = byte((nsec >> ((7 - i) * timeSliceLength)) & 0xff)\n\t\t\t} else {\n\t\t\t\tt[i] = 1\n\t\t\t}\n\t\t}\n\t\tbytes, _ := (&m).Marshal(nil)\n\t\t// reuse message cache\n\t\twms2 := wms[0:0:len(ab.addrs)]\n\t\tfor _, addr := range ab.addrs {\n\t\t\tmsg := message{\n\t\t\t\tBuffers: [][]byte{bytes},\n\t\t\t\tAddr: addr,\n\t\t\t}\n\t\t\twms2 = append(wms2, msg)\n\t\t}\n\t\tvar num int\n\t\tvar err error\n\t\tfor {\n\t\t\t// blocking write multi messages\n\t\t\tnum, err = p.rawConn.writeBatch(wms2, 0)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"kping send: %d(%d), seq: %d, writeBatch failed: %v\\n\", index, p.sendOpts.Parallel, ab.seq, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\tif num != len(wms2) {\n\t\t\tfmt.Fprintf(os.Stderr, \"kping send: %d(%d), seq: %d, writeBatch parted: %d(%d)\\n\", index, p.sendOpts.Parallel, ab.seq, len(wms2), 
num)\n\t\t}\n\t\tdurTime := time.Since(stime2)\n\t\tif durTime > 50*time.Millisecond {\n\t\t\tfmt.Fprintf(os.Stderr, \"kping send: %d(%d), seq: %d, writeBatch %d(%d), usedTime: %s\\n\", index, p.sendOpts.Parallel, ab.seq, len(wms2), num, durTime)\n\t\t}\n\t\tfor _, msg := range wms2[0:num] {\n\t\t\taddr := msg.Addr.String()\n\t\t\tdurTime := time.Since(stime2)\n\t\t\tp.ipEventChan <- &ipEvent{\n\t\t\t\tip: addr,\n\t\t\t\tseq: b.Seq,\n\t\t\t\tsendDuration: durTime,\n\t\t\t}\n\t\t}\n\t\t// wait a little time\n\t\ttime.Sleep(p.sendOpts.WaitTimeout)\n\t\tp.sendLock.Unlock()\n\t}\n\tfmt.Fprintf(os.Stderr, \"kping send: %d(%d) done, usedTime: %s\\n\", index, p.sendOpts.Parallel, time.Since(stime))\n}", "func routineBuffer(){\n\tchannel := make(chan int,4)//make channel and add buffer\n\twg.Add(2)\n\t//receive from channel\n\tgo func(channel <- chan int){\n\t\t//loop throug the channel\n\t\tfor i := range channel{\n\t\t\tfmt.Println(i)\n\t\t}\n\t\twg.Done()\n\t}(channel)\n\t//sending to channel\n\tgo func(channel chan <- int){\n\t\tchannel <- 100\n\t\tchannel <- 200\n\t\tclose(channel) //after sending close the channel \n\t\twg.Done()\n\t}(channel)\n\twg.Wait()\n}", "func (c *Client) Send(ch <-chan *Message) error {\n\tfor msg := range ch {\n\t\t_, err := io.WriteString(c.conn, msg.Content)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (c *Channel) sendToChannel(msg Message) {\n\tfor _, user := range c.users {\n\t\tuser.ChannelOut() <- fmt.Sprintf(\"%s: %s\", msg.client.Nickname()+msg.msg)\n\t}\n}", "func (c Chan) Send() {\n\tc <- Empty\n}", "func (bili *BiliClient) sendJoinChannel(channelID int) error {\n\tbili.uid = rand.Intn(2000000000) + 1000000000\n\tbody := fmt.Sprintf(\"{\\\"roomid\\\":%d,\\\"uid\\\":%d}\", channelID, bili.uid)\n\treturn bili.sendSocketData(0, 16, bili.protocolVersion, 7, 1, body)\n}", "func (c *Client) Send(sender, receiver, message string) ([]string, error) {\n\tc.muconn.Lock()\n\tdefer c.muconn.Unlock()\n\n\tmsgType 
:= getMessageType(message)\n\tmsgParts := getMessageParts(message)\n\trefNum := rand.Intn(maxRefNum)\n\tids := make([]string, len(msgParts))\n\tc.rateLimiter.SetLimit(rate.Limit(c.GetTps()))\n\tfor i := 0; i < len(msgParts); i++ {\n\t\tsendPacket := encodeMessage(c.nextRefNum(), sender, receiver, msgParts[i], msgType,\n\t\t\tc.GetBillingID(), refNum, i+1, len(msgParts))\n\t\tc.rateLimiter.Wait(context.Background())\n\t\tc.Printf(\"sendPacket: %q\\n\", sendPacket)\n\t\tif _, err := c.writer.Write(sendPacket); err != nil {\n\t\t\tc.Printf(\"error writing sendPacket: %v\\n\", err)\n\t\t\treturn ids, err\n\t\t}\n\t\tif err := c.writer.Flush(); err != nil {\n\t\t\tc.Printf(\"error flushing sendPacket: %v\\n\", err)\n\t\t\treturn ids, err\n\t\t}\n\t\tselect {\n\t\tcase fields := <-c.submitSmRespCh:\n\t\t\tack := fields[ackIndex]\n\t\t\tif ack == negativeAck {\n\t\t\t\terrMsg := fields[len(fields)-errMsgOffset]\n\t\t\t\terrCode := fields[len(fields)-errCodeOffset]\n\t\t\t\tc.Printf(\"negative ack, errMsg: %v errCode: %v\\n\", errMsg, errCode)\n\t\t\t\treturn ids, &UcpError{errCode, errMsg}\n\t\t\t}\n\t\t\tid := fields[submitSmIdIndex]\n\t\t\tids[i] = id\n\t\tcase <-time.After(c.timeout):\n\t\t\tc.Printf(\"send timeout\\n\")\n\t\t\treturn ids, &UcpError{errCodeTimeout, \"Network time-out\"}\n\t\t}\n\t}\n\treturn ids, nil\n}", "func sendLoop() {\n\tif currentWebsocket == nil {\n\t\tcolorLog(\"[INFO] BW: No connection, wait for it.\\n\")\n\t\tcmd := <-connectChannel\n\t\tif \"QUIT\" == cmd.Action {\n\t\t\treturn\n\t\t}\n\t}\n\n\tfor {\n\t\tnext, ok := <-toBrowserChannel\n\t\tif !ok {\n\t\t\tcolorLog(\"[WARN] BW: Send channel was closed.\\n\")\n\t\t\tbreak\n\t\t}\n\n\t\tif \"QUIT\" == next.Action {\n\t\t\tbreak\n\t\t}\n\n\t\tif currentWebsocket == nil {\n\t\t\tcolorLog(\"[INFO] BW: No connection, wait for it.\\n\")\n\t\t\tcmd := <-connectChannel\n\t\t\tif \"QUIT\" == cmd.Action {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\twebsocket.JSON.Send(currentWebsocket, 
&next)\n\t\tcolorLog(\"[SUCC] BW: Sent %v.\\n\", next)\n\t}\n\n\tcolorLog(\"[WARN] BW: Exit send loop.\\n\")\n}", "func send(conn net.Conn, format string, v ...interface{}) {\n\tdebug(\"> \"+format, v...)\n\n\tfmt.Fprintf(conn, format+\"\\r\\n\", v...)\n\ttime.Sleep(700 * time.Millisecond) // Wait a bit so we don't flood\n}", "func (h *Hub) SendAll(messageType int, data []byte) {\n\tlogger.Info.Println(\"message delivered to all:\", string(data), \"message type:\", messageType)\n\tfor client, _ := range h.Clients {\n\t\tclient.WriteMessage(messageType, data)\n\t}\n}", "func sendBothWays(){\n\tch := make(chan int)\n\twg.Add(2)\n\t//receiving and sending to channel\n\tgo func(){\n\t\tval := <- ch\n\t\tfmt.Println(val)\n\t\tch <- 44\n\t\twg.Done()\n\t}()\n\t//sending to channel and receiving from channel\n\tgo func(){\n\t\tch <- 50\n\t\tfmt.Println(<- ch)\n\t\twg.Done()\n\t}()\n\twg.Wait()\n}", "func (gs *GRPCClient) Send(uid string, d []byte) error {\n\treturn constants.ErrNotImplemented\n}", "func (c *InfluxClient) sendData() {\n\tfor c.connected {\n\t\tselect {\n\t\tcase <-StopCtx.Done():\n\t\t\tglog.Info(\"cancelled, disconnecting from influx\")\n\t\t\tc.Close()\n\t\t\treturn\n\t\tcase bp := <-c.bpoints:\n\t\t\tif len(bp.Points()) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlog.Debug2f(\"%s - start sending bp to influx\", bp.reqID)\n\t\t\tstart := time.Now()\n\t\t\tfor i := 0; i <= c.WriteRetries; i++ {\n\t\t\t\t// total write attempts is at worst WriteRetries+1\n\t\t\t\tif i > 0 {\n\t\t\t\t\ttime.Sleep(time.Duration(1<<uint(i-1)) * time.Second)\n\t\t\t\t}\n\t\t\t\tlog.Debug2f(\"%s - try #%d/%d: writing to influx\", bp.reqID, i+1, c.WriteRetries+1)\n\t\t\t\tif err := c.Write(bp); err != nil {\n\t\t\t\t\tlog.Errorf(\"%s - bp len %d, try #%d/%d: influx write: %v\", bp.reqID, len(bp.Points()), i+1, c.WriteRetries+1, err)\n\t\t\t\t\tif strings.Contains(err.Error(), \"partial write\") {\n\t\t\t\t\t\tglog.Warningf(\">> %s - partial write err, res=%+v\", bp.reqID, 
bp.res)\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tlog.Debug2f(\"%s - try #%d/%d: influx write done in %dms\", bp.reqID, i+1, c.WriteRetries+1, time.Since(start)/time.Millisecond)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n}", "func (conn *Connection) sendGroupInformation(value interface{}, wg *sync.WaitGroup) {\n\tdefer func() {\n\t\twg.Done()\n\t\tutils.CatchPanic(\"connection.go sendGroupInformation()\")\n\t}()\n\tconn.SendInformation(value)\n}", "func (z *zfsctl) Send(ctx context.Context, name, options string, i string) *execute {\n\targs := []string{\"send\"}\n\tif len(options) > 0 {\n\t\targs = append(args, options)\n\t}\n\tif len(i) > 0 {\n\t\targs = append(args, \"-i \"+i)\n\t}\n\targs = append(args, name)\n\treturn &execute{ctx: ctx, name: z.cmd, args: args}\n}", "func send(t *testing.T, port, iMsg int) {\n\tdefer wg.Done()\n\tmsg := []byte(strconv.Itoa(iMsg))\n\taddress := fmt.Sprintf(\":%v\", port)\n\tif err := udpConnPool.Send(address, msg); err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func generate(ch chan int){\n\tfor i:=2;;i++{\n\t\t//Send a number\n\t\tch <-i\n\t}\n}", "func main(){\n\t\n\tfor i:=0;i<100;i++{\n\t\twg.Add(1)\n\t\tgo createClientAndSend(i)\t\n\t}\n\twg.Wait()\n\tdur := time.Since(start)\n\tlog.Println(\"------ 100 clients send a message cost time:\",dur)\n\tlog.Println(\"------ average client:\",dur/100)\n}", "func Send(SendChan chan MessageRawTask) {\n\tfor {\n\t\tsendTask := <-SendChan\n\t\tlog.Printf(\"send message task: %v\\n\", sendTask)\n\n\t\ttarget, exist := connect.Pool.GetPlayer(sendTask.Target)\n\t\tif !exist {\n\t\t\tlog.Println(\"Cannot find player connection: \", sendTask.Target)\n\t\t\treturn\n\t\t}\n\n\t\t_, err := (*target.Conn).Write([]byte(sendTask.Msg))\n\t\tif err != nil {\n\t\t\tlog.Println(\"Send message failed: \", err)\n\t\t}\n\t}\n}", "func SendToChannel(msg interface{}, code TaskCode) error {\n\tinitOnce.Do(func() {\n\t\tCh = make(chan map[TaskCode][]byte, 10)\n\t})\n\n\tbytes, err := 
json.Marshal(msg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tmapData := make(map[TaskCode][]byte)\n\tmapData[code] = bytes\n\tCh <- mapData\n\treturn nil\n}", "func (r *Raft) send(serverId int, msg interface{}) {\n\tport := r.ClusterConfigObj.Servers[serverId].LogPort\n\t//added to reduce conns!\n\tconn := r.getSenderConn(serverId)\n\tif conn == nil {\n\t\tconn = r.makeConnection(port)\n\t\tif conn == nil {\n\t\t\treturn //returns cz it will retry making connection in next HB\n\t\t} else {\n\t\t\tr.setSenderConn(serverId, conn)\n\t\t}\n\t}\n\t//if conn has been closed by server(but still present in my map) or it crashed(so this conn is meaningless now),\n\t//encode will throw error, then set the conn map as nil for this\n\terr := r.EncodeInterface(conn, msg)\n\tif err != 0 {\n\t\tr.setSenderConn(serverId, nil)\n\t}\n\n}", "func (si ServerInstance) SendAll(message Message) {\n\t\n\tsi.clientsMutex.Lock()\n\t\n\tfor id, conn := range si.Clients {\n\t\tencoder := gob.NewEncoder(conn)\n\t\tif err := encoder.Encode(message); err != nil {\n\t\t\tfmt.Printf(\n\t\t\t\t\"Server Error (message encoding ): encoding message = [%v] for sending to client with id = [%v], error = [%v]\\n\",\n\t\t\t\tmessage, id, err)\n\t\t}\n\t}\n\t\n\tsi.clientsMutex.Unlock()\n}", "func SendToQueue(comm communication.Communication, message []byte, queue string){\r\n for {\r\n if err := comm.Send(message,queue); err != nil {\r\n\r\n for{\r\n err = comm.Connect()\r\n if err == nil{\r\n break\r\n }\r\n // Sleep for a random time before trying again\r\n time.Sleep(time.Duration(rand.Intn(10))*time.Second)\r\n }\r\n }else{\r\n break\r\n }\r\n }\r\n}", "func main() {\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n ch := make(chan int)\n go Print(ch, &wg)\n for i := 1; i <= 1100; i++ {\n ch <- i\n }\n close(ch)\n wg.Wait()\n\n\n}", "func (s *Session) send() {\n\treadFrames := func() ([]sendReady, error) {\n\t\tvar frs []sendReady\n\t\tfor len(s.sendCh) > 0 {\n\t\t\tframe := <-s.sendCh\n\t\t\tfrs = append(frs, 
frame)\n\t\t}\n\t\tif len(frs) == 0 {\n\t\t\tselect {\n\t\t\tcase frame := <-s.sendCh:\n\t\t\t\tfrs = append(frs, frame)\n\t\t\tcase <-s.shutdownCh:\n\t\t\t\treturn frs, ErrSessionShutdown\n\t\t\t}\n\t\t}\n\t\treturn frs, nil\n\t}\n\tfor !s.IsShutdown() {\n\t\tfrs, err := readFrames()\n\t\tvar wbuffers net.Buffers\n\t\tif nil == err {\n\t\t\tfor _, frame := range frs {\n\t\t\t\twbuffers, err = encodeFrameToBuffers(wbuffers, frame.F, s.cryptoContext)\n\t\t\t\t//err = writeFrame(s.connWriter, frame.F, s.cryptoContext)\n\t\t\t\t//putBytesToPool(frame.F)\n\t\t\t\tif nil != err {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif len(wbuffers) > 0 {\n\t\t\t_, err = wbuffers.WriteTo(s.connWriter)\n\t\t}\n\t\tfor _, frame := range frs {\n\t\t\t//putBytesToPool(frame.F)\n\t\t\tif nil != frame.Err {\n\t\t\t\tasyncSendErr(frame.Err, err)\n\t\t\t}\n\t\t}\n\t\tif err != nil {\n\t\t\tif err != ErrSessionShutdown {\n\t\t\t\tlog.Printf(\"[ERR] pmux: Failed to write frames: %v\", err)\n\t\t\t}\n\t\t\ts.exitErr(err)\n\t\t\treturn\n\t\t}\n\t}\n}", "func (c *Client) send(stream gpb.GNMI_SubscribeServer) {\n\tfor {\n\t\tif err := c.processQueue(stream); err != nil {\n\t\t\tlog.Errorf(\"Client %s error: %v\", c, err)\n\t\t\treturn\n\t\t}\n\t}\n}", "func (c *Client) writePump() {\n\tticker := time.NewTicker(pingPeriod)\n\tdefer func() {\n\t\tticker.Stop()\n\t\tfmt.Println(\"Closing writePump\")\n\t}()\n\tfor {\n\t\tselect {\n\t\tcase message, ok := <-c.send:\n\t\t\tif !c.isAlive {\n\t\t\t\tfmt.Printf(\"Channel closed\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tc.conn.SetWriteDeadline(time.Now().Add(writeWait))\n\t\t\tif !ok {\n\t\t\t\t// The hub closed the channel.\n\t\t\t\tc.conn.WriteMessage(websocket.CloseMessage, []byte{})\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tw, err := c.conn.NextWriter(websocket.TextMessage)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tw.Write(message)\n\n\t\t\t// Add queued chat messages to the current websocket message.\n\t\t\tn := len(c.send)\n\t\t\tfor i := 0; 
i < n; i++ {\n\t\t\t\tw.Write(newline)\n\t\t\t\tw.Write(<-c.send)\n\t\t\t}\n\n\t\t\tif err := w.Close(); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\tcase <-ticker.C:\n\t\t\tc.conn.SetWriteDeadline(time.Now().Add(writeWait))\n\t\t\tif err := c.conn.WriteMessage(websocket.PingMessage, nil); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}", "func sendTestMetrics(store []Metric, flushChannel chan Metric, finishChannel chan int) {\n\tflushTicker := time.Tick(*flushTime)\n\n\tfor {\n\t\tselect {\n\t\tcase <-flushTicker:\n\t\t\tfor _, metric := range store {\n\t\t\t\tflushChannel <- metric\n\t\t\t}\n\t\tcase <-finishChannel:\n\t\t\treturn\n\t\t}\n\t}\n\n}", "func (c *ManetConnection) Send(bytes []byte) {\n\tfor neighbor := range myNeighbors {\n\t\tif dropManetChance() {\n\t\t\tcontinue\n\t\t}\n\t\traddr := ToUDPAddr(neighbor)\n\t\tif _, err := c.conn.WriteToUDP(bytes, raddr); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n}", "func (pp *PacketParser) send(sentence []byte, received time.Time) {\n\tcurrent := uint32(len(pp.async))\n\tif current > pp.maxInChan {\n\t\tpp.maxInChan = current\n\t}\n\tpp.async <- sendSentence{\n\t\treceived: received,\n\t\ttext: sentence,\n\t}\n}", "func (c *Client) Send(cmd string, args ...interface{}) error {\n\tif err := c.conn.Send(cmd, args...); err != nil {\n\t\tc.Close()\n\t\treturn err\n\t}\n\tc.Pipeline.Send++\n\treturn nil\n}", "func (c *Client) writePump() {\n\tticker := time.NewTicker(pingPeriod)\n\tdefer func() {\n\t\tticker.Stop()\n\t\tc.conn.Close()\n\t}()\n\tfor {\n\t\tselect {\n\t\tcase message, ok := <-c.send:\n\t\t\tc.conn.SetWriteDeadline(time.Now().Add(writeWait))\n\t\t\tif !ok {\n\t\t\t\t// The hub closed the channel.\n\t\t\t\tc.conn.WriteMessage(websocket.CloseMessage, []byte{})\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tw, err := c.conn.NextWriter(websocket.TextMessage)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tw.Write(message)\n\n\t\t\t//// Add queued chat messages to the current websocket message.\n\t\t\t//n := 
len(c.send)\n\t\t\t//for i := 0; i < n; i++ {\n\t\t\t//\tw.Write(newline)\n\t\t\t//\tw.Write(<-c.send)\n\t\t\t//}\n\n\t\t\tif err := w.Close(); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\tcase <-ticker.C:\n\t\t\tc.conn.SetWriteDeadline(time.Now().Add(writeWait))\n\t\t\tif err := c.conn.WriteMessage(websocket.PingMessage, nil); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}", "func (this *InfluxdbBackend) Send(items []*cmodel.MetricValue) {\n\tfor _, item := range items {\n\t\tmyItem := item\n\t\tmyItem.Timestamp = item.Timestamp\n\n\t\tisSuccess := this.queue.PushFront(myItem)\n\n\t\t// statistics\n\t\tif !isSuccess {\n\t\t\tthis.dropCounter.Incr()\n\t\t}\n\t}\n}", "func count(thing string, c chan string) {\n\tfor i := 1; i <= 5; i++ {\n\t\tmsg := thing + \" \" + strconv.Itoa(i)\n\n\t\t// Pass a msg into the channel\n\t\tc <- msg\n\n\t\ttime.Sleep(time.Millisecond * 500)\n\t}\n\n\t/*\n\t\tWe need to close the channel because once the execution of this\n\t\tfunction is finished, we expect that the reciever goroutine should not\n\t\twait for any new messages from the channel.\n\n\t\tREMEMBER: The reciever must never close() the channel because on the\n\t\treciever end, we do not know whether the channel is finished sending\n\t\tall of its messages. 
Calling close() in reciever will prematurely\n\t\tclose the channel however the sender may still try to send messages\n\t\twhich will result in a panic.\n\t*/\n\tclose(c)\n}", "func (r *room) send(data interface{}, without proto.UID) {\n\tpeers := r.getPeers()\n\tfor id, p := range peers {\n\t\tif len(without) > 0 && id != without {\n\t\t\tif err := p.send(data); err != nil {\n\t\t\t\tlog.Errorf(\"send data to peer(%s) error: %v\", p.uid, err)\n\t\t\t}\n\t\t}\n\t}\n}", "func (c *Client) writeRoutine() {\n\tticker := time.NewTicker(pingPeriod)\n\tdefer func() {\n\t\tticker.Stop()\n\t\tc.conn.Close()\n\t}()\n\n\tfor {\n\t\tselect {\n\t\tcase message, ok := <-c.send:\n\t\t\tc.conn.SetWriteDeadline(time.Now().Add(writeWait))\n\t\t\tif !ok {\n\t\t\t\t// The hub closed the channel.\n\t\t\t\tc.conn.WriteMessage(ws.CloseMessage, []byte{})\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tw, err := c.conn.NextWriter(ws.TextMessage)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tmsg, err := json.Marshal(message)\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"error:\", err)\n\t\t\t}\n\n\t\t\tw.Write(msg)\n\n\t\t\tif err := w.Close(); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\tcase <-ticker.C:\n\t\t\tc.conn.SetWriteDeadline(time.Now().Add(writeWait))\n\t\t\tif err := c.conn.WriteMessage(ws.PingMessage, nil); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}", "func (c *Connector) Send(data []byte, cnt int) error {\n\treturn nil\n}", "func write(text string, channel chan string) {\n\ttime.Sleep(time.Second * 5)\n\tfor i := 0; i < 5; i++ {\n\t\t// this is the line that sends a value to the channel\n\t\tchannel <- text\n\t\ttime.Sleep(time.Second)\n\t}\n\n\t// we can use close() to close connections\n}", "func send(w http.ResponseWriter, ch chan int){\r\n\tfor {\r\n\t\tselect {\r\n\t\tcase value := <-ch:\r\n\t\t\tif (value == 1){\r\n\t\t\t\tw.Write([]byte(\"hello http1\"))\r\n\t\t\t\tif f, ok := w.(http.Flusher); ok {\r\n\t\t\t\t\tf.Flush()\r\n\t\t\t\t}\r\n\r\n\t\t\t} else 
{\r\n\t\t\t\tw.Write([]byte(\"hello http2\"))\r\n\t\t\t\tif f, ok := w.(http.Flusher); ok {\r\n\t\t\t\t\tf.Flush()\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n}", "func (d device) deviceSend(interval int, ch chan<- string) {\n\t// check if device exists\n\tif getResp, err := d.getDevice(); err == nil {\n\t\tif getResp.StatusCode == 404 {\n\t\t\t// create the device because it does not exist\n\t\t\tif createResp, err := d.createDevice(); err == nil {\n\t\t\t\t// device was created\n\t\t\t\tch <- fmt.Sprint(\"Device created with response code \", createResp.StatusCode)\n\t\t\t} else {\n\t\t\t\t// there was an error creating the device\n\t\t\t\tch <- fmt.Sprintf(\"Could not create device %s. Error %v\", d.Name, err)\n\t\t\t}\n\n\t\t}\n\t} else {\n\t\t// there was an error calling getDevice\n\t\tch <- fmt.Sprintf(\"Could not get device %s. Error %v\", d.Name, err)\n\t}\n\n\tfor {\n\t\ttemperature := 20.0 + rand.Float64()*10\n\t\thumidity := 40.0 + rand.Float64()*10\n\t\tmessage := devicemessage{Temperature: temperature, Humidity: humidity}\n\n\t\t_, err := d.sendData(message)\n\t\tif err == nil {\n\t\t\tch <- fmt.Sprintf(\"Sent message from %s\", d.Name)\n\t\t} else {\n\t\t\t// there was a send error\n\t\t\tch <- fmt.Sprintf(\"Error sending message from %s. Error %v\", d.Name, err)\n\t\t}\n\n\t\ttime.Sleep(time.Duration(interval) * time.Second)\n\t}\n\n}", "func (gossiper *Gossiper) sendPacket() {\n\tfor packet := range gossiper.ToSend {\n\t\tbytes, err := protobuf.Encode(packet.GossipPacket)\n\t\tif util.CheckAndPrintError(err) {\n\t\t\tcontinue\n\t\t}\n\t\tgossiper.GossipServer.WriteToUDP(bytes, packet.Address)\n\t}\n}", "func (c Client) Send(bucket string, metrics ...Metric) {\n\tif !c.opened {\n\t\treturn\n\t}\n\n\tvar (\n\t\tt, prefix string\n\t\tpacket bytes.Buffer\n\t)\n\n\t// NOTE: \":\" is not allowed since it will be treated as value eg. 
in case daemonID is in form NUMBER:NUMBER\n\tbucket = strings.Replace(bucket, \":\", \"_\", -1)\n\n\tfor _, m := range metrics {\n\t\tswitch m.Type {\n\t\tcase Timer:\n\t\t\tt = \"ms\"\n\t\tcase Counter:\n\t\t\tt = \"c\"\n\t\tcase Gauge:\n\t\t\tt = \"g\"\n\t\tcase PersistentCounter:\n\t\t\tprefix = \"+\"\n\t\t\tt = \"g\"\n\t\tdefault:\n\t\t\tcmn.AssertMsg(false, fmt.Sprintf(\"Unknown type %+v\", m.Type))\n\t\t}\n\n\t\tif packet.Len() > 0 {\n\t\t\tpacket.WriteRune('\\n')\n\t\t}\n\t\tfmt.Fprintf(&packet, \"%s.%s.%s:%s%v|%s\", c.prefix, bucket, m.Name, prefix, m.Value, t)\n\t}\n\n\tif packet.Len() > 0 {\n\t\tc.conn.Write(packet.Bytes())\n\t}\n}", "func Produce(id string, n int, ch chan<- string, wg *sync.WaitGroup) {\n\tfor i := 0; i < n; i++ {\n\t\tRandomSleep(100) // Simulate time to produce data.\n\t\tch <- id + \":\" + strconv.Itoa(i)\n\t}\n\n\twg.Done()\n\n}", "func (d *Dao) SendWechatToGroup(c context.Context, chatid, msg string) (err error) {\n\tvar (\n\t\tnum int\n\t\treq *http.Request\n\t\tb = &bytes.Buffer{}\n\t\turl = _sagaWechatURL + \"/appchat/send\"\n\t\tbody = &ut.WechatGroupMsg{\n\t\t\tChatID: chatid,\n\t\t\tMsgType: \"text\",\n\t\t\tSafe: 0,\n\t\t}\n\t)\n\tmsgBlock := strings.Split(msg, \"\\n\")\n\tif len(msgBlock)%40 == 0 {\n\t\tnum = len(msgBlock)/40 - 1\n\t} else {\n\t\tnum = len(msgBlock) / 40\n\t}\n\tfor i := 0; i <= num; i++ {\n\t\tvar (\n\t\t\tres struct {\n\t\t\t\tCode int `json:\"code\"`\n\t\t\t\tMessage string `json:\"message\"`\n\t\t\t}\n\t\t)\n\t\tstart, end := 40*i, 40*(i+1)\n\t\tif end > len(msgBlock) {\n\t\t\tend = len(msgBlock)\n\t\t}\n\t\tbody.Text = &ut.TextContent{\n\t\t\tContent: strings.Join(msgBlock[start:end], \"\\n\") + fmt.Sprintf(\"\\n(%d/%d)\", i+1, num+1),\n\t\t}\n\t\tif err = json.NewEncoder(b).Encode(body); err != nil {\n\t\t\tlog.Error(\"apmSvc.SendWechatToGroup Error(%v)\", err)\n\t\t\treturn\n\t\t}\n\t\tif req, err = http.NewRequest(http.MethodPost, url, b); err != nil {\n\t\t\tlog.Error(\"apmSvc.SendWechatToGroup 
Error(%v)\", err)\n\t\t\treturn\n\t\t}\n\t\tif err = d.client.Do(c, req, &res); err != nil {\n\t\t\tlog.Error(\"apmSvc.SendWechatToGroup Error(%v)\", err)\n\t\t\treturn\n\t\t}\n\t\tif res.Code != 0 {\n\t\t\terr = fmt.Errorf(\"Http response Code(%v)!=0\", res.Code)\n\t\t\tlog.Error(\"apmSvc.SendWechatToGroup Error(%v)\", err)\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}", "func (c *Client) writePump(g ghess.Board) {\n\tticker := time.NewTicker(pingPeriod)\n\tdefer func() {\n\t\tticker.Stop()\n\t\tc.conn.Close()\n\t}()\n\tvar feedback string // For sending info to client\n\tfor {\n\t\tselect {\n\t\tcase message, ok := <-c.send:\n\t\t\tif !ok {\n\t\t\t\t// The hub closed the channel.\n\t\t\t\tc.write(websocket.CloseMessage, []byte{})\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tc.conn.SetWriteDeadline(time.Now().Add(writeWait))\n\t\t\tw, err := c.conn.NextWriter(websocket.TextMessage)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// read json from message\n\t\t\tmsg := inCome{}\n\t\t\tjson.Unmarshal([]byte(message), &msg)\n\t\t\tswitch msg.Type {\n\t\t\tcase \"move\":\n\t\t\t\tmv := &outGo{}\n\t\t\t\terr = g.ParseStand(msg.Origin,\n\t\t\t\t\tmsg.Destination)\n\t\t\t\tfen := g.Position()\n\t\t\t\tinfo := g.Stats()\n\t\t\t\tcheck, _ := strconv.ParseBool(info[\"check\"])\n\t\t\t\tcheckmate, _ := strconv.ParseBool(\n\t\t\t\t\tinfo[\"checkmate\"])\n\t\t\t\tif check {\n\t\t\t\t\tfeedback = \"Check!\"\n\t\t\t\t} else if checkmate {\n\t\t\t\t\tfeedback = \"Checkmate!\"\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\tmv = &outGo{\n\t\t\t\t\t\tType: \"move\",\n\t\t\t\t\t\tPosition: fen,\n\t\t\t\t\t\tError: err.Error(),\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tmv = &outGo{\n\t\t\t\t\t\tType: \"move\",\n\t\t\t\t\t\tPosition: fen,\n\t\t\t\t\t\tError: feedback,\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tfeedback = \"\"\n\t\t\t\t// Marshal into json response\n\t\t\t\tj, _ := json.Marshal(mv)\n\t\t\t\t// Update the DB\n\t\t\t\terr := db.Update(func(tx *bolt.Tx) error {\n\t\t\t\t\tbucket := 
tx.Bucket([]byte(\"challenges\"))\n\t\t\t\t\terr = bucket.Put([]byte(msg.Id), []byte(fen))\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\treturn nil\n\t\t\t\t})\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t}\n\t\t\t\t// Write Message to Clien\n\t\t\t\tw.Write([]byte(j))\n\t\t\tcase \"message\":\n\t\t\t\tchat := &outGo{\n\t\t\t\t\tType: \"message\",\n\t\t\t\t\tMessage: msg.Message,\n\t\t\t\t}\n\t\t\t\tj, _ := json.Marshal(chat)\n\t\t\t\tw.Write([]byte(j))\n\t\t\tcase \"connection\":\n\t\t\t\t// Should this be put elsewhere?\n\t\t\t\tchat := &outGo{\n\t\t\t\t\tType: \"connection\",\n\t\t\t\t\tMessage: msg.Message,\n\t\t\t\t}\n\t\t\t\tj, _ := json.Marshal(chat)\n\t\t\t\tw.Write([]byte(j))\n\t\t\t}\n\n\t\t\t// Close the writer\n\t\t\tif err := w.Close(); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\tcase <-ticker.C:\n\t\t\tif err := c.write(websocket.PingMessage,\n\t\t\t\t[]byte{}); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}", "func (p packet) send() {\n\tfor i, tr := range p.transactions {\n\t\ttr.resp <- p.replies[i]\n\t\tif tr.closechan {\n\t\t\tclose(tr.resp)\n\t\t}\n\t}\n}", "func send(channel chan bool, sender int, receiver int, command bool, loyal bool) {\n\tif loyal == false && receiver%2 == 0 {\n\t\t// Traitor general sending to an even-valued general flips the command.\n\t\tchannel <- !command\n\t} else {\n\t\tchannel <- command\n\t}\n}", "func (c *Client) Write(ctx context.Context, chain *Chain) {\n\tticker := time.NewTicker(c.pingPeriod)\n\tdefer func() {\n\t\tticker.Stop()\n\t\tc.conn.Close()\n\t}()\n\tfor {\n\t\tselect {\n\t\tcase message, ok := <-c.send:\n\t\t\tc.conn.SetWriteDeadline(time.Now().Add(c.writeWait))\n\t\t\tif !ok {\n\t\t\t\t// The hub closed the channel.\n\t\t\t\tc.conn.WriteMessage(websocket.CloseMessage, []byte{})\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tw, err := c.conn.NextWriter(websocket.TextMessage)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tw.Write(message)\n\n\t\t\t// Add 
queued chat messages to the current websocket message.\n\t\t\tn := len(c.send)\n\t\t\tfor i := 0; i < n; i++ {\n\t\t\t\t//w.Write(newline)\n\t\t\t\tw.Write(<-c.send)\n\t\t\t}\n\n\t\t\tif err := w.Close(); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\tcase <-ticker.C:\n\t\t\tc.conn.SetWriteDeadline(time.Now().Add(c.writeWait))\n\t\t\tif err := c.conn.WriteMessage(websocket.PingMessage, nil); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}", "func main() {\n\tsize := 3\n\n\tvar buffChan = make(chan int, size)\n\n\t// reader\n\tgo func(){\n\t\tfor {\n\t\t\t_ = <- buffChan\n\t\t\ttime.Sleep(time.Second * 3)\n\t\t}\n\t}()\n\n\t// writer\n\twriter := func() {\n\t\tfor i := 1 ; i <= 10; i++ {\n\t\t\tbuffChan <- i\n\t\t\tprintln(i)\n\t\t}\n\t}\n\n\twriter()\n}", "func (c *Client) writePump() {\n\tc.log.Trace(\"writePump\")\n\tticker := time.NewTicker(c.hub.options.PingPeriod)\n\tdefer func() {\n\t\tc.log.Trace(\"writePump closed\")\n\t\tif err := recover(); err != nil {\n\t\t\tif er, ok := err.(error); ok {\n\t\t\t\tc.log.Error(\"writePump error. 
\" + er.Error())\n\t\t\t}\n\t\t\tdebug.PrintStack()\n\t\t}\n\t\tticker.Stop()\n\t\tc.conn.Close()\n\t}()\n\tfor {\n\t\tselect {\n\t\tcase message, ok := <-c.send:\n\t\t\tc.conn.SetWriteDeadline(time.Now().Add(c.hub.options.WriteTimeout))\n\t\t\tif !ok {\n\t\t\t\t// The hub closed the channel.\n\t\t\t\tc.conn.WriteMessage(websocket.CloseMessage, []byte{})\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tw, err := c.conn.NextWriter(websocket.TextMessage)\n\t\t\tif err != nil {\n\t\t\t\tc.log.Error(\"writePump - get NextWriter error:\" + err.Error())\n\t\t\t\treturn\n\t\t\t}\n\t\t\t_, err = w.Write(message)\n\t\t\tif err != nil {\n\t\t\t\tc.log.Error(\"writePump - write msg error:\" + err.Error())\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// Add queued chat messages to the current websocket message.\n\t\t\t// n := len(c.send)\n\t\t\t// for i := 0; i < n; i++ {\n\t\t\t// \t// w.Write(newline)\n\t\t\t// \t_, err = w.Write(<-c.send)\n\t\t\t// \tif err != nil {\n\t\t\t// \t\tc.log.Error(\"writePump - write msg error:\" + err.Error())\n\t\t\t// \t}\n\t\t\t// }\n\n\t\t\tif err := w.Close(); err != nil {\n\t\t\t\tc.log.Error(\"writePump - writer close failed. 
error:\" + err.Error())\n\t\t\t\treturn\n\t\t\t}\n\t\tcase <-ticker.C:\n\t\t\tc.conn.SetWriteDeadline(time.Now().Add(c.hub.options.WriteTimeout))\n\t\t\tif err := c.conn.WriteMessage(websocket.PingMessage, nil); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}", "func (nd *NodeDiscover) sendHeartbeat() {\n\t// Manage wait group → use for closing UDP connection\n\tdefer nd.connWG.Done()\n\n\t// Send loop\n\tfor {\n\t\tselect {\n\t\tcase <- nd.closed:\n\t\t\tlog.Debug(\"node-discovery: sendHeartbeat routine finished\")\n\t\t\treturn\n\t\tcase <- nd.sendTicker.C:\n\t\t\t// Create heartbeat message\n\t\t\tmsg := nd.generateHeartbeat()\n\n\t\t\t// Serialize it\n\t\t\tdata, err := msgpack.Marshal(msg)\n\t\t\tif err != nil {\n\t\t\t\tlog.Warnf(\"node-discovery: Fail to encode heartbeat: %v\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Sent id\n\t\t\t_, err2 := nd.conn.WriteTo(data, nd.addr)\n\t\t\tif err2 != nil {\n\t\t\t\tlog.Warnf(\"node-discovery: Fail to sent packet: %v\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlog.Debugf(\"node-discovery: heartbeat sent with following services: %v\", msg.Services)\n\t\t}\n\t}\n}", "func sendOrdersWhenComeback(netCh config.NetworkChannels, elevatorMap map[int]*config.Elevator, comebackElev string, senderElev int, cabOrdersBackup map[string][config.NUM_FLOORS]bool){\n comebackElevInt,_ := strconv.Atoi(comebackElev)\n order := config.Order{}\n order.Sender_elev_ID = senderElev\n order.Sender_elev_rank = elevatorMap[senderElev].ElevRank\n order.Should_add = true\n order.Receiver_elev = comebackElevInt\n\n for i := 0; i < config.NUM_FLOORS; i++{\n if cabOrdersBackup[comebackElev][i]{\n \torder.Floor = i\n \torder.ButtonType = elevio.BT_Cab\n \tfor i:=0;i<config.NUM_PACKETS_SENT;i++{\n \tnetCh.TransmittOrderCh <- order\n } \n }\n for j := elevio.BT_HallUp; j != elevio.BT_Cab; j++{\n if elevatorMap[senderElev].HallOrders[i][j]{\n order.Floor = i\n order.ButtonType = j\n for i:=0;i<config.NUM_PACKETS_SENT;i++{\n \tnetCh.TransmittOrderCh 
<- order\n } \n }\n }\n }\n}", "func SendHeartbeat(){\n\n\tfor{\n\t/*\t\n\t\trandNum := rand.Intn(100) \n\t\tif randNum > 97 && r.id == r.clusterConfig.LeaderId { \n\t\t\t//r.clusterConfig.Servers[r.id].isLeader=2\t\t\t//break ThisLoop \n\t\t\ttime.Sleep(100 * time.Second)\n\t\t\t}\n\t*/\n\t\tselect{\n\t\t\t\n\t\t\tcase <-raft.C1:\n\t\t\t\t//log.Println(\"in send SendHeartbeat-Append\")\n\t\t\t\n\t\t\tcase <-raft.C2:\n\t\t\t\t//log.Println(\"in send SendHeartbeat-commit\")\n\t\t\t\n\t\t\tcase <-time.After(100*time.Millisecond):\n\t\t\t\tif r.clusterConfig.Servers[r.id].isLeader == 1 {\n\t\t\t\t\tfor i:=0; i<N; i++ {\n\t\t\t\t\t\t\tif i == r.id { continue }\t\t\t\t\n\t\t\t\t\t\t\targs := &HeartbeatRPCArgs{r.id,r.currentTerm}\t\t\t\t\n\t\t\t\t\t\t\tvar reply string\t\t\t\t\n\t\t\t\t\t\t\tvar err error = nil\n\t\t\t\t\t\t\trr := make(chan error, 1)\n\t\t\t\t\t\t\tgo func() { rr <- r.clusterConfig.Servers[i].Client.Call(\"RPC.HeartbeatRPC\", args, &reply) } ()\n\t\t\t\t\t\t\tselect{\n\t\t\t\t\t\t\t\tcase err = <-rr:\n\t\t\t\t\t\t\t\t\tif err != nil {\t\n\t\t\t\t\t\t\t\t\t\tlog.Println(\"[Server] HeartbeatRPC Error:\", err) \n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tcase <-time.After(20*time.Millisecond):\n\t\t\t\t\t\t\t\t//\tlog.Println(\"Heartbeat reply not got \",i)\n\t\t\t\t\t\t\t\t\tcontinue //log.Println(\"Heartbeat reply not got \",i)\n\t\t\t\t\t\t\t}// inner select loop\n\t\t\t\t\t}//end of inner for \n\t\t\t\t}//end of if\n\t\t}//end of select\n\t}//end of for loop\n}", "func main() {\n\tch := make(chan int)\n\tgo Print(ch)\n\tfor i := 1; i <= 11; i++ {\n\t\tch <- i\n\t}\n\tclose(ch)\n\n}", "func (transporter *IPCTransporter) Send(channel string, data interface{}) {\n\tswitch channel {\n\tcase \"status\":\n\t\tstatus, ok := data.(structures.Status)\n\t\tif ok {\n\t\t\ttransporter.SendJSON(IPCMessage{\n\t\t\t\tChannel: \"axm:monitor\",\n\t\t\t\tPayload: status.Process[0].AxmMonitor,\n\t\t\t})\n\t\t\tfor _, action := range status.Process[0].AxmActions 
{\n\t\t\t\ttransporter.SendJSON(IPCMessage{\n\t\t\t\t\tChannel: \"axm:action\",\n\t\t\t\t\tPayload: action,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\tdefault:\n\t\ttransporter.SendJSON(IPCMessage{\n\t\t\tChannel: channel,\n\t\t\tPayload: data,\n\t\t})\n\t}\n}", "func (batcher *MessageBatcher) sendLoop() {\n\tmessagesToBeAggregated := make([]*SendResult, 0, batcher.maxBuffer)\n\tvar messagesToSend [][]byte\n\tmessagesToSend = make([][]byte, 0, batcher.maxBuffer)\n\ttotalMessages := 0\n\trunning := true\n\tvar request *SendResult\n\n\tvar sleepTimer = time.NewTimer(retry_backoff_interval)\n\tvar closeDoneChannel chan interface{}\n\tfor running {\n\t\t// Blocking wait for the first message\n\t\tvar ok bool\n\t\tselect {\n\t\tcase closeDoneChannel = <-batcher.closeChannel:\n\t\tcase request, ok = <-batcher.pendingChannel:\n\t\t\tif ok {\n\t\t\t\t// We have a new request\n\t\t\t\tif batcher.debug {\n\t\t\t\t\tlog.Printf(\"Batcher receieved message - starting accumulation\\n\", request, ok)\n\t\t\t\t}\n\t\t\t\tmessagesToBeAggregated = append(messagesToBeAggregated, request)\n\t\t\t\ttotalMessages++\n\t\t\t\tgathering := true\n\t\t\t\t// Now pull as many requests as possible. 
When there are none left or we have reached our limit, send them.\n\t\t\t\tfor gathering {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase request, ok = <-batcher.pendingChannel:\n\t\t\t\t\t\tif ok {\n\t\t\t\t\t\t\t// We have additional requests, queue them.\n\t\t\t\t\t\t\tmessagesToBeAggregated = append(messagesToBeAggregated, request)\n\t\t\t\t\t\t\ttotalMessages++\n\t\t\t\t\t\t\tif totalMessages >= batcher.maxBuffer {\n\t\t\t\t\t\t\t\tgathering = false\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tgathering = false\n\t\t\t\t\t\t}\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tgathering = false\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif batcher.debug {\n\t\t\t\t\tlog.Printf(\"Batcher accumulation completed with %v messages\\n\", len(messagesToBeAggregated))\n\t\t\t\t}\n\n\t\t\t\t// Build the messages to send.\n\t\t\t\tfor _, request := range messagesToBeAggregated {\n\t\t\t\t\tmessagesToSend = append(messagesToSend, request.msg)\n\t\t\t\t}\n\n\t\t\t\tretry := true\n\t\t\t\tretryAttempts := 0\n\t\t\t\tvar err error\n\t\t\t\tvar ids []int64\n\n\t\t\t\tfor retry {\n\t\t\t\t\tids, err = batcher.client.SendMessages(batcher.topic, messagesToSend, batcher.waitForCommit)\n\t\t\t\t\t// Cancel the retry if we don't have a transitory error\n\t\t\t\t\tif err == nil {\n\t\t\t\t\t\tretry = false\n\t\t\t\t\t} else if err != ErrNoNodesAvailable {\n\t\t\t\t\t\tretry = false\n\t\t\t\t\t}\n\t\t\t\t\t// If we have a retry, go to sleep for a while\n\t\t\t\t\tif retry {\n\t\t\t\t\t\tretryAttempts++\n\t\t\t\t\t\t// Check whether we have retried sufficiently.\n\t\t\t\t\t\tif batcher.maxRetries != UNLIMITED_RETRIES && retryAttempts > batcher.maxRetries {\n\t\t\t\t\t\t\tretry = false\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tduration := time.Duration(retryAttempts) * retry_backoff_interval\n\t\t\t\t\t\t\tif duration > retry_backoff_limit {\n\t\t\t\t\t\t\t\tduration = retry_backoff_limit\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tsleepTimer.Reset(duration)\n\t\t\t\t\t\t\t// Try sleeping\n\t\t\t\t\t\t\tselect {\n\t\t\t\t\t\t\tcase 
closeDoneChannel = <-batcher.closeChannel:\n\t\t\t\t\t\t\t\tif batcher.debug {\n\t\t\t\t\t\t\t\t\tlog.Printf(\"MessageBatcher closed while sleeping for retry - aborting.\\n\")\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t// Set maxRetries to zero to avoid any further retry attempts during our shutdown\n\t\t\t\t\t\t\t\tbatcher.maxRetries = 0\n\t\t\t\t\t\t\t\terr = ErrMessageBatcherClosed\n\t\t\t\t\t\t\t\tretry = false\n\t\t\t\t\t\t\tcase <-sleepTimer.C:\n\t\t\t\t\t\t\t\tif batcher.debug {\n\t\t\t\t\t\t\t\t\tlog.Printf(\"MessageBatcher retrying after %v sleeping - attempt %v.\\n\", duration, retryAttempts)\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// Now update all senders with the results.\n\t\t\t\tfor i, request := range messagesToBeAggregated {\n\t\t\t\t\t// Only send the results to those that have provided a channel\n\t\t\t\t\tif request.resultChan != nil {\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\trequest.index = 0\n\t\t\t\t\t\t\trequest.err = err\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\trequest.index = ids[i]\n\t\t\t\t\t\t\trequest.err = nil\n\t\t\t\t\t\t}\n\t\t\t\t\t\trequest.resultChan <- request\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// If we have an empty channel, keep the object in the pool for future async use\n\t\t\t\t\t\trequest.clean()\n\t\t\t\t\t\tbatcher.asyncSendResultPool.Put(request)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tmessagesToBeAggregated = messagesToBeAggregated[:0]\n\t\t\t\tmessagesToSend = messagesToSend[:0]\n\t\t\t\ttotalMessages = 0\n\t\t\t} else {\n\t\t\t\tif closeDoneChannel != nil {\n\t\t\t\t\t// Queue is empty and closed, time to quit\n\t\t\t\t\trunning = false\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tcloseDoneChannel <- struct{}{}\n}", "func ws_SendMsg(ws *websocket.Conn, send_channel SendChannel) {\n\tfor {\n\t\tselect {\n\t\tcase send_msg := <-send_channel.containers:\n\t\t\tlog.Printf(\"[%s] containers sendMessage= \", __FILE__, send_msg)\n\t\t\twebsocket.JSON.Send(ws, send_msg)\n\t\tcase send_msg := 
<-send_channel.updateinfo:\n\t\t\tlog.Printf(\"[%s] update sendMessage=\", __FILE__, send_msg)\n\t\t}\n\t}\n}", "func (c *Client) writePump() {\n\tticker := time.NewTicker(pingPeriod)\n\tdefer func() {\n\t\tticker.Stop()\n\t\tc.ws.Close()\n\t}()\n\tfor {\n\t\tselect {\n\t\tcase message, ok := <-c.send:\n\t\t\t// spew.Dump(c.clientID)\n\t\t\tc.ws.SetWriteDeadline(time.Now().Add(writeWait))\n\t\t\tif !ok {\n\t\t\t\t// The hub closed the channel.\n\t\t\t\tc.ws.WriteMessage(websocket.CloseMessage, []byte{})\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tw, err := c.ws.NextWriter(websocket.TextMessage)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tw.Write(message)\n\n\t\t\t// Add queued chat messages to the current websocket message.\n\t\t\tn := len(c.send)\n\t\t\tfor i := 0; i < n; i++ {\n\t\t\t\tw.Write(newline)\n\t\t\t\tw.Write(<-c.send)\n\t\t\t}\n\n\t\t\tif err := w.Close(); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\tcase <-ticker.C:\n\t\t\tc.ws.SetWriteDeadline(time.Now().Add(writeWait))\n\t\t\tif err := c.ws.WriteMessage(websocket.PingMessage, nil); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}", "func (c WriteChan) Send() {\n\tc <- Empty\n}", "func (m *PushOver) Send(title, msg string) {\n\tmessage := pushover.NewMessageWithTitle(msg, title)\n\tmessage.DeviceName = m.device\n\n\tfor _, id := range m.recipients {\n\t\tgo func(id string) {\n\t\t\tm.log.DEBUG.Printf(\"sending to %s\", id)\n\n\t\t\trecipient := pushover.NewRecipient(id)\n\t\t\tif _, err := m.app.SendMessage(message, recipient); err != nil {\n\t\t\t\tm.log.ERROR.Print(err)\n\t\t\t}\n\t\t}(id)\n\t}\n}", "func (w *chanWriter) Write(data []byte) (written int, err error) {\n\tfor len(data) > 0 {\n\t\tfor w.rwin < 1 {\n\t\t\twin, ok := <-w.win\n\t\t\tif !ok {\n\t\t\t\treturn 0, io.EOF\n\t\t\t}\n\t\t\tw.rwin += win\n\t\t}\n\t\tn := min(len(data), w.rwin)\n\t\tpeersId := w.clientChan.peersId\n\t\tpacket := []byte{\n\t\t\tmsgChannelData,\n\t\t\tbyte(peersId >> 24), byte(peersId >> 16), byte(peersId >> 8), 
byte(peersId),\n\t\t\tbyte(n >> 24), byte(n >> 16), byte(n >> 8), byte(n),\n\t\t}\n\t\tif err = w.clientChan.writePacket(append(packet, data[:n]...)); err != nil {\n\t\t\tbreak\n\t\t}\n\t\tdata = data[n:]\n\t\tw.rwin -= n\n\t\twritten += n\n\t}\n\treturn\n}", "func fastSender(c chan<- int) {\n\tfor i := 0; ; i++ {\n\t\ttime.Sleep(500 * time.Millisecond)\n\t\tc <- i\n\t}\n}", "func (s Broker) Send(datum interface{}) {\n\tselect {\n\tcase s <- datum:\n\tdefault:\n\t}\n}", "func (rf *Raft) sendEntries() {\n\trf.mu.Lock()\n\tlastLog := rf.getLastLog()\n\trf.mu.Unlock()\n\tfor i := range rf.peers {\n\t\tif i == rf.me {\n\t\t\tcontinue\n\t\t}\n\t\trf.mu.Lock()\n\t\tmatchIndex := rf.LeaderStatus.matchIndex[i]\n\t\tnextIndex := rf.LeaderStatus.nextIndex[i]\n\t\t//DPrintf(\"send entry peer=%v matchIndex=%v lastIndex=%v nextIndex=%v\", i, matchIndex, lastLog.Index, nextIndex)\n\t\tvar req *AppendEntriesArgs\n\t\t// TODO: whether delete ???\n\t\tif matchIndex >= lastLog.Index {\n\t\t\treq = &AppendEntriesArgs{\n\t\t\t\tType: HeartBeat,\n\t\t\t\tTerm: rf.currentTerm,\n\t\t\t\tLeaderId: rf.peerId,\n\t\t\t\tLeaderCommit: rf.commitIndex,\n\t\t\t}\n\t\t\tDPrintf(\"peer=%v send heartbeat to peer=%v\", rf.me, i)\n\t\t} else {\n\t\t\t// TODO: if the logEntries be cutoff after make snapshot, we should shift the start index\n\t\t\tlogEntries := rf.logEntries[matchIndex+1 : min(nextIndex+1, len(rf.logEntries))]\n\t\t\tprevLog := rf.logEntries[matchIndex]\n\t\t\treq = &AppendEntriesArgs{\n\t\t\t\tType: Entries,\n\t\t\t\tTerm: rf.currentTerm,\n\t\t\t\tLeaderId: rf.peerId,\n\t\t\t\tPrevLogIndex: prevLog.Index,\n\t\t\t\tPrevLogTerm: prevLog.Term,\n\t\t\t\tLogEntries: logEntries, // TODO: refine to control each time send message count (case 2B)\n\t\t\t\tLeaderCommit: rf.commitIndex,\n\t\t\t}\n\t\t\t//DPrintf(\"peer=%v send entry=%v to=%v next=%v logEntrySize=%d\", rf.me, rf.logEntries[matchIndex+1 : nextIndex+1], i, nextIndex, len(logEntries))\n\t\t}\n\t\trf.mu.Unlock()\n\t\tgo 
rf.sendAppendEntries(i, req, &AppendEntriesReply{})\n\t}\n}", "func (c *Client) writePump() {\n\tticker := time.NewTicker(pingPeriod)\n\tdefer func() {\n\t\tticker.Stop()\n\t\tc.conn.Close()\n\t}()\n\tfor {\n\t\tselect {\n\t\tcase message, ok := <-c.send:\n\t\t\tc.conn.SetWriteDeadline(time.Now().Add(writeWait))\n\t\t\tif !ok {\n\t\t\t\t// The hub closed the channel.\n\t\t\t\tc.conn.WriteMessage(websocket.CloseMessage, []byte{})\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tw, err := c.conn.NextWriter(websocket.TextMessage)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tw.Write(message)\n\n\t\t\t// Add queued chat messages to the current websocket message.\n\t\t\tn := len(c.send)\n\t\t\tfor i := 0; i < n; i++ {\n\t\t\t\tw.Write(newline)\n\t\t\t\tw.Write(<-c.send)\n\t\t\t}\n\n\t\t\tif err := w.Close(); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\tcase <-ticker.C:\n\t\t\tc.conn.SetWriteDeadline(time.Now().Add(writeWait))\n\t\t\tif err := c.conn.WriteMessage(websocket.PingMessage, nil); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}", "func producer_fo(ch chan <- int) {\n\tfor {\n\t\t// sleep for some random time\n\t\tsleep_fo()\n\n\t\t// Generate some random number\n\t\tn := rand.Intn(100)\n\n\t\tfmt.Printf(\"Sending %d\\n\", n)\n\t\tch <- n\n\t}\n}" ]
[ "0.6796055", "0.6710032", "0.6474751", "0.64737123", "0.6444497", "0.6394851", "0.6377518", "0.6331639", "0.6326792", "0.62756455", "0.621305", "0.61896396", "0.61523896", "0.6148293", "0.614084", "0.613628", "0.6135475", "0.6125786", "0.61161226", "0.6088589", "0.6087523", "0.60781115", "0.6060515", "0.60510224", "0.60409206", "0.6023712", "0.60034996", "0.600312", "0.5996741", "0.59926516", "0.5967275", "0.5963885", "0.5960059", "0.5957914", "0.59392196", "0.593405", "0.5931063", "0.5928694", "0.5921965", "0.590902", "0.5887981", "0.5879869", "0.58632946", "0.58375835", "0.5822932", "0.58192366", "0.5815664", "0.57886976", "0.5787399", "0.5773063", "0.5771948", "0.57693404", "0.57667196", "0.57640207", "0.57585233", "0.5754725", "0.5745662", "0.5735458", "0.57331556", "0.5731359", "0.57187563", "0.571016", "0.5708194", "0.5704842", "0.5703479", "0.5696485", "0.5687924", "0.5684436", "0.56812966", "0.56761956", "0.5670898", "0.5666404", "0.56655085", "0.56580853", "0.5653199", "0.5651017", "0.56373674", "0.562079", "0.5620115", "0.5617262", "0.5611206", "0.56110156", "0.5604386", "0.56039786", "0.5599588", "0.55965286", "0.55914986", "0.55900806", "0.5588038", "0.5584768", "0.558473", "0.55825734", "0.5582091", "0.5579598", "0.5575009", "0.55605245", "0.5557113", "0.5556144", "0.5553508", "0.5543982", "0.554274" ]
0.0
-1
PollUntilDone will poll the service endpoint until a terminal state is reached or an error is received. freq: the time to wait between intervals in absence of a RetryAfter header. Allowed minimum is one second. A good starting value is 30 seconds. Note that some resources might benefit from a different value.
func (l AdaptiveNetworkHardeningsEnforcePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (AdaptiveNetworkHardeningsEnforceResponse, error) { respType := AdaptiveNetworkHardeningsEnforceResponse{} resp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil) if err != nil { return respType, err } respType.RawResponse = resp return respType, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (l WatchersClientGetTroubleshootingPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (WatchersClientGetTroubleshootingResponse, error) {\n\trespType := WatchersClientGetTroubleshootingResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.TroubleshootingResult)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l WatchersClientGetFlowLogStatusPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (WatchersClientGetFlowLogStatusResponse, error) {\n\trespType := WatchersClientGetFlowLogStatusResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.FlowLogInformation)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l ApplicationGatewaysClientStopPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ApplicationGatewaysClientStopResponse, error) {\n\trespType := ApplicationGatewaysClientStopResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l ConnectionMonitorsClientStopPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ConnectionMonitorsClientStopResponse, error) {\n\trespType := ConnectionMonitorsClientStopResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l IntegrationRuntimesClientStopPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (IntegrationRuntimesClientStopResponse, error) {\n\trespType := IntegrationRuntimesClientStopResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l 
IntegrationRuntimesClientStopPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (IntegrationRuntimesClientStopResponse, error) {\n\trespType := IntegrationRuntimesClientStopResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l WatchersClientGetNextHopPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (WatchersClientGetNextHopResponse, error) {\n\trespType := WatchersClientGetNextHopResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.NextHopResult)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l TokensDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (TokensDeleteResponse, error) {\n\trespType := TokensDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l WatchersClientGetTroubleshootingResultPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (WatchersClientGetTroubleshootingResultResponse, error) {\n\trespType := WatchersClientGetTroubleshootingResultResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.TroubleshootingResult)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l LiveEventsClientStopPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (LiveEventsClientStopResponse, error) {\n\trespType := LiveEventsClientStopResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l DevicesClientScanForUpdatesPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) 
(DevicesClientScanForUpdatesResponse, error) {\n\trespType := DevicesClientScanForUpdatesResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l PacketCapturesClientStopPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (PacketCapturesClientStopResponse, error) {\n\trespType := PacketCapturesClientStopResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l StreamingEndpointsClientStopPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (StreamingEndpointsClientStopResponse, error) {\n\trespType := StreamingEndpointsClientStopResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l ConnectionMonitorsClientStartPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ConnectionMonitorsClientStartResponse, error) {\n\trespType := ConnectionMonitorsClientStartResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l TriggersClientStopPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (TriggersClientStopResponse, error) {\n\trespType := TriggersClientStopResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l BandwidthSchedulesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (BandwidthSchedulesClientDeleteResponse, error) {\n\trespType := BandwidthSchedulesClientDeleteResponse{}\n\tresp, err := 
l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l ApplicationGatewaysClientStartPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ApplicationGatewaysClientStartResponse, error) {\n\trespType := ApplicationGatewaysClientStartResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l WatchersClientSetFlowLogConfigurationPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (WatchersClientSetFlowLogConfigurationResponse, error) {\n\trespType := WatchersClientSetFlowLogConfigurationResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.FlowLogInformation)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l TokensUpdatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (TokensUpdateResponse, error) {\n\trespType := TokensUpdateResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.Token)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l OutboundFirewallRulesDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (OutboundFirewallRulesDeleteResponse, error) {\n\trespType := OutboundFirewallRulesDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l KustoPoolsClientStopPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (KustoPoolsClientStopResponse, error) {\n\trespType := KustoPoolsClientStopResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, 
err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l AzureFirewallsClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (AzureFirewallsClientDeleteResponse, error) {\n\trespType := AzureFirewallsClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l DevicesClientInstallUpdatesPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (DevicesClientInstallUpdatesResponse, error) {\n\trespType := DevicesClientInstallUpdatesResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l AlertsSimulatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (AlertsSimulateResponse, error) {\n\trespType := AlertsSimulateResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l EnergyServicesClientCreatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (EnergyServicesClientCreateResponse, error) {\n\trespType := EnergyServicesClientCreateResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.EnergyService)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l RouteFilterRulesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (RouteFilterRulesClientDeleteResponse, error) {\n\trespType := RouteFilterRulesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l WatchersClientGetNetworkConfigurationDiagnosticPollerResponse) 
PollUntilDone(ctx context.Context, freq time.Duration) (WatchersClientGetNetworkConfigurationDiagnosticResponse, error) {\n\trespType := WatchersClientGetNetworkConfigurationDiagnosticResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.ConfigurationDiagnosticResponse)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l DevicesClientDownloadUpdatesPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (DevicesClientDownloadUpdatesResponse, error) {\n\trespType := DevicesClientDownloadUpdatesResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l IntegrationRuntimesClientStartPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (IntegrationRuntimesClientStartResponse, error) {\n\trespType := IntegrationRuntimesClientStartResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.IntegrationRuntimeStatusResponse)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l IntegrationRuntimesClientStartPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (IntegrationRuntimesClientStartResponse, error) {\n\trespType := IntegrationRuntimesClientStartResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.IntegrationRuntimeStatusResponse)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l StreamingEndpointsClientStartPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (StreamingEndpointsClientStartResponse, error) {\n\trespType := StreamingEndpointsClientStartResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, 
nil\n}", "func (l PolicyStatesClientTriggerSubscriptionEvaluationPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (PolicyStatesClientTriggerSubscriptionEvaluationResponse, error) {\n\trespType := PolicyStatesClientTriggerSubscriptionEvaluationResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l ExpressRoutePortsClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ExpressRoutePortsClientDeleteResponse, error) {\n\trespType := ExpressRoutePortsClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l LiveEventsClientResetPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (LiveEventsClientResetResponse, error) {\n\trespType := LiveEventsClientResetResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l RunsCancelPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (RunsCancelResponse, error) {\n\trespType := RunsCancelResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l InterfacesClientGetEffectiveRouteTablePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (InterfacesClientGetEffectiveRouteTableResponse, error) {\n\trespType := InterfacesClientGetEffectiveRouteTableResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.EffectiveRouteListResult)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l IPFirewallRulesClientDeletePollerResponse) 
PollUntilDone(ctx context.Context, freq time.Duration) (IPFirewallRulesClientDeleteResponse, error) {\n\trespType := IPFirewallRulesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.Object)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l LiveEventsClientStartPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (LiveEventsClientStartResponse, error) {\n\trespType := LiveEventsClientStartResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l TablesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (TablesClientDeleteResponse, error) {\n\trespType := TablesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l EnergyServicesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (EnergyServicesClientDeleteResponse, error) {\n\trespType := EnergyServicesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l FlowLogsClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (FlowLogsClientDeleteResponse, error) {\n\trespType := FlowLogsClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l SharesClientRefreshPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (SharesClientRefreshResponse, error) {\n\trespType := SharesClientRefreshResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, 
freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l TriggersClientStartPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (TriggersClientStartResponse, error) {\n\trespType := TriggersClientStartResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l RouteTablesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (RouteTablesClientDeleteResponse, error) {\n\trespType := RouteTablesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l VPNGatewaysClientResetPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (VPNGatewaysClientResetResponse, error) {\n\trespType := VPNGatewaysClientResetResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.VPNGateway)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l RouteFiltersClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (RouteFiltersClientDeleteResponse, error) {\n\trespType := RouteFiltersClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l FirewallPoliciesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (FirewallPoliciesClientDeleteResponse, error) {\n\trespType := FirewallPoliciesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l 
RouteFilterRulesClientCreateOrUpdatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (RouteFilterRulesClientCreateOrUpdateResponse, error) {\n\trespType := RouteFilterRulesClientCreateOrUpdateResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.RouteFilterRule)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l KustoPoolsClientStartPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (KustoPoolsClientStartResponse, error) {\n\trespType := KustoPoolsClientStartResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l P2SVPNGatewaysClientResetPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (P2SVPNGatewaysClientResetResponse, error) {\n\trespType := P2SVPNGatewaysClientResetResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.P2SVPNGateway)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l VPNSitesConfigurationClientDownloadPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (VPNSitesConfigurationClientDownloadResponse, error) {\n\trespType := VPNSitesConfigurationClientDownloadResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l AccountsClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (AccountsClientDeleteResponse, error) {\n\trespType := AccountsClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l ExpressRouteGatewaysClientDeletePollerResponse) PollUntilDone(ctx 
context.Context, freq time.Duration) (ExpressRouteGatewaysClientDeleteResponse, error) {\n\trespType := ExpressRouteGatewaysClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l WebApplicationFirewallPoliciesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (WebApplicationFirewallPoliciesClientDeleteResponse, error) {\n\trespType := WebApplicationFirewallPoliciesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l IntegrationRuntimesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (IntegrationRuntimesClientDeleteResponse, error) {\n\trespType := IntegrationRuntimesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l DatabaseVulnerabilityAssessmentScansInitiateScanPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (DatabaseVulnerabilityAssessmentScansInitiateScanResponse, error) {\n\trespType := DatabaseVulnerabilityAssessmentScansInitiateScanResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l DevicesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (DevicesClientDeleteResponse, error) {\n\trespType := DevicesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l WatchersClientGetAzureReachabilityReportPollerResponse) PollUntilDone(ctx 
context.Context, freq time.Duration) (WatchersClientGetAzureReachabilityReportResponse, error) {\n\trespType := WatchersClientGetAzureReachabilityReportResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.AzureReachabilityReport)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l TokensCreatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (TokensCreateResponse, error) {\n\trespType := TokensCreateResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.Token)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l ExpressRouteCircuitsClientListRoutesTablePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ExpressRouteCircuitsClientListRoutesTableResponse, error) {\n\trespType := ExpressRouteCircuitsClientListRoutesTableResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.ExpressRouteCircuitsRoutesTableListResult)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l ServersDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ServersDeleteResponse, error) {\n\trespType := ServersDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l SecurityRulesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (SecurityRulesClientDeleteResponse, error) {\n\trespType := SecurityRulesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l InterfaceTapConfigurationsClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) 
(InterfaceTapConfigurationsClientDeleteResponse, error) {\n\trespType := InterfaceTapConfigurationsClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l NatRulesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (NatRulesClientDeleteResponse, error) {\n\trespType := NatRulesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l TasksDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (TasksDeleteResponse, error) {\n\trespType := TasksDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l ServerKeysDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ServerKeysDeleteResponse, error) {\n\trespType := ServerKeysDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l WebhooksDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (WebhooksDeleteResponse, error) {\n\trespType := WebhooksDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l ExpressRouteCircuitsClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ExpressRouteCircuitsClientDeleteResponse, error) {\n\trespType := ExpressRouteCircuitsClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, 
err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l TablesClientUpdatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (TablesClientUpdateResponse, error) {\n\trespType := TablesClientUpdateResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.Table)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l OrdersClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (OrdersClientDeleteResponse, error) {\n\trespType := OrdersClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l ServiceEndpointPoliciesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ServiceEndpointPoliciesClientDeleteResponse, error) {\n\trespType := ServiceEndpointPoliciesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l FlowLogsClientCreateOrUpdatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (FlowLogsClientCreateOrUpdateResponse, error) {\n\trespType := FlowLogsClientCreateOrUpdateResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.FlowLog)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l IPFirewallRulesClientCreateOrUpdatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (IPFirewallRulesClientCreateOrUpdateResponse, error) {\n\trespType := IPFirewallRulesClientCreateOrUpdateResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.IPFirewallRuleInfo)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func 
(l ServiceUnitsClientCreateOrUpdatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ServiceUnitsClientCreateOrUpdateResponse, error) {\n\trespType := ServiceUnitsClientCreateOrUpdateResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.ServiceUnitResource)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (p *Poller) PollUntilDone(ctx context.Context) error {\n\tif p.poller == nil {\n\t\treturn fmt.Errorf(\"internal-error: `poller` was nil`\")\n\t}\n\tif _, ok := ctx.Deadline(); !ok {\n\t\treturn fmt.Errorf(\"internal-error: `ctx` should have a deadline\")\n\t}\n\n\tvar wait sync.WaitGroup\n\twait.Add(1)\n\n\tgo func() {\n\t\tconnectionDropCounter := 0\n\t\tretryDuration := p.initialDelayDuration\n\t\tfor true {\n\t\t\t// determine the next retry duration / how long to poll for\n\t\t\tif p.latestResponse != nil {\n\t\t\t\tretryDuration = p.latestResponse.PollInterval\n\t\t\t}\n\t\t\tendTime := time.Now().Add(retryDuration)\n\t\t\tselect {\n\t\t\tcase <-time.After(time.Until(endTime)):\n\t\t\t\t{\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tp.latestResponse, p.latestError = p.poller.Poll(ctx)\n\n\t\t\t// first check the connection drop status\n\t\t\tconnectionHasBeenDropped := false\n\t\t\tif p.latestResponse == nil && p.latestError == nil {\n\t\t\t\t// connection drops can either have no response/error (where we have no context)\n\t\t\t\tconnectionHasBeenDropped = true\n\t\t\t} else if _, ok := p.latestError.(PollingDroppedConnectionError); ok {\n\t\t\t\t// or have an error with more details (e.g. 
server not found, connection reset etc)\n\t\t\t\tconnectionHasBeenDropped = true\n\t\t\t}\n\t\t\tif connectionHasBeenDropped {\n\t\t\t\tconnectionDropCounter++\n\t\t\t\tif connectionDropCounter < p.maxNumberOfDroppedConnections {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif p.latestResponse == nil && p.latestError == nil {\n\t\t\t\t\t// the connection was dropped, but we have no context\n\t\t\t\t\tp.latestError = PollingDroppedConnectionError{}\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tconnectionDropCounter = 0\n\t\t\t}\n\n\t\t\tif p.latestError != nil {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif response := p.latestResponse; response != nil {\n\t\t\t\tretryDuration = response.PollInterval\n\n\t\t\t\tdone := false\n\t\t\t\tswitch response.Status {\n\t\t\t\t// Cancelled, Dropped Connections and Failed should be raised as errors containing additional info if available\n\n\t\t\t\tcase PollingStatusCancelled:\n\t\t\t\t\tp.latestError = fmt.Errorf(\"internal-error: a polling status of `Cancelled` should be surfaced as a PollingCancelledError\")\n\t\t\t\t\tdone = true\n\t\t\t\t\tbreak\n\n\t\t\t\tcase PollingStatusFailed:\n\t\t\t\t\tp.latestError = fmt.Errorf(\"internal-error: a polling status of `Failed` should be surfaced as a PollingFailedError\")\n\t\t\t\t\tdone = true\n\t\t\t\t\tbreak\n\n\t\t\t\tcase PollingStatusInProgress:\n\t\t\t\t\tcontinue\n\n\t\t\t\tcase PollingStatusSucceeded:\n\t\t\t\t\tdone = true\n\t\t\t\t\tbreak\n\n\t\t\t\tdefault:\n\t\t\t\t\tp.latestError = fmt.Errorf(\"internal-error: unimplemented polling status %q\", string(response.Status))\n\t\t\t\t\tdone = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tif done {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\twait.Done()\n\t}()\n\n\twaitDone := make(chan struct{}, 1)\n\tgo func() {\n\t\twait.Wait()\n\t\twaitDone <- struct{}{}\n\t}()\n\n\tselect {\n\tcase <-waitDone:\n\t\tbreak\n\tcase <-ctx.Done():\n\t\t{\n\t\t\tp.latestResponse = nil\n\t\t\tp.latestError = ctx.Err()\n\t\t\treturn 
p.latestError\n\t\t}\n\t}\n\n\tif p.latestError != nil {\n\t\tp.latestResponse = nil\n\t}\n\n\treturn p.latestError\n}", "func (l StreamingEndpointsClientScalePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (StreamingEndpointsClientScaleResponse, error) {\n\trespType := StreamingEndpointsClientScaleResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l VirtualHubRouteTableV2SClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (VirtualHubRouteTableV2SClientDeleteResponse, error) {\n\trespType := VirtualHubRouteTableV2SClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l VirtualHubsClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (VirtualHubsClientDeleteResponse, error) {\n\trespType := VirtualHubsClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l VirtualAppliancesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (VirtualAppliancesClientDeleteResponse, error) {\n\trespType := VirtualAppliancesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l RoutesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (RoutesClientDeleteResponse, error) {\n\trespType := RoutesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func 
(l WatchersClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (WatchersClientDeleteResponse, error) {\n\trespType := WatchersClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l PacketCapturesClientGetStatusPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (PacketCapturesClientGetStatusResponse, error) {\n\trespType := PacketCapturesClientGetStatusResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.PacketCaptureQueryStatusResult)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l AzureFirewallsClientCreateOrUpdatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (AzureFirewallsClientCreateOrUpdateResponse, error) {\n\trespType := AzureFirewallsClientCreateOrUpdateResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.AzureFirewall)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l ExpressRouteCircuitsClientListRoutesTableSummaryPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ExpressRouteCircuitsClientListRoutesTableSummaryResponse, error) {\n\trespType := ExpressRouteCircuitsClientListRoutesTableSummaryResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.ExpressRouteCircuitsRoutesTableSummaryListResult)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l DeletedServersRecoverPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (DeletedServersRecoverResponse, error) {\n\trespType := DeletedServersRecoverResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.DeletedServer)\n\tif err != nil {\n\t\treturn respType, 
err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l ConnectionMonitorsClientQueryPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ConnectionMonitorsClientQueryResponse, error) {\n\trespType := ConnectionMonitorsClientQueryResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.ConnectionMonitorQueryResult)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l ExpressRouteCircuitsClientCreateOrUpdatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ExpressRouteCircuitsClientCreateOrUpdateResponse, error) {\n\trespType := ExpressRouteCircuitsClientCreateOrUpdateResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.ExpressRouteCircuit)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l VPNServerConfigurationsClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (VPNServerConfigurationsClientDeleteResponse, error) {\n\trespType := VPNServerConfigurationsClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l NatGatewaysClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (NatGatewaysClientDeleteResponse, error) {\n\trespType := NatGatewaysClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l InboundNatRulesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (InboundNatRulesClientDeleteResponse, error) {\n\trespType := InboundNatRulesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, 
err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l HubRouteTablesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (HubRouteTablesClientDeleteResponse, error) {\n\trespType := HubRouteTablesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l VPNGatewaysClientStopPacketCapturePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (VPNGatewaysClientStopPacketCaptureResponse, error) {\n\trespType := VPNGatewaysClientStopPacketCaptureResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.Value)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l VirtualNetworkRulesDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (VirtualNetworkRulesDeleteResponse, error) {\n\trespType := VirtualNetworkRulesDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l DscpConfigurationClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (DscpConfigurationClientDeleteResponse, error) {\n\trespType := DscpConfigurationClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l BandwidthSchedulesClientCreateOrUpdatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (BandwidthSchedulesClientCreateOrUpdateResponse, error) {\n\trespType := BandwidthSchedulesClientCreateOrUpdateResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.BandwidthSchedule)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = 
resp\n\treturn respType, nil\n}", "func (l ExpressRoutePortsClientCreateOrUpdatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ExpressRoutePortsClientCreateOrUpdateResponse, error) {\n\trespType := ExpressRoutePortsClientCreateOrUpdateResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.ExpressRoutePort)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l P2SVPNGatewaysClientGetP2SVPNConnectionHealthDetailedPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (P2SVPNGatewaysClientGetP2SVPNConnectionHealthDetailedResponse, error) {\n\trespType := P2SVPNGatewaysClientGetP2SVPNConnectionHealthDetailedResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.P2SVPNConnectionHealth)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l LinkedServicesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (LinkedServicesClientDeleteResponse, error) {\n\trespType := LinkedServicesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.LinkedService)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l VirtualNetworkGatewayConnectionsClientGetIkeSasPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (VirtualNetworkGatewayConnectionsClientGetIkeSasResponse, error) {\n\trespType := VirtualNetworkGatewayConnectionsClientGetIkeSasResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.Value)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l UsersClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (UsersClientDeleteResponse, error) {\n\trespType := UsersClientDeleteResponse{}\n\tresp, err := 
l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l ExpressRouteGatewaysClientUpdateTagsPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ExpressRouteGatewaysClientUpdateTagsResponse, error) {\n\trespType := ExpressRouteGatewaysClientUpdateTagsResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.ExpressRouteGateway)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}" ]
[ "0.6259292", "0.6195853", "0.61802083", "0.6166713", "0.61609143", "0.61609143", "0.6154409", "0.6075394", "0.60710424", "0.60707647", "0.60615396", "0.6043415", "0.60430884", "0.60313463", "0.6021775", "0.6007862", "0.5998663", "0.59888476", "0.5980255", "0.595896", "0.5956378", "0.594661", "0.59132135", "0.5910213", "0.5899073", "0.5896752", "0.5880312", "0.58777475", "0.58739704", "0.58739704", "0.5854519", "0.5853649", "0.5840586", "0.583973", "0.58278793", "0.58115023", "0.5809201", "0.5809191", "0.58072776", "0.57925636", "0.5786058", "0.57844967", "0.5780295", "0.57786757", "0.57670593", "0.5761527", "0.576144", "0.5757725", "0.5755513", "0.5750583", "0.57389987", "0.5738375", "0.5736762", "0.57362086", "0.5731007", "0.57260036", "0.5725031", "0.5724962", "0.5723625", "0.57170564", "0.57154214", "0.57139236", "0.5710196", "0.570934", "0.5707234", "0.57039857", "0.570383", "0.5703449", "0.5703015", "0.56978655", "0.56903607", "0.56855464", "0.56841564", "0.5681204", "0.56810015", "0.5680053", "0.5679557", "0.56780916", "0.56697315", "0.56684875", "0.5666967", "0.56610644", "0.5657614", "0.56560224", "0.5647862", "0.5641831", "0.56418073", "0.5638144", "0.56339353", "0.5629027", "0.5628087", "0.5627663", "0.5627321", "0.56243616", "0.56203306", "0.561998", "0.5618211", "0.5616647", "0.5614523", "0.5613799", "0.5609946" ]
0.0
-1
Resume rehydrates a AdaptiveNetworkHardeningsEnforcePollerResponse from the provided client and resume token.
func (l *AdaptiveNetworkHardeningsEnforcePollerResponse) Resume(ctx context.Context, client *AdaptiveNetworkHardeningsClient, token string) error { pt, err := armruntime.NewPollerFromResumeToken("AdaptiveNetworkHardeningsClient.Enforce", token, client.pl, client.enforceHandleError) if err != nil { return err } poller := &AdaptiveNetworkHardeningsEnforcePoller{ pt: pt, } resp, err := poller.Poll(ctx) if err != nil { return err } l.Poller = poller l.RawResponse = resp return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (l *WorkspaceManagedSQLServerEncryptionProtectorClientRevalidatePollerResponse) Resume(ctx context.Context, client *WorkspaceManagedSQLServerEncryptionProtectorClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"WorkspaceManagedSQLServerEncryptionProtectorClient.Revalidate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &WorkspaceManagedSQLServerEncryptionProtectorClientRevalidatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ManagedInstanceEncryptionProtectorsRevalidatePollerResponse) Resume(ctx context.Context, client *ManagedInstanceEncryptionProtectorsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ManagedInstanceEncryptionProtectorsClient.Revalidate\", token, client.pl, client.revalidateHandleError)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ManagedInstanceEncryptionProtectorsRevalidatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ApplicationGatewaysClientStartPollerResponse) Resume(ctx context.Context, client *ApplicationGatewaysClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ApplicationGatewaysClient.Start\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ApplicationGatewaysClientStartPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *EncryptionProtectorsRevalidatePollerResponse) Resume(ctx context.Context, client *EncryptionProtectorsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"EncryptionProtectorsClient.Revalidate\", token, client.pl, client.revalidateHandleError)\n\tif err != nil 
{\n\t\treturn err\n\t}\n\tpoller := &EncryptionProtectorsRevalidatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VirtualNetworkGatewaysClientResetPollerResponse) Resume(ctx context.Context, client *VirtualNetworkGatewaysClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VirtualNetworkGatewaysClient.Reset\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualNetworkGatewaysClientResetPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VPNGatewaysClientResetPollerResponse) Resume(ctx context.Context, client *VPNGatewaysClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VPNGatewaysClient.Reset\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VPNGatewaysClientResetPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VirtualNetworkGatewaysClientResetVPNClientSharedKeyPollerResponse) Resume(ctx context.Context, client *VirtualNetworkGatewaysClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VirtualNetworkGatewaysClient.ResetVPNClientSharedKey\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualNetworkGatewaysClientResetVPNClientSharedKeyPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ApplicationGatewaysClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *ApplicationGatewaysClient, token string) error {\n\tpt, err := 
armruntime.NewPollerFromResumeToken(\"ApplicationGatewaysClient.CreateOrUpdate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ApplicationGatewaysClientCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ApplicationGatewaysClientBackendHealthOnDemandPollerResponse) Resume(ctx context.Context, client *ApplicationGatewaysClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ApplicationGatewaysClient.BackendHealthOnDemand\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ApplicationGatewaysClientBackendHealthOnDemandPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *LiveEventsClientAllocatePollerResponse) Resume(ctx context.Context, client *LiveEventsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"LiveEventsClient.Allocate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &LiveEventsClientAllocatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *SharesClientRefreshPollerResponse) Resume(ctx context.Context, client *SharesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"SharesClient.Refresh\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &SharesClientRefreshPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VirtualNetworkGatewaysClientGeneratevpnclientpackagePollerResponse) Resume(ctx context.Context, client *VirtualNetworkGatewaysClient, token string) error {\n\tpt, err := 
armruntime.NewPollerFromResumeToken(\"VirtualNetworkGatewaysClient.Generatevpnclientpackage\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualNetworkGatewaysClientGeneratevpnclientpackagePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *SQLPoolsClientResumePollerResponse) Resume(ctx context.Context, client *SQLPoolsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"SQLPoolsClient.Resume\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &SQLPoolsClientResumePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *WatchersClientVerifyIPFlowPollerResponse) Resume(ctx context.Context, client *WatchersClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"WatchersClient.VerifyIPFlow\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &WatchersClientVerifyIPFlowPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *BackupPoliciesClientUpdatePollerResponse) Resume(ctx context.Context, client *BackupPoliciesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"BackupPoliciesClient.Update\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &BackupPoliciesClientUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VirtualNetworkGatewaysClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *VirtualNetworkGatewaysClient, token string) error {\n\tpt, err := 
armruntime.NewPollerFromResumeToken(\"VirtualNetworkGatewaysClient.CreateOrUpdate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualNetworkGatewaysClientCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *P2SVPNGatewaysClientResetPollerResponse) Resume(ctx context.Context, client *P2SVPNGatewaysClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"P2SVPNGatewaysClient.Reset\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &P2SVPNGatewaysClientResetPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ExpressRouteGatewaysClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *ExpressRouteGatewaysClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ExpressRouteGatewaysClient.CreateOrUpdate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ExpressRouteGatewaysClientCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VPNServerConfigurationsAssociatedWithVirtualWanClientListPollerResponse) Resume(ctx context.Context, client *VPNServerConfigurationsAssociatedWithVirtualWanClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VPNServerConfigurationsAssociatedWithVirtualWanClient.List\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VPNServerConfigurationsAssociatedWithVirtualWanClientListPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l 
*VirtualNetworkGatewayNatRulesClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *VirtualNetworkGatewayNatRulesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VirtualNetworkGatewayNatRulesClient.CreateOrUpdate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualNetworkGatewayNatRulesClientCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *KustoPoolDataConnectionsClientDataConnectionValidationPollerResponse) Resume(ctx context.Context, client *KustoPoolDataConnectionsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"KustoPoolDataConnectionsClient.DataConnectionValidation\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &KustoPoolDataConnectionsClientDataConnectionValidationPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VirtualNetworkGatewaysClientSetVpnclientIPSecParametersPollerResponse) Resume(ctx context.Context, client *VirtualNetworkGatewaysClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VirtualNetworkGatewaysClient.SetVpnclientIPSecParameters\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualNetworkGatewaysClientSetVpnclientIPSecParametersPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ManagedInstancesFailoverPollerResponse) Resume(ctx context.Context, client *ManagedInstancesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ManagedInstancesClient.Failover\", token, client.pl, client.failoverHandleError)\n\tif err != nil {\n\t\treturn 
err\n\t}\n\tpoller := &ManagedInstancesFailoverPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *InstanceFailoverGroupsForceFailoverAllowDataLossPollerResponse) Resume(ctx context.Context, client *InstanceFailoverGroupsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"InstanceFailoverGroupsClient.ForceFailoverAllowDataLoss\", token, client.pl, client.forceFailoverAllowDataLossHandleError)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &InstanceFailoverGroupsForceFailoverAllowDataLossPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ApplicationGatewaysClientDeletePollerResponse) Resume(ctx context.Context, client *ApplicationGatewaysClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ApplicationGatewaysClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ApplicationGatewaysClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *NatGatewaysClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *NatGatewaysClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"NatGatewaysClient.CreateOrUpdate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &NatGatewaysClientCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VirtualNetworkTapsClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *VirtualNetworkTapsClient, token string) error {\n\tpt, err := 
armruntime.NewPollerFromResumeToken(\"VirtualNetworkTapsClient.CreateOrUpdate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualNetworkTapsClientCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *WatchersClientCheckConnectivityPollerResponse) Resume(ctx context.Context, client *WatchersClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"WatchersClient.CheckConnectivity\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &WatchersClientCheckConnectivityPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ContainersClientRefreshPollerResponse) Resume(ctx context.Context, client *ContainersClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ContainersClient.Refresh\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ContainersClientRefreshPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ApplicationGatewayPrivateEndpointConnectionsClientUpdatePollerResponse) Resume(ctx context.Context, client *ApplicationGatewayPrivateEndpointConnectionsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ApplicationGatewayPrivateEndpointConnectionsClient.Update\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ApplicationGatewayPrivateEndpointConnectionsClientUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *IntegrationRuntimeObjectMetadataClientRefreshPollerResponse) Resume(ctx context.Context, 
client *IntegrationRuntimeObjectMetadataClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"IntegrationRuntimeObjectMetadataClient.Refresh\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &IntegrationRuntimeObjectMetadataClientRefreshPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *IntegrationRuntimeObjectMetadataClientRefreshPollerResponse) Resume(ctx context.Context, client *IntegrationRuntimeObjectMetadataClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"IntegrationRuntimeObjectMetadataClient.Refresh\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &IntegrationRuntimeObjectMetadataClientRefreshPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VirtualNetworkGatewayConnectionsClientResetSharedKeyPollerResponse) Resume(ctx context.Context, client *VirtualNetworkGatewayConnectionsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VirtualNetworkGatewayConnectionsClient.ResetSharedKey\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualNetworkGatewayConnectionsClientResetSharedKeyPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ServerDNSAliasesAcquirePollerResponse) Resume(ctx context.Context, client *ServerDNSAliasesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ServerDNSAliasesClient.Acquire\", token, client.pl, client.acquireHandleError)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ServerDNSAliasesAcquirePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil 
{\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *SQLPoolVulnerabilityAssessmentScansClientInitiateScanPollerResponse) Resume(ctx context.Context, client *SQLPoolVulnerabilityAssessmentScansClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"SQLPoolVulnerabilityAssessmentScansClient.InitiateScan\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &SQLPoolVulnerabilityAssessmentScansClientInitiateScanPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VPNGatewaysClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *VPNGatewaysClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VPNGatewaysClient.CreateOrUpdate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VPNGatewaysClientCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VirtualNetworkGatewayConnectionsClientSetSharedKeyPollerResponse) Resume(ctx context.Context, client *VirtualNetworkGatewayConnectionsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VirtualNetworkGatewayConnectionsClient.SetSharedKey\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualNetworkGatewayConnectionsClientSetSharedKeyPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *LocalNetworkGatewaysClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *LocalNetworkGatewaysClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"LocalNetworkGatewaysClient.CreateOrUpdate\", token, client.pl)\n\tif err != 
nil {\n\t\treturn err\n\t}\n\tpoller := &LocalNetworkGatewaysClientCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VirtualNetworkPeeringsClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *VirtualNetworkPeeringsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VirtualNetworkPeeringsClient.CreateOrUpdate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualNetworkPeeringsClientCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VirtualNetworkGatewaysClientGetAdvertisedRoutesPollerResponse) Resume(ctx context.Context, client *VirtualNetworkGatewaysClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VirtualNetworkGatewaysClient.GetAdvertisedRoutes\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualNetworkGatewaysClientGetAdvertisedRoutesPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *LiveEventsClientResetPollerResponse) Resume(ctx context.Context, client *LiveEventsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"LiveEventsClient.Reset\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &LiveEventsClientResetPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *InboundNatRulesClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *InboundNatRulesClient, token string) error {\n\tpt, err := 
armruntime.NewPollerFromResumeToken(\"InboundNatRulesClient.CreateOrUpdate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &InboundNatRulesClientCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *InstanceFailoverGroupsFailoverPollerResponse) Resume(ctx context.Context, client *InstanceFailoverGroupsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"InstanceFailoverGroupsClient.Failover\", token, client.pl, client.failoverHandleError)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &InstanceFailoverGroupsFailoverPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *IPFirewallRulesClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *IPFirewallRulesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"IPFirewallRulesClient.CreateOrUpdate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &IPFirewallRulesClientCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *FailoverGroupsForceFailoverAllowDataLossPollerResponse) Resume(ctx context.Context, client *FailoverGroupsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"FailoverGroupsClient.ForceFailoverAllowDataLoss\", token, client.pl, client.forceFailoverAllowDataLossHandleError)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &FailoverGroupsForceFailoverAllowDataLossPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l 
*VirtualRouterPeeringsClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *VirtualRouterPeeringsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VirtualRouterPeeringsClient.CreateOrUpdate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualRouterPeeringsClientCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VirtualNetworkGatewaysClientGetVpnclientConnectionHealthPollerResponse) Resume(ctx context.Context, client *VirtualNetworkGatewaysClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VirtualNetworkGatewaysClient.GetVpnclientConnectionHealth\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualNetworkGatewaysClientGetVpnclientConnectionHealthPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VirtualNetworkLinksClientUpdatePollerResponse) Resume(ctx context.Context, client *VirtualNetworkLinksClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VirtualNetworkLinksClient.Update\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualNetworkLinksClientUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *RouteFilterRulesClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *RouteFilterRulesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"RouteFilterRulesClient.CreateOrUpdate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &RouteFilterRulesClientCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif 
err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *FailoverGroupsFailoverPollerResponse) Resume(ctx context.Context, client *FailoverGroupsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"FailoverGroupsClient.Failover\", token, client.pl, client.failoverHandleError)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &FailoverGroupsFailoverPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VirtualNetworkGatewaysClientGetVpnclientIPSecParametersPollerResponse) Resume(ctx context.Context, client *VirtualNetworkGatewaysClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VirtualNetworkGatewaysClient.GetVpnclientIPSecParameters\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualNetworkGatewaysClientGetVpnclientIPSecParametersPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *PoolsClientUpdatePollerResponse) Resume(ctx context.Context, client *PoolsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"PoolsClient.Update\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &PoolsClientUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VirtualNetworkGatewaysClientGetLearnedRoutesPollerResponse) Resume(ctx context.Context, client *VirtualNetworkGatewaysClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VirtualNetworkGatewaysClient.GetLearnedRoutes\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualNetworkGatewaysClientGetLearnedRoutesPoller{\n\t\tpt: 
pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *FirewallPoliciesClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *FirewallPoliciesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"FirewallPoliciesClient.CreateOrUpdate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &FirewallPoliciesClientCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *NatRulesClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *NatRulesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"NatRulesClient.CreateOrUpdate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &NatRulesClientCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ElasticPoolsFailoverPollerResponse) Resume(ctx context.Context, client *ElasticPoolsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ElasticPoolsClient.Failover\", token, client.pl, client.failoverHandleError)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ElasticPoolsFailoverPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VirtualNetworkRulesCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *VirtualNetworkRulesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VirtualNetworkRulesClient.CreateOrUpdate\", token, client.pl, client.createOrUpdateHandleError)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := 
&VirtualNetworkRulesCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ExpressRouteCrossConnectionPeeringsClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *ExpressRouteCrossConnectionPeeringsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ExpressRouteCrossConnectionPeeringsClient.CreateOrUpdate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ExpressRouteCrossConnectionPeeringsClientCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ManagedDatabaseVulnerabilityAssessmentScansInitiateScanPollerResponse) Resume(ctx context.Context, client *ManagedDatabaseVulnerabilityAssessmentScansClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ManagedDatabaseVulnerabilityAssessmentScansClient.InitiateScan\", token, client.pl, client.initiateScanHandleError)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ManagedDatabaseVulnerabilityAssessmentScansInitiateScanPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ManagedInstanceAzureADOnlyAuthenticationsCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *ManagedInstanceAzureADOnlyAuthenticationsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ManagedInstanceAzureADOnlyAuthenticationsClient.CreateOrUpdate\", token, client.pl, client.createOrUpdateHandleError)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ManagedInstanceAzureADOnlyAuthenticationsCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = 
poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *SubnetsClientPrepareNetworkPoliciesPollerResponse) Resume(ctx context.Context, client *SubnetsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"SubnetsClient.PrepareNetworkPolicies\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &SubnetsClientPrepareNetworkPoliciesPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *SnapshotPoliciesClientUpdatePollerResponse) Resume(ctx context.Context, client *SnapshotPoliciesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"SnapshotPoliciesClient.Update\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &SnapshotPoliciesClientUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ApplicationGatewaysClientBackendHealthPollerResponse) Resume(ctx context.Context, client *ApplicationGatewaysClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ApplicationGatewaysClient.BackendHealth\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ApplicationGatewaysClientBackendHealthPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *PrivateZonesClientUpdatePollerResponse) Resume(ctx context.Context, client *PrivateZonesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"PrivateZonesClient.Update\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &PrivateZonesClientUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = 
resp\n\treturn nil\n}", "func (l *WatchersClientGetVMSecurityRulesPollerResponse) Resume(ctx context.Context, client *WatchersClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"WatchersClient.GetVMSecurityRules\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &WatchersClientGetVMSecurityRulesPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *AccountsClientUpdatePollerResponse) Resume(ctx context.Context, client *AccountsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"AccountsClient.Update\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &AccountsClientUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ManagedInstancesUpdatePollerResponse) Resume(ctx context.Context, client *ManagedInstancesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ManagedInstancesClient.Update\", token, client.pl, client.updateHandleError)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ManagedInstancesUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *KustoPoolsClientUpdatePollerResponse) Resume(ctx context.Context, client *KustoPoolsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"KustoPoolsClient.Update\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &KustoPoolsClientUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ServiceEndpointPoliciesClientCreateOrUpdatePollerResponse) 
Resume(ctx context.Context, client *ServiceEndpointPoliciesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ServiceEndpointPoliciesClient.CreateOrUpdate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ServiceEndpointPoliciesClientCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *DiagnosticSettingsClientUpdateDiagnosticRemoteSupportSettingsPollerResponse) Resume(ctx context.Context, client *DiagnosticSettingsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"DiagnosticSettingsClient.UpdateDiagnosticRemoteSupportSettings\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &DiagnosticSettingsClientUpdateDiagnosticRemoteSupportSettingsPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VirtualNetworkGatewayConnectionsClientResetConnectionPollerResponse) Resume(ctx context.Context, client *VirtualNetworkGatewayConnectionsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VirtualNetworkGatewayConnectionsClient.ResetConnection\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualNetworkGatewayConnectionsClientResetConnectionPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ExpressRouteGatewaysClientDeletePollerResponse) Resume(ctx context.Context, client *ExpressRouteGatewaysClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ExpressRouteGatewaysClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ExpressRouteGatewaysClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, 
err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *WorkspaceManagedSQLServerEncryptionProtectorClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *WorkspaceManagedSQLServerEncryptionProtectorClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"WorkspaceManagedSQLServerEncryptionProtectorClient.CreateOrUpdate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &WorkspaceManagedSQLServerEncryptionProtectorClientCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VirtualWansClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *VirtualWansClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VirtualWansClient.CreateOrUpdate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualWansClientCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *DeletedServersRecoverPollerResponse) Resume(ctx context.Context, client *DeletedServersClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"DeletedServersClient.Recover\", token, client.pl, client.recoverHandleError)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &DeletedServersRecoverPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VirtualNetworkGatewaysClientStartPacketCapturePollerResponse) Resume(ctx context.Context, client *VirtualNetworkGatewaysClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VirtualNetworkGatewaysClient.StartPacketCapture\", token, 
client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualNetworkGatewaysClientStartPacketCapturePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *DatabaseVulnerabilityAssessmentScansInitiateScanPollerResponse) Resume(ctx context.Context, client *DatabaseVulnerabilityAssessmentScansClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"DatabaseVulnerabilityAssessmentScansClient.InitiateScan\", token, client.pl, client.initiateScanHandleError)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &DatabaseVulnerabilityAssessmentScansInitiateScanPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *SQLPoolsClientPausePollerResponse) Resume(ctx context.Context, client *SQLPoolsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"SQLPoolsClient.Pause\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &SQLPoolsClientPausePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ManagedBackupShortTermRetentionPoliciesUpdatePollerResponse) Resume(ctx context.Context, client *ManagedBackupShortTermRetentionPoliciesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ManagedBackupShortTermRetentionPoliciesClient.Update\", token, client.pl, client.updateHandleError)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ManagedBackupShortTermRetentionPoliciesUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *FirewallPoliciesClientDeletePollerResponse) Resume(ctx context.Context, 
client *FirewallPoliciesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"FirewallPoliciesClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &FirewallPoliciesClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *IPFirewallRulesClientDeletePollerResponse) Resume(ctx context.Context, client *IPFirewallRulesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"IPFirewallRulesClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &IPFirewallRulesClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *PolicyStatesClientTriggerResourceGroupEvaluationPollerResponse) Resume(ctx context.Context, client *PolicyStatesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"PolicyStatesClient.TriggerResourceGroupEvaluation\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &PolicyStatesClientTriggerResourceGroupEvaluationPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ExpressRouteCircuitPeeringsClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *ExpressRouteCircuitPeeringsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ExpressRouteCircuitPeeringsClient.CreateOrUpdate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ExpressRouteCircuitPeeringsClientCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l 
*FailoverGroupsUpdatePollerResponse) Resume(ctx context.Context, client *FailoverGroupsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"FailoverGroupsClient.Update\", token, client.pl, client.updateHandleError)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &FailoverGroupsUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VirtualNetworkGatewaysClientDeletePollerResponse) Resume(ctx context.Context, client *VirtualNetworkGatewaysClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VirtualNetworkGatewaysClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualNetworkGatewaysClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ConnectionMonitorsClientStartPollerResponse) Resume(ctx context.Context, client *ConnectionMonitorsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ConnectionMonitorsClient.Start\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ConnectionMonitorsClientStartPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VirtualNetworksClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *VirtualNetworksClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VirtualNetworksClient.CreateOrUpdate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualNetworksClientCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l 
*SecurityRulesClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *SecurityRulesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"SecurityRulesClient.CreateOrUpdate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &SecurityRulesClientCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *TasksUpdatePollerResponse) Resume(ctx context.Context, client *TasksClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"TasksClient.Update\", token, client.pl, client.updateHandleError)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &TasksUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *WatchersClientGetNetworkConfigurationDiagnosticPollerResponse) Resume(ctx context.Context, client *WatchersClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"WatchersClient.GetNetworkConfigurationDiagnostic\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &WatchersClientGetNetworkConfigurationDiagnosticPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *TablesClientUpdatePollerResponse) Resume(ctx context.Context, client *TablesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"TablesClient.Update\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &TablesClientUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l 
*WorkspaceManagedIdentitySQLControlSettingsClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *WorkspaceManagedIdentitySQLControlSettingsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"WorkspaceManagedIdentitySQLControlSettingsClient.CreateOrUpdate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &WorkspaceManagedIdentitySQLControlSettingsClientCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VirtualNetworkRulesDeletePollerResponse) Resume(ctx context.Context, client *VirtualNetworkRulesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VirtualNetworkRulesClient.Delete\", token, client.pl, client.deleteHandleError)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualNetworkRulesDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *RoutingIntentClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *RoutingIntentClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"RoutingIntentClient.CreateOrUpdate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &RoutingIntentClientCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VolumesClientAuthorizeReplicationPollerResponse) Resume(ctx context.Context, client *VolumesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VolumesClient.AuthorizeReplication\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VolumesClientAuthorizeReplicationPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := 
poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *P2SVPNGatewaysClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *P2SVPNGatewaysClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"P2SVPNGatewaysClient.CreateOrUpdate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &P2SVPNGatewaysClientCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *TriggersClientStartPollerResponse) Resume(ctx context.Context, client *TriggersClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"TriggersClient.Start\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &TriggersClientStartPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *WebApplicationFirewallPoliciesClientDeletePollerResponse) Resume(ctx context.Context, client *WebApplicationFirewallPoliciesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"WebApplicationFirewallPoliciesClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &WebApplicationFirewallPoliciesClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ApplicationGatewaysClientStopPollerResponse) Resume(ctx context.Context, client *ApplicationGatewaysClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ApplicationGatewaysClient.Stop\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ApplicationGatewaysClientStopPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := 
poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ServiceEndpointPolicyDefinitionsClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *ServiceEndpointPolicyDefinitionsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ServiceEndpointPolicyDefinitionsClient.CreateOrUpdate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ServiceEndpointPolicyDefinitionsClientCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}" ]
[ "0.7977291", "0.78237575", "0.772744", "0.7726858", "0.7710988", "0.7694588", "0.7675609", "0.75921535", "0.7566447", "0.75447327", "0.75436836", "0.75272745", "0.7527257", "0.75233424", "0.75203735", "0.7513311", "0.7507816", "0.75030106", "0.74958706", "0.749215", "0.747994", "0.74709946", "0.7463019", "0.74615973", "0.7456642", "0.7452077", "0.7449284", "0.74352795", "0.7434346", "0.74312824", "0.7421636", "0.7421636", "0.7420427", "0.74192035", "0.7416057", "0.741295", "0.7412444", "0.7411122", "0.74107385", "0.7408519", "0.74069834", "0.74031556", "0.74026877", "0.74007905", "0.7400002", "0.7399386", "0.7392851", "0.73880213", "0.7387514", "0.738625", "0.73853433", "0.7381354", "0.7376474", "0.73718226", "0.7370183", "0.7357174", "0.7354977", "0.7354959", "0.734674", "0.7333913", "0.7332286", "0.7329301", "0.73274755", "0.73238665", "0.73217404", "0.7318492", "0.731595", "0.7314396", "0.7307309", "0.7306048", "0.73053885", "0.73009247", "0.7298561", "0.72970545", "0.72930646", "0.729172", "0.7288664", "0.7288301", "0.7287205", "0.7285396", "0.72820765", "0.7279316", "0.7279274", "0.7278003", "0.72762036", "0.7274541", "0.7272614", "0.7271578", "0.72700435", "0.72667956", "0.7265091", "0.7264803", "0.72622097", "0.72621465", "0.7260642", "0.72569007", "0.72563696", "0.7254452", "0.72519624", "0.7251205" ]
0.8376427
0
PollUntilDone will poll the service endpoint until a terminal state is reached or an error is received. freq: the time to wait between intervals in absence of a RetryAfter header. Allowed minimum is one second. A good starting value is 30 seconds. Note that some resources might benefit from a different value.
func (l AlertsSimulatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (AlertsSimulateResponse, error) { respType := AlertsSimulateResponse{} resp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil) if err != nil { return respType, err } respType.RawResponse = resp return respType, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (l WatchersClientGetTroubleshootingPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (WatchersClientGetTroubleshootingResponse, error) {\n\trespType := WatchersClientGetTroubleshootingResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.TroubleshootingResult)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l WatchersClientGetFlowLogStatusPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (WatchersClientGetFlowLogStatusResponse, error) {\n\trespType := WatchersClientGetFlowLogStatusResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.FlowLogInformation)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l ApplicationGatewaysClientStopPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ApplicationGatewaysClientStopResponse, error) {\n\trespType := ApplicationGatewaysClientStopResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l ConnectionMonitorsClientStopPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ConnectionMonitorsClientStopResponse, error) {\n\trespType := ConnectionMonitorsClientStopResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l IntegrationRuntimesClientStopPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (IntegrationRuntimesClientStopResponse, error) {\n\trespType := IntegrationRuntimesClientStopResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l 
IntegrationRuntimesClientStopPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (IntegrationRuntimesClientStopResponse, error) {\n\trespType := IntegrationRuntimesClientStopResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l WatchersClientGetNextHopPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (WatchersClientGetNextHopResponse, error) {\n\trespType := WatchersClientGetNextHopResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.NextHopResult)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l TokensDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (TokensDeleteResponse, error) {\n\trespType := TokensDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l WatchersClientGetTroubleshootingResultPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (WatchersClientGetTroubleshootingResultResponse, error) {\n\trespType := WatchersClientGetTroubleshootingResultResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.TroubleshootingResult)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l LiveEventsClientStopPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (LiveEventsClientStopResponse, error) {\n\trespType := LiveEventsClientStopResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l DevicesClientScanForUpdatesPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) 
(DevicesClientScanForUpdatesResponse, error) {\n\trespType := DevicesClientScanForUpdatesResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l PacketCapturesClientStopPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (PacketCapturesClientStopResponse, error) {\n\trespType := PacketCapturesClientStopResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l StreamingEndpointsClientStopPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (StreamingEndpointsClientStopResponse, error) {\n\trespType := StreamingEndpointsClientStopResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l ConnectionMonitorsClientStartPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ConnectionMonitorsClientStartResponse, error) {\n\trespType := ConnectionMonitorsClientStartResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l TriggersClientStopPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (TriggersClientStopResponse, error) {\n\trespType := TriggersClientStopResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l BandwidthSchedulesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (BandwidthSchedulesClientDeleteResponse, error) {\n\trespType := BandwidthSchedulesClientDeleteResponse{}\n\tresp, err := 
l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l ApplicationGatewaysClientStartPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ApplicationGatewaysClientStartResponse, error) {\n\trespType := ApplicationGatewaysClientStartResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l WatchersClientSetFlowLogConfigurationPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (WatchersClientSetFlowLogConfigurationResponse, error) {\n\trespType := WatchersClientSetFlowLogConfigurationResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.FlowLogInformation)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l TokensUpdatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (TokensUpdateResponse, error) {\n\trespType := TokensUpdateResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.Token)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l OutboundFirewallRulesDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (OutboundFirewallRulesDeleteResponse, error) {\n\trespType := OutboundFirewallRulesDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l KustoPoolsClientStopPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (KustoPoolsClientStopResponse, error) {\n\trespType := KustoPoolsClientStopResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, 
err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l AzureFirewallsClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (AzureFirewallsClientDeleteResponse, error) {\n\trespType := AzureFirewallsClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l DevicesClientInstallUpdatesPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (DevicesClientInstallUpdatesResponse, error) {\n\trespType := DevicesClientInstallUpdatesResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l EnergyServicesClientCreatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (EnergyServicesClientCreateResponse, error) {\n\trespType := EnergyServicesClientCreateResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.EnergyService)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l RouteFilterRulesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (RouteFilterRulesClientDeleteResponse, error) {\n\trespType := RouteFilterRulesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l WatchersClientGetNetworkConfigurationDiagnosticPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (WatchersClientGetNetworkConfigurationDiagnosticResponse, error) {\n\trespType := WatchersClientGetNetworkConfigurationDiagnosticResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.ConfigurationDiagnosticResponse)\n\tif err != nil {\n\t\treturn respType, 
err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l DevicesClientDownloadUpdatesPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (DevicesClientDownloadUpdatesResponse, error) {\n\trespType := DevicesClientDownloadUpdatesResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l IntegrationRuntimesClientStartPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (IntegrationRuntimesClientStartResponse, error) {\n\trespType := IntegrationRuntimesClientStartResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.IntegrationRuntimeStatusResponse)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l IntegrationRuntimesClientStartPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (IntegrationRuntimesClientStartResponse, error) {\n\trespType := IntegrationRuntimesClientStartResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.IntegrationRuntimeStatusResponse)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l StreamingEndpointsClientStartPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (StreamingEndpointsClientStartResponse, error) {\n\trespType := StreamingEndpointsClientStartResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l PolicyStatesClientTriggerSubscriptionEvaluationPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (PolicyStatesClientTriggerSubscriptionEvaluationResponse, error) {\n\trespType := PolicyStatesClientTriggerSubscriptionEvaluationResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, 
nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l ExpressRoutePortsClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ExpressRoutePortsClientDeleteResponse, error) {\n\trespType := ExpressRoutePortsClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l LiveEventsClientResetPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (LiveEventsClientResetResponse, error) {\n\trespType := LiveEventsClientResetResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l RunsCancelPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (RunsCancelResponse, error) {\n\trespType := RunsCancelResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l InterfacesClientGetEffectiveRouteTablePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (InterfacesClientGetEffectiveRouteTableResponse, error) {\n\trespType := InterfacesClientGetEffectiveRouteTableResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.EffectiveRouteListResult)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l IPFirewallRulesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (IPFirewallRulesClientDeleteResponse, error) {\n\trespType := IPFirewallRulesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.Object)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, 
nil\n}", "func (l LiveEventsClientStartPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (LiveEventsClientStartResponse, error) {\n\trespType := LiveEventsClientStartResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l TablesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (TablesClientDeleteResponse, error) {\n\trespType := TablesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l EnergyServicesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (EnergyServicesClientDeleteResponse, error) {\n\trespType := EnergyServicesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l FlowLogsClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (FlowLogsClientDeleteResponse, error) {\n\trespType := FlowLogsClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l SharesClientRefreshPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (SharesClientRefreshResponse, error) {\n\trespType := SharesClientRefreshResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l TriggersClientStartPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (TriggersClientStartResponse, error) {\n\trespType := TriggersClientStartResponse{}\n\tresp, err := 
l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l RouteTablesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (RouteTablesClientDeleteResponse, error) {\n\trespType := RouteTablesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l VPNGatewaysClientResetPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (VPNGatewaysClientResetResponse, error) {\n\trespType := VPNGatewaysClientResetResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.VPNGateway)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l FirewallPoliciesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (FirewallPoliciesClientDeleteResponse, error) {\n\trespType := FirewallPoliciesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l RouteFiltersClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (RouteFiltersClientDeleteResponse, error) {\n\trespType := RouteFiltersClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l RouteFilterRulesClientCreateOrUpdatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (RouteFilterRulesClientCreateOrUpdateResponse, error) {\n\trespType := RouteFilterRulesClientCreateOrUpdateResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.RouteFilterRule)\n\tif err != nil {\n\t\treturn respType, 
err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l KustoPoolsClientStartPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (KustoPoolsClientStartResponse, error) {\n\trespType := KustoPoolsClientStartResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l P2SVPNGatewaysClientResetPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (P2SVPNGatewaysClientResetResponse, error) {\n\trespType := P2SVPNGatewaysClientResetResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.P2SVPNGateway)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l VPNSitesConfigurationClientDownloadPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (VPNSitesConfigurationClientDownloadResponse, error) {\n\trespType := VPNSitesConfigurationClientDownloadResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l AccountsClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (AccountsClientDeleteResponse, error) {\n\trespType := AccountsClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l ExpressRouteGatewaysClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ExpressRouteGatewaysClientDeleteResponse, error) {\n\trespType := ExpressRouteGatewaysClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l 
WebApplicationFirewallPoliciesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (WebApplicationFirewallPoliciesClientDeleteResponse, error) {\n\trespType := WebApplicationFirewallPoliciesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l IntegrationRuntimesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (IntegrationRuntimesClientDeleteResponse, error) {\n\trespType := IntegrationRuntimesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l DatabaseVulnerabilityAssessmentScansInitiateScanPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (DatabaseVulnerabilityAssessmentScansInitiateScanResponse, error) {\n\trespType := DatabaseVulnerabilityAssessmentScansInitiateScanResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l WatchersClientGetAzureReachabilityReportPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (WatchersClientGetAzureReachabilityReportResponse, error) {\n\trespType := WatchersClientGetAzureReachabilityReportResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.AzureReachabilityReport)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l DevicesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (DevicesClientDeleteResponse, error) {\n\trespType := DevicesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = 
resp\n\treturn respType, nil\n}", "func (l TokensCreatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (TokensCreateResponse, error) {\n\trespType := TokensCreateResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.Token)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l ExpressRouteCircuitsClientListRoutesTablePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ExpressRouteCircuitsClientListRoutesTableResponse, error) {\n\trespType := ExpressRouteCircuitsClientListRoutesTableResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.ExpressRouteCircuitsRoutesTableListResult)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l ServersDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ServersDeleteResponse, error) {\n\trespType := ServersDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l SecurityRulesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (SecurityRulesClientDeleteResponse, error) {\n\trespType := SecurityRulesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l InterfaceTapConfigurationsClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (InterfaceTapConfigurationsClientDeleteResponse, error) {\n\trespType := InterfaceTapConfigurationsClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l NatRulesClientDeletePollerResponse) 
PollUntilDone(ctx context.Context, freq time.Duration) (NatRulesClientDeleteResponse, error) {\n\trespType := NatRulesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l TasksDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (TasksDeleteResponse, error) {\n\trespType := TasksDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l WebhooksDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (WebhooksDeleteResponse, error) {\n\trespType := WebhooksDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l ServerKeysDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ServerKeysDeleteResponse, error) {\n\trespType := ServerKeysDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l ExpressRouteCircuitsClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ExpressRouteCircuitsClientDeleteResponse, error) {\n\trespType := ExpressRouteCircuitsClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l TablesClientUpdatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (TablesClientUpdateResponse, error) {\n\trespType := TablesClientUpdateResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.Table)\n\tif err != nil {\n\t\treturn respType, 
err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l OrdersClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (OrdersClientDeleteResponse, error) {\n\trespType := OrdersClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l ServiceEndpointPoliciesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ServiceEndpointPoliciesClientDeleteResponse, error) {\n\trespType := ServiceEndpointPoliciesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l FlowLogsClientCreateOrUpdatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (FlowLogsClientCreateOrUpdateResponse, error) {\n\trespType := FlowLogsClientCreateOrUpdateResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.FlowLog)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l IPFirewallRulesClientCreateOrUpdatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (IPFirewallRulesClientCreateOrUpdateResponse, error) {\n\trespType := IPFirewallRulesClientCreateOrUpdateResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.IPFirewallRuleInfo)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l ServiceUnitsClientCreateOrUpdatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ServiceUnitsClientCreateOrUpdateResponse, error) {\n\trespType := ServiceUnitsClientCreateOrUpdateResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.ServiceUnitResource)\n\tif err != nil {\n\t\treturn respType, 
err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (p *Poller) PollUntilDone(ctx context.Context) error {\n\tif p.poller == nil {\n\t\treturn fmt.Errorf(\"internal-error: `poller` was nil`\")\n\t}\n\tif _, ok := ctx.Deadline(); !ok {\n\t\treturn fmt.Errorf(\"internal-error: `ctx` should have a deadline\")\n\t}\n\n\tvar wait sync.WaitGroup\n\twait.Add(1)\n\n\tgo func() {\n\t\tconnectionDropCounter := 0\n\t\tretryDuration := p.initialDelayDuration\n\t\tfor true {\n\t\t\t// determine the next retry duration / how long to poll for\n\t\t\tif p.latestResponse != nil {\n\t\t\t\tretryDuration = p.latestResponse.PollInterval\n\t\t\t}\n\t\t\tendTime := time.Now().Add(retryDuration)\n\t\t\tselect {\n\t\t\tcase <-time.After(time.Until(endTime)):\n\t\t\t\t{\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tp.latestResponse, p.latestError = p.poller.Poll(ctx)\n\n\t\t\t// first check the connection drop status\n\t\t\tconnectionHasBeenDropped := false\n\t\t\tif p.latestResponse == nil && p.latestError == nil {\n\t\t\t\t// connection drops can either have no response/error (where we have no context)\n\t\t\t\tconnectionHasBeenDropped = true\n\t\t\t} else if _, ok := p.latestError.(PollingDroppedConnectionError); ok {\n\t\t\t\t// or have an error with more details (e.g. 
server not found, connection reset etc)\n\t\t\t\tconnectionHasBeenDropped = true\n\t\t\t}\n\t\t\tif connectionHasBeenDropped {\n\t\t\t\tconnectionDropCounter++\n\t\t\t\tif connectionDropCounter < p.maxNumberOfDroppedConnections {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif p.latestResponse == nil && p.latestError == nil {\n\t\t\t\t\t// the connection was dropped, but we have no context\n\t\t\t\t\tp.latestError = PollingDroppedConnectionError{}\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tconnectionDropCounter = 0\n\t\t\t}\n\n\t\t\tif p.latestError != nil {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif response := p.latestResponse; response != nil {\n\t\t\t\tretryDuration = response.PollInterval\n\n\t\t\t\tdone := false\n\t\t\t\tswitch response.Status {\n\t\t\t\t// Cancelled, Dropped Connections and Failed should be raised as errors containing additional info if available\n\n\t\t\t\tcase PollingStatusCancelled:\n\t\t\t\t\tp.latestError = fmt.Errorf(\"internal-error: a polling status of `Cancelled` should be surfaced as a PollingCancelledError\")\n\t\t\t\t\tdone = true\n\t\t\t\t\tbreak\n\n\t\t\t\tcase PollingStatusFailed:\n\t\t\t\t\tp.latestError = fmt.Errorf(\"internal-error: a polling status of `Failed` should be surfaced as a PollingFailedError\")\n\t\t\t\t\tdone = true\n\t\t\t\t\tbreak\n\n\t\t\t\tcase PollingStatusInProgress:\n\t\t\t\t\tcontinue\n\n\t\t\t\tcase PollingStatusSucceeded:\n\t\t\t\t\tdone = true\n\t\t\t\t\tbreak\n\n\t\t\t\tdefault:\n\t\t\t\t\tp.latestError = fmt.Errorf(\"internal-error: unimplemented polling status %q\", string(response.Status))\n\t\t\t\t\tdone = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tif done {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\twait.Done()\n\t}()\n\n\twaitDone := make(chan struct{}, 1)\n\tgo func() {\n\t\twait.Wait()\n\t\twaitDone <- struct{}{}\n\t}()\n\n\tselect {\n\tcase <-waitDone:\n\t\tbreak\n\tcase <-ctx.Done():\n\t\t{\n\t\t\tp.latestResponse = nil\n\t\t\tp.latestError = ctx.Err()\n\t\t\treturn 
p.latestError\n\t\t}\n\t}\n\n\tif p.latestError != nil {\n\t\tp.latestResponse = nil\n\t}\n\n\treturn p.latestError\n}", "func (l VirtualHubRouteTableV2SClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (VirtualHubRouteTableV2SClientDeleteResponse, error) {\n\trespType := VirtualHubRouteTableV2SClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l StreamingEndpointsClientScalePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (StreamingEndpointsClientScaleResponse, error) {\n\trespType := StreamingEndpointsClientScaleResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l VirtualHubsClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (VirtualHubsClientDeleteResponse, error) {\n\trespType := VirtualHubsClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l VirtualAppliancesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (VirtualAppliancesClientDeleteResponse, error) {\n\trespType := VirtualAppliancesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l RoutesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (RoutesClientDeleteResponse, error) {\n\trespType := RoutesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func 
(l WatchersClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (WatchersClientDeleteResponse, error) {\n\trespType := WatchersClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l PacketCapturesClientGetStatusPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (PacketCapturesClientGetStatusResponse, error) {\n\trespType := PacketCapturesClientGetStatusResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.PacketCaptureQueryStatusResult)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l AzureFirewallsClientCreateOrUpdatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (AzureFirewallsClientCreateOrUpdateResponse, error) {\n\trespType := AzureFirewallsClientCreateOrUpdateResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.AzureFirewall)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l ExpressRouteCircuitsClientListRoutesTableSummaryPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ExpressRouteCircuitsClientListRoutesTableSummaryResponse, error) {\n\trespType := ExpressRouteCircuitsClientListRoutesTableSummaryResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.ExpressRouteCircuitsRoutesTableSummaryListResult)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l DeletedServersRecoverPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (DeletedServersRecoverResponse, error) {\n\trespType := DeletedServersRecoverResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.DeletedServer)\n\tif err != nil {\n\t\treturn respType, 
err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l ExpressRouteCircuitsClientCreateOrUpdatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ExpressRouteCircuitsClientCreateOrUpdateResponse, error) {\n\trespType := ExpressRouteCircuitsClientCreateOrUpdateResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.ExpressRouteCircuit)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l ConnectionMonitorsClientQueryPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ConnectionMonitorsClientQueryResponse, error) {\n\trespType := ConnectionMonitorsClientQueryResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.ConnectionMonitorQueryResult)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l VPNServerConfigurationsClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (VPNServerConfigurationsClientDeleteResponse, error) {\n\trespType := VPNServerConfigurationsClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l NatGatewaysClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (NatGatewaysClientDeleteResponse, error) {\n\trespType := NatGatewaysClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l InboundNatRulesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (InboundNatRulesClientDeleteResponse, error) {\n\trespType := InboundNatRulesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, 
err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l HubRouteTablesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (HubRouteTablesClientDeleteResponse, error) {\n\trespType := HubRouteTablesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l VPNGatewaysClientStopPacketCapturePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (VPNGatewaysClientStopPacketCaptureResponse, error) {\n\trespType := VPNGatewaysClientStopPacketCaptureResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.Value)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l VirtualNetworkRulesDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (VirtualNetworkRulesDeleteResponse, error) {\n\trespType := VirtualNetworkRulesDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l DscpConfigurationClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (DscpConfigurationClientDeleteResponse, error) {\n\trespType := DscpConfigurationClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l BandwidthSchedulesClientCreateOrUpdatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (BandwidthSchedulesClientCreateOrUpdateResponse, error) {\n\trespType := BandwidthSchedulesClientCreateOrUpdateResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.BandwidthSchedule)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = 
resp\n\treturn respType, nil\n}", "func (l ExpressRoutePortsClientCreateOrUpdatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ExpressRoutePortsClientCreateOrUpdateResponse, error) {\n\trespType := ExpressRoutePortsClientCreateOrUpdateResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.ExpressRoutePort)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l P2SVPNGatewaysClientGetP2SVPNConnectionHealthDetailedPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (P2SVPNGatewaysClientGetP2SVPNConnectionHealthDetailedResponse, error) {\n\trespType := P2SVPNGatewaysClientGetP2SVPNConnectionHealthDetailedResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.P2SVPNConnectionHealth)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l LinkedServicesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (LinkedServicesClientDeleteResponse, error) {\n\trespType := LinkedServicesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.LinkedService)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l VirtualNetworkGatewayConnectionsClientGetIkeSasPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (VirtualNetworkGatewayConnectionsClientGetIkeSasResponse, error) {\n\trespType := VirtualNetworkGatewayConnectionsClientGetIkeSasResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.Value)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l UsersClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (UsersClientDeleteResponse, error) {\n\trespType := UsersClientDeleteResponse{}\n\tresp, err := 
l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l ExpressRouteGatewaysClientUpdateTagsPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ExpressRouteGatewaysClientUpdateTagsResponse, error) {\n\trespType := ExpressRouteGatewaysClientUpdateTagsResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.ExpressRouteGateway)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}" ]
[ "0.62593186", "0.6195651", "0.61801505", "0.61666083", "0.61615384", "0.61615384", "0.61542344", "0.60752875", "0.6071086", "0.6070844", "0.6060712", "0.60435015", "0.6043004", "0.6030803", "0.6021779", "0.6008477", "0.5998286", "0.5988331", "0.5979664", "0.59588003", "0.5956121", "0.5947151", "0.5912396", "0.5898727", "0.5896794", "0.5880054", "0.58771765", "0.5874452", "0.5874452", "0.5854186", "0.5853251", "0.5840378", "0.5839102", "0.5827677", "0.58119726", "0.5809271", "0.58090353", "0.58074", "0.5792292", "0.5786232", "0.5783834", "0.5780077", "0.5778913", "0.57663983", "0.5761727", "0.576172", "0.5757367", "0.57549006", "0.5749928", "0.57388604", "0.5738559", "0.57367176", "0.5736436", "0.5731681", "0.5725722", "0.57253516", "0.5724978", "0.5723574", "0.57168174", "0.57153654", "0.57141024", "0.57100236", "0.57093906", "0.57074827", "0.5704071", "0.5704001", "0.5703067", "0.57024825", "0.56975996", "0.5690398", "0.5685622", "0.5684038", "0.5680911", "0.56804585", "0.5680206", "0.5679911", "0.5678592", "0.56702304", "0.5668656", "0.5666902", "0.56606305", "0.56578356", "0.5656016", "0.5647274", "0.5641193", "0.5640907", "0.563784", "0.5634117", "0.5629025", "0.5628453", "0.56278455", "0.5627458", "0.56239086", "0.5620916", "0.561948", "0.5618285", "0.5616452", "0.561405", "0.56136423", "0.56096095" ]
0.591079
23
Resume rehydrates a AlertsSimulatePollerResponse from the provided client and resume token.
func (l *AlertsSimulatePollerResponse) Resume(ctx context.Context, client *AlertsClient, token string) error { pt, err := armruntime.NewPollerFromResumeToken("AlertsClient.Simulate", token, client.pl, client.simulateHandleError) if err != nil { return err } poller := &AlertsSimulatePoller{ pt: pt, } resp, err := poller.Poll(ctx) if err != nil { return err } l.Poller = poller l.RawResponse = resp return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (l *ApplicationGatewaysClientStartPollerResponse) Resume(ctx context.Context, client *ApplicationGatewaysClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ApplicationGatewaysClient.Start\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ApplicationGatewaysClientStartPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *LiveEventsClientResetPollerResponse) Resume(ctx context.Context, client *LiveEventsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"LiveEventsClient.Reset\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &LiveEventsClientResetPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VPNGatewaysClientResetPollerResponse) Resume(ctx context.Context, client *VPNGatewaysClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VPNGatewaysClient.Reset\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VPNGatewaysClientResetPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *TriggersClientStartPollerResponse) Resume(ctx context.Context, client *TriggersClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"TriggersClient.Start\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &TriggersClientStartPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *LiveEventsClientStartPollerResponse) Resume(ctx context.Context, client *LiveEventsClient, token string) error {\n\tpt, err := 
armruntime.NewPollerFromResumeToken(\"LiveEventsClient.Start\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &LiveEventsClientStartPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VirtualNetworkGatewaysClientResetPollerResponse) Resume(ctx context.Context, client *VirtualNetworkGatewaysClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VirtualNetworkGatewaysClient.Reset\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualNetworkGatewaysClientResetPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *SQLPoolsClientResumePollerResponse) Resume(ctx context.Context, client *SQLPoolsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"SQLPoolsClient.Resume\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &SQLPoolsClientResumePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *SharesClientRefreshPollerResponse) Resume(ctx context.Context, client *SharesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"SharesClient.Refresh\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &SharesClientRefreshPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *P2SVPNGatewaysClientResetPollerResponse) Resume(ctx context.Context, client *P2SVPNGatewaysClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"P2SVPNGatewaysClient.Reset\", token, client.pl)\n\tif err != nil {\n\t\treturn 
err\n\t}\n\tpoller := &P2SVPNGatewaysClientResetPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ContainersClientRefreshPollerResponse) Resume(ctx context.Context, client *ContainersClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ContainersClient.Refresh\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ContainersClientRefreshPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ManagementClientGetActiveSessionsPollerResponse) Resume(ctx context.Context, client *ManagementClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ManagementClient.GetActiveSessions\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ManagementClientGetActiveSessionsPoller{\n\t\tpt: pt,\n\t\tclient: client,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VirtualNetworkGatewaysClientResetVPNClientSharedKeyPollerResponse) Resume(ctx context.Context, client *VirtualNetworkGatewaysClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VirtualNetworkGatewaysClient.ResetVPNClientSharedKey\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualNetworkGatewaysClientResetVPNClientSharedKeyPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *WorkspaceManagedSQLServerEncryptionProtectorClientRevalidatePollerResponse) Resume(ctx context.Context, client *WorkspaceManagedSQLServerEncryptionProtectorClient, token string) error {\n\tpt, err := 
armruntime.NewPollerFromResumeToken(\"WorkspaceManagedSQLServerEncryptionProtectorClient.Revalidate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &WorkspaceManagedSQLServerEncryptionProtectorClientRevalidatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *IntegrationRuntimeObjectMetadataClientRefreshPollerResponse) Resume(ctx context.Context, client *IntegrationRuntimeObjectMetadataClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"IntegrationRuntimeObjectMetadataClient.Refresh\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &IntegrationRuntimeObjectMetadataClientRefreshPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *IntegrationRuntimeObjectMetadataClientRefreshPollerResponse) Resume(ctx context.Context, client *IntegrationRuntimeObjectMetadataClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"IntegrationRuntimeObjectMetadataClient.Refresh\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &IntegrationRuntimeObjectMetadataClientRefreshPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *LiveEventsClientAllocatePollerResponse) Resume(ctx context.Context, client *LiveEventsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"LiveEventsClient.Allocate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &LiveEventsClientAllocatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l 
*StreamingEndpointsClientStartPollerResponse) Resume(ctx context.Context, client *StreamingEndpointsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"StreamingEndpointsClient.Start\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &StreamingEndpointsClientStartPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ManagedInstancesUpdatePollerResponse) Resume(ctx context.Context, client *ManagedInstancesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ManagedInstancesClient.Update\", token, client.pl, client.updateHandleError)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ManagedInstancesUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ManagedInstanceEncryptionProtectorsRevalidatePollerResponse) Resume(ctx context.Context, client *ManagedInstanceEncryptionProtectorsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ManagedInstanceEncryptionProtectorsClient.Revalidate\", token, client.pl, client.revalidateHandleError)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ManagedInstanceEncryptionProtectorsRevalidatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *KustoPoolsClientStartPollerResponse) Resume(ctx context.Context, client *KustoPoolsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"KustoPoolsClient.Start\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &KustoPoolsClientStartPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse 
= resp\n\treturn nil\n}", "func (l *SnapshotsClientUpdatePollerResponse) Resume(ctx context.Context, client *SnapshotsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"SnapshotsClient.Update\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &SnapshotsClientUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *DeletedServersRecoverPollerResponse) Resume(ctx context.Context, client *DeletedServersClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"DeletedServersClient.Recover\", token, client.pl, client.recoverHandleError)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &DeletedServersRecoverPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *WatchersClientVerifyIPFlowPollerResponse) Resume(ctx context.Context, client *WatchersClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"WatchersClient.VerifyIPFlow\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &WatchersClientVerifyIPFlowPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ConnectionMonitorsClientStartPollerResponse) Resume(ctx context.Context, client *ConnectionMonitorsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ConnectionMonitorsClient.Start\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ConnectionMonitorsClientStartPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VirtualRoutersClientCreateOrUpdatePollerResponse) 
Resume(ctx context.Context, client *VirtualRoutersClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VirtualRoutersClient.CreateOrUpdate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualRoutersClientCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VirtualRouterPeeringsClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *VirtualRouterPeeringsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VirtualRouterPeeringsClient.CreateOrUpdate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualRouterPeeringsClientCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *SyncAgentsCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *SyncAgentsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"SyncAgentsClient.CreateOrUpdate\", token, client.pl, client.createOrUpdateHandleError)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &SyncAgentsCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VirtualNetworkTapsClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *VirtualNetworkTapsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VirtualNetworkTapsClient.CreateOrUpdate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualNetworkTapsClientCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l 
*LiveEventsClientUpdatePollerResponse) Resume(ctx context.Context, client *LiveEventsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"LiveEventsClient.Update\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &LiveEventsClientUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ApplicationGatewaysClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *ApplicationGatewaysClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ApplicationGatewaysClient.CreateOrUpdate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ApplicationGatewaysClientCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *IntegrationRuntimesClientStartPollerResponse) Resume(ctx context.Context, client *IntegrationRuntimesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"IntegrationRuntimesClient.Start\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &IntegrationRuntimesClientStartPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *IntegrationRuntimesClientStartPollerResponse) Resume(ctx context.Context, client *IntegrationRuntimesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"IntegrationRuntimesClient.Start\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &IntegrationRuntimesClientStartPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l 
*RoutesClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *RoutesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"RoutesClient.CreateOrUpdate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &RoutesClientCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VolumesClientAuthorizeReplicationPollerResponse) Resume(ctx context.Context, client *VolumesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VolumesClient.AuthorizeReplication\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VolumesClientAuthorizeReplicationPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ClientDeletePollerResponse) Resume(ctx context.Context, client *Client, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"Client.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ClustersClientUpdatePollerResponse) Resume(ctx context.Context, client *ClustersClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ClustersClient.Update\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ClustersClientUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ApplicationGatewaysClientDeletePollerResponse) Resume(ctx context.Context, client *ApplicationGatewaysClient, token string) error {\n\tpt, err := 
armruntime.NewPollerFromResumeToken(\"ApplicationGatewaysClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ApplicationGatewaysClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *BackupPoliciesClientUpdatePollerResponse) Resume(ctx context.Context, client *BackupPoliciesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"BackupPoliciesClient.Update\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &BackupPoliciesClientUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *DataFlowDebugSessionClientCreatePollerResponse) Resume(ctx context.Context, client *DataFlowDebugSessionClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"DataFlowDebugSessionClient.Create\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &DataFlowDebugSessionClientCreatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *SQLPoolVulnerabilityAssessmentScansClientInitiateScanPollerResponse) Resume(ctx context.Context, client *SQLPoolVulnerabilityAssessmentScansClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"SQLPoolVulnerabilityAssessmentScansClient.InitiateScan\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &SQLPoolVulnerabilityAssessmentScansClientInitiateScanPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *IPAllocationsClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client 
*IPAllocationsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"IPAllocationsClient.CreateOrUpdate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &IPAllocationsClientCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *EnergyServicesClientCreatePollerResponse) Resume(ctx context.Context, client *EnergyServicesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"EnergyServicesClient.Create\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &EnergyServicesClientCreatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *SnapshotPoliciesClientUpdatePollerResponse) Resume(ctx context.Context, client *SnapshotPoliciesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"SnapshotPoliciesClient.Update\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &SnapshotPoliciesClientUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *RunsCancelPollerResponse) Resume(ctx context.Context, client *RunsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"RunsClient.Cancel\", token, client.pl, client.cancelHandleError)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &RunsCancelPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *IPAllocationsClientDeletePollerResponse) Resume(ctx context.Context, client *IPAllocationsClient, token string) error {\n\tpt, err := 
armruntime.NewPollerFromResumeToken(\"IPAllocationsClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &IPAllocationsClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *OrdersClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *OrdersClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"OrdersClient.CreateOrUpdate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &OrdersClientCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ServiceEndpointPoliciesClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *ServiceEndpointPoliciesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ServiceEndpointPoliciesClient.CreateOrUpdate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ServiceEndpointPoliciesClientCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ManagedInstancesDeletePollerResponse) Resume(ctx context.Context, client *ManagedInstancesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ManagedInstancesClient.Delete\", token, client.pl, client.deleteHandleError)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ManagedInstancesDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *EncryptionProtectorsRevalidatePollerResponse) Resume(ctx context.Context, client *EncryptionProtectorsClient, token string) error {\n\tpt, err := 
armruntime.NewPollerFromResumeToken(\"EncryptionProtectorsClient.Revalidate\", token, client.pl, client.revalidateHandleError)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &EncryptionProtectorsRevalidatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *SnapshotsClientCreatePollerResponse) Resume(ctx context.Context, client *SnapshotsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"SnapshotsClient.Create\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &SnapshotsClientCreatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *RoutesClientDeletePollerResponse) Resume(ctx context.Context, client *RoutesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"RoutesClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &RoutesClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *AccountsClientUpdatePollerResponse) Resume(ctx context.Context, client *AccountsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"AccountsClient.Update\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &AccountsClientUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VPNGatewaysClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *VPNGatewaysClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VPNGatewaysClient.CreateOrUpdate\", token, client.pl)\n\tif err != nil {\n\t\treturn 
err\n\t}\n\tpoller := &VPNGatewaysClientCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ManagedDatabaseVulnerabilityAssessmentScansInitiateScanPollerResponse) Resume(ctx context.Context, client *ManagedDatabaseVulnerabilityAssessmentScansClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ManagedDatabaseVulnerabilityAssessmentScansClient.InitiateScan\", token, client.pl, client.initiateScanHandleError)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ManagedDatabaseVulnerabilityAssessmentScansInitiateScanPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *SyncAgentsDeletePollerResponse) Resume(ctx context.Context, client *SyncAgentsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"SyncAgentsClient.Delete\", token, client.pl, client.deleteHandleError)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &SyncAgentsDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *SnapshotsClientDeletePollerResponse) Resume(ctx context.Context, client *SnapshotsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"SnapshotsClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &SnapshotsClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *MonitoringConfigClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *MonitoringConfigClient, token string) error {\n\tpt, err := 
armruntime.NewPollerFromResumeToken(\"MonitoringConfigClient.CreateOrUpdate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &MonitoringConfigClientCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *TriggersClientDeletePollerResponse) Resume(ctx context.Context, client *TriggersClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"TriggersClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &TriggersClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *WorkspaceManagedSQLServerSecurityAlertPolicyClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *WorkspaceManagedSQLServerSecurityAlertPolicyClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"WorkspaceManagedSQLServerSecurityAlertPolicyClient.CreateOrUpdate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &WorkspaceManagedSQLServerSecurityAlertPolicyClientCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VPNServerConfigurationsAssociatedWithVirtualWanClientListPollerResponse) Resume(ctx context.Context, client *VPNServerConfigurationsAssociatedWithVirtualWanClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VPNServerConfigurationsAssociatedWithVirtualWanClient.List\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VPNServerConfigurationsAssociatedWithVirtualWanClientListPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = 
resp\n\treturn nil\n}", "func (l *ApplicationSecurityGroupsClientDeletePollerResponse) Resume(ctx context.Context, client *ApplicationSecurityGroupsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ApplicationSecurityGroupsClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ApplicationSecurityGroupsClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *Client, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"Client.CreateOrUpdate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ClientCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *LiveEventsClientCreatePollerResponse) Resume(ctx context.Context, client *LiveEventsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"LiveEventsClient.Create\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &LiveEventsClientCreatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ManagedInstanceAzureADOnlyAuthenticationsCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *ManagedInstanceAzureADOnlyAuthenticationsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ManagedInstanceAzureADOnlyAuthenticationsClient.CreateOrUpdate\", token, client.pl, client.createOrUpdateHandleError)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ManagedInstanceAzureADOnlyAuthenticationsCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil 
{\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *PolicyStatesClientTriggerSubscriptionEvaluationPollerResponse) Resume(ctx context.Context, client *PolicyStatesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"PolicyStatesClient.TriggerSubscriptionEvaluation\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &PolicyStatesClientTriggerSubscriptionEvaluationPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *TriggersClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *TriggersClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"TriggersClient.CreateOrUpdate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &TriggersClientCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ManagedInstancesFailoverPollerResponse) Resume(ctx context.Context, client *ManagedInstancesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ManagedInstancesClient.Failover\", token, client.pl, client.failoverHandleError)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ManagedInstancesFailoverPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *LiveEventsClientDeletePollerResponse) Resume(ctx context.Context, client *LiveEventsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"LiveEventsClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &LiveEventsClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn 
err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *RolesClientDeletePollerResponse) Resume(ctx context.Context, client *RolesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"RolesClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &RolesClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ManagedInstancesCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *ManagedInstancesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ManagedInstancesClient.CreateOrUpdate\", token, client.pl, client.createOrUpdateHandleError)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ManagedInstancesCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *PoolsClientUpdatePollerResponse) Resume(ctx context.Context, client *PoolsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"PoolsClient.Update\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &PoolsClientUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *TablesClientUpdatePollerResponse) Resume(ctx context.Context, client *TablesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"TablesClient.Update\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &TablesClientUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *RouteFiltersClientCreateOrUpdatePollerResponse) Resume(ctx 
context.Context, client *RouteFiltersClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"RouteFiltersClient.CreateOrUpdate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &RouteFiltersClientCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *EnergyServicesClientDeletePollerResponse) Resume(ctx context.Context, client *EnergyServicesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"EnergyServicesClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &EnergyServicesClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *OrdersClientDeletePollerResponse) Resume(ctx context.Context, client *OrdersClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"OrdersClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &OrdersClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VirtualNetworkGatewayConnectionsClientResetSharedKeyPollerResponse) Resume(ctx context.Context, client *VirtualNetworkGatewayConnectionsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VirtualNetworkGatewayConnectionsClient.ResetSharedKey\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualNetworkGatewayConnectionsClientResetSharedKeyPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ApplicationsDeletePollerResponse) Resume(ctx context.Context, client 
*ApplicationsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ApplicationsClient.Delete\", token, client.pl, client.deleteHandleError)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ApplicationsDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *DatabaseVulnerabilityAssessmentScansInitiateScanPollerResponse) Resume(ctx context.Context, client *DatabaseVulnerabilityAssessmentScansClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"DatabaseVulnerabilityAssessmentScansClient.InitiateScan\", token, client.pl, client.initiateScanHandleError)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &DatabaseVulnerabilityAssessmentScansInitiateScanPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VirtualWansClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *VirtualWansClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VirtualWansClient.CreateOrUpdate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualWansClientCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *PolicyStatesClientTriggerResourceGroupEvaluationPollerResponse) Resume(ctx context.Context, client *PolicyStatesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"PolicyStatesClient.TriggerResourceGroupEvaluation\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &PolicyStatesClientTriggerResourceGroupEvaluationPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = 
poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *AdaptiveNetworkHardeningsEnforcePollerResponse) Resume(ctx context.Context, client *AdaptiveNetworkHardeningsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"AdaptiveNetworkHardeningsClient.Enforce\", token, client.pl, client.enforceHandleError)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &AdaptiveNetworkHardeningsEnforcePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ExpressRouteGatewaysClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *ExpressRouteGatewaysClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ExpressRouteGatewaysClient.CreateOrUpdate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ExpressRouteGatewaysClientCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *RoutingIntentClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *RoutingIntentClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"RoutingIntentClient.CreateOrUpdate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &RoutingIntentClientCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VirtualRoutersClientDeletePollerResponse) Resume(ctx context.Context, client *VirtualRoutersClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VirtualRoutersClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualRoutersClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil 
{\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VolumesClientReInitializeReplicationPollerResponse) Resume(ctx context.Context, client *VolumesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VolumesClient.ReInitializeReplication\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VolumesClientReInitializeReplicationPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ApplicationGatewayPrivateEndpointConnectionsClientUpdatePollerResponse) Resume(ctx context.Context, client *ApplicationGatewayPrivateEndpointConnectionsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ApplicationGatewayPrivateEndpointConnectionsClient.Update\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ApplicationGatewayPrivateEndpointConnectionsClientUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *PrivateZonesClientUpdatePollerResponse) Resume(ctx context.Context, client *PrivateZonesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"PrivateZonesClient.Update\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &PrivateZonesClientUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ServiceEndpointPoliciesClientDeletePollerResponse) Resume(ctx context.Context, client *ServiceEndpointPoliciesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ServiceEndpointPoliciesClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := 
&ServiceEndpointPoliciesClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ApplicationGatewaysClientBackendHealthOnDemandPollerResponse) Resume(ctx context.Context, client *ApplicationGatewaysClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ApplicationGatewaysClient.BackendHealthOnDemand\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ApplicationGatewaysClientBackendHealthOnDemandPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *HubRouteTablesClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *HubRouteTablesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"HubRouteTablesClient.CreateOrUpdate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &HubRouteTablesClientCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ManagedInstanceKeysCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *ManagedInstanceKeysClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ManagedInstanceKeysClient.CreateOrUpdate\", token, client.pl, client.createOrUpdateHandleError)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ManagedInstanceKeysCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VirtualNetworkGatewaysClientGeneratevpnclientpackagePollerResponse) Resume(ctx context.Context, client *VirtualNetworkGatewaysClient, token string) error {\n\tpt, err := 
armruntime.NewPollerFromResumeToken(\"VirtualNetworkGatewaysClient.Generatevpnclientpackage\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualNetworkGatewaysClientGeneratevpnclientpackagePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *PrivateEndpointsClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *PrivateEndpointsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"PrivateEndpointsClient.CreateOrUpdate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &PrivateEndpointsClientCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *RouteFiltersClientDeletePollerResponse) Resume(ctx context.Context, client *RouteFiltersClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"RouteFiltersClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &RouteFiltersClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *DataFlowDebugSessionClientExecuteCommandPollerResponse) Resume(ctx context.Context, client *DataFlowDebugSessionClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"DataFlowDebugSessionClient.ExecuteCommand\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &DataFlowDebugSessionClientExecuteCommandPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VirtualApplianceSitesClientDeletePollerResponse) Resume(ctx context.Context, client 
*VirtualApplianceSitesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VirtualApplianceSitesClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualApplianceSitesClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *RouteTablesClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *RouteTablesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"RouteTablesClient.CreateOrUpdate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &RouteTablesClientCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ApplicationGatewaysClientStopPollerResponse) Resume(ctx context.Context, client *ApplicationGatewaysClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ApplicationGatewaysClient.Stop\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ApplicationGatewaysClientStopPoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *FlowLogsClientCreateOrUpdatePollerResponse) Resume(ctx context.Context, client *FlowLogsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"FlowLogsClient.CreateOrUpdate\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &FlowLogsClientCreateOrUpdatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *AzureADOnlyAuthenticationsClientCreatePollerResponse) Resume(ctx context.Context, client *AzureADOnlyAuthenticationsClient, 
token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"AzureADOnlyAuthenticationsClient.Create\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &AzureADOnlyAuthenticationsClientCreatePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}" ]
[ "0.7727089", "0.7702642", "0.7667797", "0.76061624", "0.7586481", "0.7535351", "0.75290906", "0.750984", "0.74973065", "0.7481278", "0.74666893", "0.7465214", "0.7454607", "0.74507034", "0.74507034", "0.7420852", "0.7406041", "0.7398732", "0.7377792", "0.73760927", "0.73669505", "0.7365932", "0.73611665", "0.7353831", "0.7349908", "0.73481625", "0.7347849", "0.73475283", "0.7339891", "0.7338281", "0.73319584", "0.73319584", "0.7331248", "0.73312086", "0.7327746", "0.7313939", "0.73131937", "0.7312059", "0.7311117", "0.7305972", "0.730366", "0.73036575", "0.73031265", "0.72919565", "0.7290244", "0.72846776", "0.7284393", "0.7279347", "0.72776926", "0.7276688", "0.7275678", "0.7274929", "0.7274491", "0.72730654", "0.7271443", "0.727042", "0.7268341", "0.72645897", "0.72620237", "0.7260719", "0.725889", "0.72584033", "0.7255951", "0.7253855", "0.72521144", "0.7251514", "0.725043", "0.7250105", "0.7248559", "0.7247546", "0.72471136", "0.7245623", "0.724396", "0.72431034", "0.7240522", "0.7239583", "0.72392166", "0.7238416", "0.72372", "0.7236461", "0.7235021", "0.7234587", "0.7231598", "0.7225468", "0.7225186", "0.7224894", "0.7223105", "0.72226924", "0.72218275", "0.722035", "0.7217356", "0.72140807", "0.72128946", "0.7211713", "0.7209302", "0.72030395", "0.7200972", "0.7200604", "0.72005147", "0.7200056" ]
0.7973928
0
PollUntilDone will poll the service endpoint until a terminal state is reached or an error is received. freq: the time to wait between intervals in absence of a RetryAfter header. Allowed minimum is one second. A good starting value is 30 seconds. Note that some resources might benefit from a different value.
func (l ServerVulnerabilityAssessmentDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ServerVulnerabilityAssessmentDeleteResponse, error) { respType := ServerVulnerabilityAssessmentDeleteResponse{} resp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil) if err != nil { return respType, err } respType.RawResponse = resp return respType, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (l WatchersClientGetTroubleshootingPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (WatchersClientGetTroubleshootingResponse, error) {\n\trespType := WatchersClientGetTroubleshootingResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.TroubleshootingResult)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l WatchersClientGetFlowLogStatusPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (WatchersClientGetFlowLogStatusResponse, error) {\n\trespType := WatchersClientGetFlowLogStatusResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.FlowLogInformation)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l ApplicationGatewaysClientStopPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ApplicationGatewaysClientStopResponse, error) {\n\trespType := ApplicationGatewaysClientStopResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l ConnectionMonitorsClientStopPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ConnectionMonitorsClientStopResponse, error) {\n\trespType := ConnectionMonitorsClientStopResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l IntegrationRuntimesClientStopPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (IntegrationRuntimesClientStopResponse, error) {\n\trespType := IntegrationRuntimesClientStopResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l 
IntegrationRuntimesClientStopPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (IntegrationRuntimesClientStopResponse, error) {\n\trespType := IntegrationRuntimesClientStopResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l WatchersClientGetNextHopPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (WatchersClientGetNextHopResponse, error) {\n\trespType := WatchersClientGetNextHopResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.NextHopResult)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l TokensDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (TokensDeleteResponse, error) {\n\trespType := TokensDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l WatchersClientGetTroubleshootingResultPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (WatchersClientGetTroubleshootingResultResponse, error) {\n\trespType := WatchersClientGetTroubleshootingResultResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.TroubleshootingResult)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l LiveEventsClientStopPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (LiveEventsClientStopResponse, error) {\n\trespType := LiveEventsClientStopResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l DevicesClientScanForUpdatesPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) 
(DevicesClientScanForUpdatesResponse, error) {\n\trespType := DevicesClientScanForUpdatesResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l PacketCapturesClientStopPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (PacketCapturesClientStopResponse, error) {\n\trespType := PacketCapturesClientStopResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l StreamingEndpointsClientStopPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (StreamingEndpointsClientStopResponse, error) {\n\trespType := StreamingEndpointsClientStopResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l ConnectionMonitorsClientStartPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ConnectionMonitorsClientStartResponse, error) {\n\trespType := ConnectionMonitorsClientStartResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l TriggersClientStopPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (TriggersClientStopResponse, error) {\n\trespType := TriggersClientStopResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l BandwidthSchedulesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (BandwidthSchedulesClientDeleteResponse, error) {\n\trespType := BandwidthSchedulesClientDeleteResponse{}\n\tresp, err := 
l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l ApplicationGatewaysClientStartPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ApplicationGatewaysClientStartResponse, error) {\n\trespType := ApplicationGatewaysClientStartResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l WatchersClientSetFlowLogConfigurationPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (WatchersClientSetFlowLogConfigurationResponse, error) {\n\trespType := WatchersClientSetFlowLogConfigurationResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.FlowLogInformation)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l TokensUpdatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (TokensUpdateResponse, error) {\n\trespType := TokensUpdateResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.Token)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l OutboundFirewallRulesDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (OutboundFirewallRulesDeleteResponse, error) {\n\trespType := OutboundFirewallRulesDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l KustoPoolsClientStopPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (KustoPoolsClientStopResponse, error) {\n\trespType := KustoPoolsClientStopResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, 
err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l AzureFirewallsClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (AzureFirewallsClientDeleteResponse, error) {\n\trespType := AzureFirewallsClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l DevicesClientInstallUpdatesPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (DevicesClientInstallUpdatesResponse, error) {\n\trespType := DevicesClientInstallUpdatesResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l AlertsSimulatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (AlertsSimulateResponse, error) {\n\trespType := AlertsSimulateResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l EnergyServicesClientCreatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (EnergyServicesClientCreateResponse, error) {\n\trespType := EnergyServicesClientCreateResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.EnergyService)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l RouteFilterRulesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (RouteFilterRulesClientDeleteResponse, error) {\n\trespType := RouteFilterRulesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l WatchersClientGetNetworkConfigurationDiagnosticPollerResponse) 
PollUntilDone(ctx context.Context, freq time.Duration) (WatchersClientGetNetworkConfigurationDiagnosticResponse, error) {\n\trespType := WatchersClientGetNetworkConfigurationDiagnosticResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.ConfigurationDiagnosticResponse)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l DevicesClientDownloadUpdatesPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (DevicesClientDownloadUpdatesResponse, error) {\n\trespType := DevicesClientDownloadUpdatesResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l IntegrationRuntimesClientStartPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (IntegrationRuntimesClientStartResponse, error) {\n\trespType := IntegrationRuntimesClientStartResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.IntegrationRuntimeStatusResponse)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l IntegrationRuntimesClientStartPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (IntegrationRuntimesClientStartResponse, error) {\n\trespType := IntegrationRuntimesClientStartResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.IntegrationRuntimeStatusResponse)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l StreamingEndpointsClientStartPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (StreamingEndpointsClientStartResponse, error) {\n\trespType := StreamingEndpointsClientStartResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, 
nil\n}", "func (l PolicyStatesClientTriggerSubscriptionEvaluationPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (PolicyStatesClientTriggerSubscriptionEvaluationResponse, error) {\n\trespType := PolicyStatesClientTriggerSubscriptionEvaluationResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l ExpressRoutePortsClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ExpressRoutePortsClientDeleteResponse, error) {\n\trespType := ExpressRoutePortsClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l LiveEventsClientResetPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (LiveEventsClientResetResponse, error) {\n\trespType := LiveEventsClientResetResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l RunsCancelPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (RunsCancelResponse, error) {\n\trespType := RunsCancelResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l InterfacesClientGetEffectiveRouteTablePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (InterfacesClientGetEffectiveRouteTableResponse, error) {\n\trespType := InterfacesClientGetEffectiveRouteTableResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.EffectiveRouteListResult)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l IPFirewallRulesClientDeletePollerResponse) 
PollUntilDone(ctx context.Context, freq time.Duration) (IPFirewallRulesClientDeleteResponse, error) {\n\trespType := IPFirewallRulesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.Object)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l LiveEventsClientStartPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (LiveEventsClientStartResponse, error) {\n\trespType := LiveEventsClientStartResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l TablesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (TablesClientDeleteResponse, error) {\n\trespType := TablesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l EnergyServicesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (EnergyServicesClientDeleteResponse, error) {\n\trespType := EnergyServicesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l FlowLogsClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (FlowLogsClientDeleteResponse, error) {\n\trespType := FlowLogsClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l SharesClientRefreshPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (SharesClientRefreshResponse, error) {\n\trespType := SharesClientRefreshResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, 
freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l TriggersClientStartPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (TriggersClientStartResponse, error) {\n\trespType := TriggersClientStartResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l RouteTablesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (RouteTablesClientDeleteResponse, error) {\n\trespType := RouteTablesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l VPNGatewaysClientResetPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (VPNGatewaysClientResetResponse, error) {\n\trespType := VPNGatewaysClientResetResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.VPNGateway)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l RouteFiltersClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (RouteFiltersClientDeleteResponse, error) {\n\trespType := RouteFiltersClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l FirewallPoliciesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (FirewallPoliciesClientDeleteResponse, error) {\n\trespType := FirewallPoliciesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l 
RouteFilterRulesClientCreateOrUpdatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (RouteFilterRulesClientCreateOrUpdateResponse, error) {\n\trespType := RouteFilterRulesClientCreateOrUpdateResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.RouteFilterRule)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l KustoPoolsClientStartPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (KustoPoolsClientStartResponse, error) {\n\trespType := KustoPoolsClientStartResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l P2SVPNGatewaysClientResetPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (P2SVPNGatewaysClientResetResponse, error) {\n\trespType := P2SVPNGatewaysClientResetResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.P2SVPNGateway)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l VPNSitesConfigurationClientDownloadPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (VPNSitesConfigurationClientDownloadResponse, error) {\n\trespType := VPNSitesConfigurationClientDownloadResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l AccountsClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (AccountsClientDeleteResponse, error) {\n\trespType := AccountsClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l ExpressRouteGatewaysClientDeletePollerResponse) PollUntilDone(ctx 
context.Context, freq time.Duration) (ExpressRouteGatewaysClientDeleteResponse, error) {\n\trespType := ExpressRouteGatewaysClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l WebApplicationFirewallPoliciesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (WebApplicationFirewallPoliciesClientDeleteResponse, error) {\n\trespType := WebApplicationFirewallPoliciesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l IntegrationRuntimesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (IntegrationRuntimesClientDeleteResponse, error) {\n\trespType := IntegrationRuntimesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l DatabaseVulnerabilityAssessmentScansInitiateScanPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (DatabaseVulnerabilityAssessmentScansInitiateScanResponse, error) {\n\trespType := DatabaseVulnerabilityAssessmentScansInitiateScanResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l DevicesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (DevicesClientDeleteResponse, error) {\n\trespType := DevicesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l WatchersClientGetAzureReachabilityReportPollerResponse) PollUntilDone(ctx 
context.Context, freq time.Duration) (WatchersClientGetAzureReachabilityReportResponse, error) {\n\trespType := WatchersClientGetAzureReachabilityReportResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.AzureReachabilityReport)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l TokensCreatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (TokensCreateResponse, error) {\n\trespType := TokensCreateResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.Token)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l ExpressRouteCircuitsClientListRoutesTablePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ExpressRouteCircuitsClientListRoutesTableResponse, error) {\n\trespType := ExpressRouteCircuitsClientListRoutesTableResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.ExpressRouteCircuitsRoutesTableListResult)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l ServersDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ServersDeleteResponse, error) {\n\trespType := ServersDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l SecurityRulesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (SecurityRulesClientDeleteResponse, error) {\n\trespType := SecurityRulesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l InterfaceTapConfigurationsClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) 
(InterfaceTapConfigurationsClientDeleteResponse, error) {\n\trespType := InterfaceTapConfigurationsClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l NatRulesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (NatRulesClientDeleteResponse, error) {\n\trespType := NatRulesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l TasksDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (TasksDeleteResponse, error) {\n\trespType := TasksDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l ServerKeysDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ServerKeysDeleteResponse, error) {\n\trespType := ServerKeysDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l WebhooksDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (WebhooksDeleteResponse, error) {\n\trespType := WebhooksDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l ExpressRouteCircuitsClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ExpressRouteCircuitsClientDeleteResponse, error) {\n\trespType := ExpressRouteCircuitsClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, 
err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l TablesClientUpdatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (TablesClientUpdateResponse, error) {\n\trespType := TablesClientUpdateResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.Table)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l OrdersClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (OrdersClientDeleteResponse, error) {\n\trespType := OrdersClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l ServiceEndpointPoliciesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ServiceEndpointPoliciesClientDeleteResponse, error) {\n\trespType := ServiceEndpointPoliciesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l FlowLogsClientCreateOrUpdatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (FlowLogsClientCreateOrUpdateResponse, error) {\n\trespType := FlowLogsClientCreateOrUpdateResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.FlowLog)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l IPFirewallRulesClientCreateOrUpdatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (IPFirewallRulesClientCreateOrUpdateResponse, error) {\n\trespType := IPFirewallRulesClientCreateOrUpdateResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.IPFirewallRuleInfo)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func 
(l ServiceUnitsClientCreateOrUpdatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ServiceUnitsClientCreateOrUpdateResponse, error) {\n\trespType := ServiceUnitsClientCreateOrUpdateResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.ServiceUnitResource)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (p *Poller) PollUntilDone(ctx context.Context) error {\n\tif p.poller == nil {\n\t\treturn fmt.Errorf(\"internal-error: `poller` was nil`\")\n\t}\n\tif _, ok := ctx.Deadline(); !ok {\n\t\treturn fmt.Errorf(\"internal-error: `ctx` should have a deadline\")\n\t}\n\n\tvar wait sync.WaitGroup\n\twait.Add(1)\n\n\tgo func() {\n\t\tconnectionDropCounter := 0\n\t\tretryDuration := p.initialDelayDuration\n\t\tfor true {\n\t\t\t// determine the next retry duration / how long to poll for\n\t\t\tif p.latestResponse != nil {\n\t\t\t\tretryDuration = p.latestResponse.PollInterval\n\t\t\t}\n\t\t\tendTime := time.Now().Add(retryDuration)\n\t\t\tselect {\n\t\t\tcase <-time.After(time.Until(endTime)):\n\t\t\t\t{\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tp.latestResponse, p.latestError = p.poller.Poll(ctx)\n\n\t\t\t// first check the connection drop status\n\t\t\tconnectionHasBeenDropped := false\n\t\t\tif p.latestResponse == nil && p.latestError == nil {\n\t\t\t\t// connection drops can either have no response/error (where we have no context)\n\t\t\t\tconnectionHasBeenDropped = true\n\t\t\t} else if _, ok := p.latestError.(PollingDroppedConnectionError); ok {\n\t\t\t\t// or have an error with more details (e.g. 
server not found, connection reset etc)\n\t\t\t\tconnectionHasBeenDropped = true\n\t\t\t}\n\t\t\tif connectionHasBeenDropped {\n\t\t\t\tconnectionDropCounter++\n\t\t\t\tif connectionDropCounter < p.maxNumberOfDroppedConnections {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif p.latestResponse == nil && p.latestError == nil {\n\t\t\t\t\t// the connection was dropped, but we have no context\n\t\t\t\t\tp.latestError = PollingDroppedConnectionError{}\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tconnectionDropCounter = 0\n\t\t\t}\n\n\t\t\tif p.latestError != nil {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif response := p.latestResponse; response != nil {\n\t\t\t\tretryDuration = response.PollInterval\n\n\t\t\t\tdone := false\n\t\t\t\tswitch response.Status {\n\t\t\t\t// Cancelled, Dropped Connections and Failed should be raised as errors containing additional info if available\n\n\t\t\t\tcase PollingStatusCancelled:\n\t\t\t\t\tp.latestError = fmt.Errorf(\"internal-error: a polling status of `Cancelled` should be surfaced as a PollingCancelledError\")\n\t\t\t\t\tdone = true\n\t\t\t\t\tbreak\n\n\t\t\t\tcase PollingStatusFailed:\n\t\t\t\t\tp.latestError = fmt.Errorf(\"internal-error: a polling status of `Failed` should be surfaced as a PollingFailedError\")\n\t\t\t\t\tdone = true\n\t\t\t\t\tbreak\n\n\t\t\t\tcase PollingStatusInProgress:\n\t\t\t\t\tcontinue\n\n\t\t\t\tcase PollingStatusSucceeded:\n\t\t\t\t\tdone = true\n\t\t\t\t\tbreak\n\n\t\t\t\tdefault:\n\t\t\t\t\tp.latestError = fmt.Errorf(\"internal-error: unimplemented polling status %q\", string(response.Status))\n\t\t\t\t\tdone = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tif done {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\twait.Done()\n\t}()\n\n\twaitDone := make(chan struct{}, 1)\n\tgo func() {\n\t\twait.Wait()\n\t\twaitDone <- struct{}{}\n\t}()\n\n\tselect {\n\tcase <-waitDone:\n\t\tbreak\n\tcase <-ctx.Done():\n\t\t{\n\t\t\tp.latestResponse = nil\n\t\t\tp.latestError = ctx.Err()\n\t\t\treturn 
p.latestError\n\t\t}\n\t}\n\n\tif p.latestError != nil {\n\t\tp.latestResponse = nil\n\t}\n\n\treturn p.latestError\n}", "func (l StreamingEndpointsClientScalePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (StreamingEndpointsClientScaleResponse, error) {\n\trespType := StreamingEndpointsClientScaleResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l VirtualHubRouteTableV2SClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (VirtualHubRouteTableV2SClientDeleteResponse, error) {\n\trespType := VirtualHubRouteTableV2SClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l VirtualHubsClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (VirtualHubsClientDeleteResponse, error) {\n\trespType := VirtualHubsClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l VirtualAppliancesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (VirtualAppliancesClientDeleteResponse, error) {\n\trespType := VirtualAppliancesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l RoutesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (RoutesClientDeleteResponse, error) {\n\trespType := RoutesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func 
(l WatchersClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (WatchersClientDeleteResponse, error) {\n\trespType := WatchersClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l PacketCapturesClientGetStatusPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (PacketCapturesClientGetStatusResponse, error) {\n\trespType := PacketCapturesClientGetStatusResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.PacketCaptureQueryStatusResult)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l AzureFirewallsClientCreateOrUpdatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (AzureFirewallsClientCreateOrUpdateResponse, error) {\n\trespType := AzureFirewallsClientCreateOrUpdateResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.AzureFirewall)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l ExpressRouteCircuitsClientListRoutesTableSummaryPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ExpressRouteCircuitsClientListRoutesTableSummaryResponse, error) {\n\trespType := ExpressRouteCircuitsClientListRoutesTableSummaryResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.ExpressRouteCircuitsRoutesTableSummaryListResult)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l DeletedServersRecoverPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (DeletedServersRecoverResponse, error) {\n\trespType := DeletedServersRecoverResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.DeletedServer)\n\tif err != nil {\n\t\treturn respType, 
err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l ConnectionMonitorsClientQueryPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ConnectionMonitorsClientQueryResponse, error) {\n\trespType := ConnectionMonitorsClientQueryResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.ConnectionMonitorQueryResult)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l ExpressRouteCircuitsClientCreateOrUpdatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ExpressRouteCircuitsClientCreateOrUpdateResponse, error) {\n\trespType := ExpressRouteCircuitsClientCreateOrUpdateResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.ExpressRouteCircuit)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l VPNServerConfigurationsClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (VPNServerConfigurationsClientDeleteResponse, error) {\n\trespType := VPNServerConfigurationsClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l NatGatewaysClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (NatGatewaysClientDeleteResponse, error) {\n\trespType := NatGatewaysClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l InboundNatRulesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (InboundNatRulesClientDeleteResponse, error) {\n\trespType := InboundNatRulesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, 
err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l HubRouteTablesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (HubRouteTablesClientDeleteResponse, error) {\n\trespType := HubRouteTablesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l VPNGatewaysClientStopPacketCapturePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (VPNGatewaysClientStopPacketCaptureResponse, error) {\n\trespType := VPNGatewaysClientStopPacketCaptureResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.Value)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l VirtualNetworkRulesDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (VirtualNetworkRulesDeleteResponse, error) {\n\trespType := VirtualNetworkRulesDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l DscpConfigurationClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (DscpConfigurationClientDeleteResponse, error) {\n\trespType := DscpConfigurationClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l BandwidthSchedulesClientCreateOrUpdatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (BandwidthSchedulesClientCreateOrUpdateResponse, error) {\n\trespType := BandwidthSchedulesClientCreateOrUpdateResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.BandwidthSchedule)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = 
resp\n\treturn respType, nil\n}", "func (l ExpressRoutePortsClientCreateOrUpdatePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ExpressRoutePortsClientCreateOrUpdateResponse, error) {\n\trespType := ExpressRoutePortsClientCreateOrUpdateResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.ExpressRoutePort)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l P2SVPNGatewaysClientGetP2SVPNConnectionHealthDetailedPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (P2SVPNGatewaysClientGetP2SVPNConnectionHealthDetailedResponse, error) {\n\trespType := P2SVPNGatewaysClientGetP2SVPNConnectionHealthDetailedResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.P2SVPNConnectionHealth)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l LinkedServicesClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (LinkedServicesClientDeleteResponse, error) {\n\trespType := LinkedServicesClientDeleteResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.LinkedService)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l VirtualNetworkGatewayConnectionsClientGetIkeSasPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (VirtualNetworkGatewayConnectionsClientGetIkeSasResponse, error) {\n\trespType := VirtualNetworkGatewayConnectionsClientGetIkeSasResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.Value)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l UsersClientDeletePollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (UsersClientDeleteResponse, error) {\n\trespType := UsersClientDeleteResponse{}\n\tresp, err := 
l.Poller.pt.PollUntilDone(ctx, freq, nil)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}", "func (l ExpressRouteGatewaysClientUpdateTagsPollerResponse) PollUntilDone(ctx context.Context, freq time.Duration) (ExpressRouteGatewaysClientUpdateTagsResponse, error) {\n\trespType := ExpressRouteGatewaysClientUpdateTagsResponse{}\n\tresp, err := l.Poller.pt.PollUntilDone(ctx, freq, &respType.ExpressRouteGateway)\n\tif err != nil {\n\t\treturn respType, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}" ]
[ "0.6259292", "0.6195853", "0.61802083", "0.6166713", "0.61609143", "0.61609143", "0.6154409", "0.6075394", "0.60710424", "0.60707647", "0.60615396", "0.6043415", "0.60430884", "0.60313463", "0.6021775", "0.6007862", "0.5998663", "0.59888476", "0.5980255", "0.595896", "0.5956378", "0.594661", "0.59132135", "0.5910213", "0.5899073", "0.5896752", "0.5880312", "0.58777475", "0.58739704", "0.58739704", "0.5854519", "0.5853649", "0.5840586", "0.583973", "0.58278793", "0.58115023", "0.5809201", "0.5809191", "0.58072776", "0.57925636", "0.5786058", "0.57844967", "0.5780295", "0.57786757", "0.57670593", "0.5761527", "0.576144", "0.5757725", "0.5755513", "0.5750583", "0.57389987", "0.5738375", "0.5736762", "0.57362086", "0.5731007", "0.57260036", "0.5725031", "0.5724962", "0.5723625", "0.57170564", "0.57154214", "0.57139236", "0.5710196", "0.570934", "0.5707234", "0.57039857", "0.570383", "0.5703449", "0.5703015", "0.56978655", "0.56903607", "0.56855464", "0.56841564", "0.5681204", "0.56810015", "0.5680053", "0.5679557", "0.56780916", "0.56697315", "0.56684875", "0.5666967", "0.56610644", "0.5657614", "0.56560224", "0.5647862", "0.5641831", "0.56418073", "0.5638144", "0.56339353", "0.5629027", "0.5628087", "0.5627663", "0.5627321", "0.56243616", "0.56203306", "0.561998", "0.5618211", "0.5616647", "0.5614523", "0.5613799", "0.5609946" ]
0.0
-1
Resume rehydrates a ServerVulnerabilityAssessmentDeletePollerResponse from the provided client and resume token.
func (l *ServerVulnerabilityAssessmentDeletePollerResponse) Resume(ctx context.Context, client *ServerVulnerabilityAssessmentClient, token string) error { pt, err := armruntime.NewPollerFromResumeToken("ServerVulnerabilityAssessmentClient.Delete", token, client.pl, client.deleteHandleError) if err != nil { return err } poller := &ServerVulnerabilityAssessmentDeletePoller{ pt: pt, } resp, err := poller.Poll(ctx) if err != nil { return err } l.Poller = poller l.RawResponse = resp return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (l *VPNGatewaysClientDeletePollerResponse) Resume(ctx context.Context, client *VPNGatewaysClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VPNGatewaysClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VPNGatewaysClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VirtualApplianceSitesClientDeletePollerResponse) Resume(ctx context.Context, client *VirtualApplianceSitesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VirtualApplianceSitesClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualApplianceSitesClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VirtualRouterPeeringsClientDeletePollerResponse) Resume(ctx context.Context, client *VirtualRouterPeeringsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VirtualRouterPeeringsClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualRouterPeeringsClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *P2SVPNGatewaysClientDeletePollerResponse) Resume(ctx context.Context, client *P2SVPNGatewaysClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"P2SVPNGatewaysClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &P2SVPNGatewaysClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l 
*ManagedInstanceAzureADOnlyAuthenticationsDeletePollerResponse) Resume(ctx context.Context, client *ManagedInstanceAzureADOnlyAuthenticationsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ManagedInstanceAzureADOnlyAuthenticationsClient.Delete\", token, client.pl, client.deleteHandleError)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ManagedInstanceAzureADOnlyAuthenticationsDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VirtualNetworkTapsClientDeletePollerResponse) Resume(ctx context.Context, client *VirtualNetworkTapsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VirtualNetworkTapsClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualNetworkTapsClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ManagedInstancesDeletePollerResponse) Resume(ctx context.Context, client *ManagedInstancesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ManagedInstancesClient.Delete\", token, client.pl, client.deleteHandleError)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ManagedInstancesDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *IPAllocationsClientDeletePollerResponse) Resume(ctx context.Context, client *IPAllocationsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"IPAllocationsClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &IPAllocationsClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller 
= poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VirtualAppliancesClientDeletePollerResponse) Resume(ctx context.Context, client *VirtualAppliancesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VirtualAppliancesClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualAppliancesClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VPNServerConfigurationsClientDeletePollerResponse) Resume(ctx context.Context, client *VPNServerConfigurationsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VPNServerConfigurationsClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VPNServerConfigurationsClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *LiveEventsClientDeletePollerResponse) Resume(ctx context.Context, client *LiveEventsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"LiveEventsClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &LiveEventsClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ServerAzureADOnlyAuthenticationsDeletePollerResponse) Resume(ctx context.Context, client *ServerAzureADOnlyAuthenticationsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ServerAzureADOnlyAuthenticationsClient.Delete\", token, client.pl, client.deleteHandleError)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ServerAzureADOnlyAuthenticationsDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil 
{\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ApplicationSecurityGroupsClientDeletePollerResponse) Resume(ctx context.Context, client *ApplicationSecurityGroupsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ApplicationSecurityGroupsClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ApplicationSecurityGroupsClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VirtualRoutersClientDeletePollerResponse) Resume(ctx context.Context, client *VirtualRoutersClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VirtualRoutersClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualRoutersClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VirtualWansClientDeletePollerResponse) Resume(ctx context.Context, client *VirtualWansClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VirtualWansClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualWansClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ServerDNSAliasesDeletePollerResponse) Resume(ctx context.Context, client *ServerDNSAliasesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ServerDNSAliasesClient.Delete\", token, client.pl, client.deleteHandleError)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ServerDNSAliasesDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = 
poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *SnapshotPoliciesClientDeletePollerResponse) Resume(ctx context.Context, client *SnapshotPoliciesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"SnapshotPoliciesClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &SnapshotPoliciesClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *PacketCapturesClientDeletePollerResponse) Resume(ctx context.Context, client *PacketCapturesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"PacketCapturesClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &PacketCapturesClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *InboundNatRulesClientDeletePollerResponse) Resume(ctx context.Context, client *InboundNatRulesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"InboundNatRulesClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &InboundNatRulesClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VirtualNetworkPeeringsClientDeletePollerResponse) Resume(ctx context.Context, client *VirtualNetworkPeeringsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VirtualNetworkPeeringsClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualNetworkPeeringsClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", 
"func (l *SnapshotsClientDeletePollerResponse) Resume(ctx context.Context, client *SnapshotsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"SnapshotsClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &SnapshotsClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *RouteFiltersClientDeletePollerResponse) Resume(ctx context.Context, client *RouteFiltersClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"RouteFiltersClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &RouteFiltersClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *SyncAgentsDeletePollerResponse) Resume(ctx context.Context, client *SyncAgentsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"SyncAgentsClient.Delete\", token, client.pl, client.deleteHandleError)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &SyncAgentsDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *FirewallPoliciesClientDeletePollerResponse) Resume(ctx context.Context, client *FirewallPoliciesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"FirewallPoliciesClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &FirewallPoliciesClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *SecurityPartnerProvidersClientDeletePollerResponse) Resume(ctx context.Context, client 
*SecurityPartnerProvidersClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"SecurityPartnerProvidersClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &SecurityPartnerProvidersClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *SQLPoolWorkloadClassifierClientDeletePollerResponse) Resume(ctx context.Context, client *SQLPoolWorkloadClassifierClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"SQLPoolWorkloadClassifierClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &SQLPoolWorkloadClassifierClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *RouteFilterRulesClientDeletePollerResponse) Resume(ctx context.Context, client *RouteFilterRulesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"RouteFilterRulesClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &RouteFilterRulesClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ExpressRouteCrossConnectionPeeringsClientDeletePollerResponse) Resume(ctx context.Context, client *ExpressRouteCrossConnectionPeeringsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ExpressRouteCrossConnectionPeeringsClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ExpressRouteCrossConnectionPeeringsClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l 
*SecurityRulesClientDeletePollerResponse) Resume(ctx context.Context, client *SecurityRulesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"SecurityRulesClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &SecurityRulesClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *SecurityGroupsClientDeletePollerResponse) Resume(ctx context.Context, client *SecurityGroupsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"SecurityGroupsClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &SecurityGroupsClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *MonitoringConfigClientDeletePollerResponse) Resume(ctx context.Context, client *MonitoringConfigClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"MonitoringConfigClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &MonitoringConfigClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *IPFirewallRulesClientDeletePollerResponse) Resume(ctx context.Context, client *IPFirewallRulesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"IPFirewallRulesClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &IPFirewallRulesClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ClientDeletePollerResponse) Resume(ctx context.Context, client *Client, token string) 
error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"Client.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VirtualHubIPConfigurationClientDeletePollerResponse) Resume(ctx context.Context, client *VirtualHubIPConfigurationClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VirtualHubIPConfigurationClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualHubIPConfigurationClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VirtualNetworksClientDeletePollerResponse) Resume(ctx context.Context, client *VirtualNetworksClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VirtualNetworksClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualNetworksClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ServiceEndpointPoliciesClientDeletePollerResponse) Resume(ctx context.Context, client *ServiceEndpointPoliciesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ServiceEndpointPoliciesClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ServiceEndpointPoliciesClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ExpressRouteCircuitPeeringsClientDeletePollerResponse) Resume(ctx context.Context, client *ExpressRouteCircuitPeeringsClient, token string) error 
{\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ExpressRouteCircuitPeeringsClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ExpressRouteCircuitPeeringsClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ApplicationGatewaysClientDeletePollerResponse) Resume(ctx context.Context, client *ApplicationGatewaysClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ApplicationGatewaysClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ApplicationGatewaysClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *FlowLogsClientDeletePollerResponse) Resume(ctx context.Context, client *FlowLogsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"FlowLogsClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &FlowLogsClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ConnectionMonitorsClientDeletePollerResponse) Resume(ctx context.Context, client *ConnectionMonitorsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ConnectionMonitorsClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ConnectionMonitorsClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *SQLPoolsClientDeletePollerResponse) Resume(ctx context.Context, client *SQLPoolsClient, token string) error {\n\tpt, err := 
armruntime.NewPollerFromResumeToken(\"SQLPoolsClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &SQLPoolsClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *NatRulesClientDeletePollerResponse) Resume(ctx context.Context, client *NatRulesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"NatRulesClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &NatRulesClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VirtualNetworkGatewayNatRulesClientDeletePollerResponse) Resume(ctx context.Context, client *VirtualNetworkGatewayNatRulesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VirtualNetworkGatewayNatRulesClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualNetworkGatewayNatRulesClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ExpressRouteGatewaysClientDeletePollerResponse) Resume(ctx context.Context, client *ExpressRouteGatewaysClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ExpressRouteGatewaysClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ExpressRouteGatewaysClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *EnergyServicesClientDeletePollerResponse) Resume(ctx context.Context, client *EnergyServicesClient, token string) error {\n\tpt, err := 
armruntime.NewPollerFromResumeToken(\"EnergyServicesClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &EnergyServicesClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ExpressRouteCircuitsClientDeletePollerResponse) Resume(ctx context.Context, client *ExpressRouteCircuitsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ExpressRouteCircuitsClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ExpressRouteCircuitsClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *DdosProtectionPlansClientDeletePollerResponse) Resume(ctx context.Context, client *DdosProtectionPlansClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"DdosProtectionPlansClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &DdosProtectionPlansClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *NatGatewaysClientDeletePollerResponse) Resume(ctx context.Context, client *NatGatewaysClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"NatGatewaysClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &NatGatewaysClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *BackupPoliciesClientDeletePollerResponse) Resume(ctx context.Context, client *BackupPoliciesClient, token string) error {\n\tpt, err := 
armruntime.NewPollerFromResumeToken(\"BackupPoliciesClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &BackupPoliciesClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *SharesClientDeletePollerResponse) Resume(ctx context.Context, client *SharesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"SharesClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &SharesClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *WebApplicationFirewallPoliciesClientDeletePollerResponse) Resume(ctx context.Context, client *WebApplicationFirewallPoliciesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"WebApplicationFirewallPoliciesClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &WebApplicationFirewallPoliciesClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *RoutesClientDeletePollerResponse) Resume(ctx context.Context, client *RoutesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"RoutesClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &RoutesClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VPNConnectionsClientDeletePollerResponse) Resume(ctx context.Context, client *VPNConnectionsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VPNConnectionsClient.Delete\", token, client.pl)\n\tif 
err != nil {\n\t\treturn err\n\t}\n\tpoller := &VPNConnectionsClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ServiceEndpointPolicyDefinitionsClientDeletePollerResponse) Resume(ctx context.Context, client *ServiceEndpointPolicyDefinitionsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ServiceEndpointPolicyDefinitionsClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ServiceEndpointPolicyDefinitionsClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ManagedInstanceKeysDeletePollerResponse) Resume(ctx context.Context, client *ManagedInstanceKeysClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ManagedInstanceKeysClient.Delete\", token, client.pl, client.deleteHandleError)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ManagedInstanceKeysDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *TriggersClientDeletePollerResponse) Resume(ctx context.Context, client *TriggersClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"TriggersClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &TriggersClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *DdosCustomPoliciesClientDeletePollerResponse) Resume(ctx context.Context, client *DdosCustomPoliciesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"DdosCustomPoliciesClient.Delete\", token, client.pl)\n\tif 
err != nil {\n\t\treturn err\n\t}\n\tpoller := &DdosCustomPoliciesClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *IPGroupsClientDeletePollerResponse) Resume(ctx context.Context, client *IPGroupsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"IPGroupsClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &IPGroupsClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *StreamingEndpointsClientDeletePollerResponse) Resume(ctx context.Context, client *StreamingEndpointsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"StreamingEndpointsClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &StreamingEndpointsClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ApplicationGatewayPrivateEndpointConnectionsClientDeletePollerResponse) Resume(ctx context.Context, client *ApplicationGatewayPrivateEndpointConnectionsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ApplicationGatewayPrivateEndpointConnectionsClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ApplicationGatewayPrivateEndpointConnectionsClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ExpressRouteConnectionsClientDeletePollerResponse) Resume(ctx context.Context, client *ExpressRouteConnectionsClient, token string) error {\n\tpt, err := 
armruntime.NewPollerFromResumeToken(\"ExpressRouteConnectionsClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ExpressRouteConnectionsClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VirtualNetworkGatewaysClientDeletePollerResponse) Resume(ctx context.Context, client *VirtualNetworkGatewaysClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VirtualNetworkGatewaysClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualNetworkGatewaysClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *LoadBalancersClientDeletePollerResponse) Resume(ctx context.Context, client *LoadBalancersClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"LoadBalancersClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &LoadBalancersClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *HubVirtualNetworkConnectionsClientDeletePollerResponse) Resume(ctx context.Context, client *HubVirtualNetworkConnectionsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"HubVirtualNetworkConnectionsClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &HubVirtualNetworkConnectionsClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ExpressRouteCircuitAuthorizationsClientDeletePollerResponse) Resume(ctx context.Context, client 
*ExpressRouteCircuitAuthorizationsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ExpressRouteCircuitAuthorizationsClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ExpressRouteCircuitAuthorizationsClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *RoutingIntentClientDeletePollerResponse) Resume(ctx context.Context, client *RoutingIntentClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"RoutingIntentClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &RoutingIntentClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ExpressRouteCircuitConnectionsClientDeletePollerResponse) Resume(ctx context.Context, client *ExpressRouteCircuitConnectionsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ExpressRouteCircuitConnectionsClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ExpressRouteCircuitConnectionsClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *IntegrationRuntimesClientDeletePollerResponse) Resume(ctx context.Context, client *IntegrationRuntimesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"IntegrationRuntimesClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &IntegrationRuntimesClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *WatchersClientDeletePollerResponse) 
Resume(ctx context.Context, client *WatchersClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"WatchersClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &WatchersClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VirtualClustersDeletePollerResponse) Resume(ctx context.Context, client *VirtualClustersClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VirtualClustersClient.Delete\", token, client.pl, client.deleteHandleError)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualClustersDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ClustersClientDeletePollerResponse) Resume(ctx context.Context, client *ClustersClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ClustersClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ClustersClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VirtualNetworkGatewayConnectionsClientDeletePollerResponse) Resume(ctx context.Context, client *VirtualNetworkGatewayConnectionsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VirtualNetworkGatewayConnectionsClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualNetworkGatewayConnectionsClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VirtualHubRouteTableV2SClientDeletePollerResponse) Resume(ctx context.Context, client 
*VirtualHubRouteTableV2SClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VirtualHubRouteTableV2SClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualHubRouteTableV2SClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VPNSitesClientDeletePollerResponse) Resume(ctx context.Context, client *VPNSitesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VPNSitesClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VPNSitesClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VirtualHubBgpConnectionClientDeletePollerResponse) Resume(ctx context.Context, client *VirtualHubBgpConnectionClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VirtualHubBgpConnectionClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualHubBgpConnectionClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *RouteTablesClientDeletePollerResponse) Resume(ctx context.Context, client *RouteTablesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"RouteTablesClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &RouteTablesClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ServersDeletePollerResponse) Resume(ctx context.Context, client *ServersClient, token string) error {\n\tpt, err := 
armruntime.NewPollerFromResumeToken(\"ServersClient.Delete\", token, client.pl, client.deleteHandleError)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ServersDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *PrivateEndpointsClientDeletePollerResponse) Resume(ctx context.Context, client *PrivateEndpointsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"PrivateEndpointsClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &PrivateEndpointsClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *BigDataPoolsClientDeletePollerResponse) Resume(ctx context.Context, client *BigDataPoolsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"BigDataPoolsClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &BigDataPoolsClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *SQLPoolWorkloadGroupClientDeletePollerResponse) Resume(ctx context.Context, client *SQLPoolWorkloadGroupClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"SQLPoolWorkloadGroupClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &SQLPoolWorkloadGroupClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *HubRouteTablesClientDeletePollerResponse) Resume(ctx context.Context, client *HubRouteTablesClient, token string) error {\n\tpt, err := 
armruntime.NewPollerFromResumeToken(\"HubRouteTablesClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &HubRouteTablesClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *InterfaceTapConfigurationsClientDeletePollerResponse) Resume(ctx context.Context, client *InterfaceTapConfigurationsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"InterfaceTapConfigurationsClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &InterfaceTapConfigurationsClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ContainersClientDeletePollerResponse) Resume(ctx context.Context, client *ContainersClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ContainersClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ContainersClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *PrivateEndpointConnectionsClientDeletePollerResponse) Resume(ctx context.Context, client *PrivateEndpointConnectionsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"PrivateEndpointConnectionsClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &PrivateEndpointConnectionsClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *PoolsClientDeletePollerResponse) Resume(ctx context.Context, client *PoolsClient, token string) error {\n\tpt, err := 
armruntime.NewPollerFromResumeToken(\"PoolsClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &PoolsClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ExpressRoutePortsClientDeletePollerResponse) Resume(ctx context.Context, client *ExpressRoutePortsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ExpressRoutePortsClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ExpressRoutePortsClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *DscpConfigurationClientDeletePollerResponse) Resume(ctx context.Context, client *DscpConfigurationClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"DscpConfigurationClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &DscpConfigurationClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *LiveOutputsClientDeletePollerResponse) Resume(ctx context.Context, client *LiveOutputsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"LiveOutputsClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &LiveOutputsClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *TablesClientDeletePollerResponse) Resume(ctx context.Context, client *TablesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"TablesClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn 
err\n\t}\n\tpoller := &TablesClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ServerKeysDeletePollerResponse) Resume(ctx context.Context, client *ServerKeysClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ServerKeysClient.Delete\", token, client.pl, client.deleteHandleError)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ServerKeysDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ServerTrustGroupsDeletePollerResponse) Resume(ctx context.Context, client *ServerTrustGroupsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ServerTrustGroupsClient.Delete\", token, client.pl, client.deleteHandleError)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ServerTrustGroupsDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *PrivateLinkServicesClientDeletePollerResponse) Resume(ctx context.Context, client *PrivateLinkServicesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"PrivateLinkServicesClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &PrivateLinkServicesClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ManagedInstanceAdministratorsDeletePollerResponse) Resume(ctx context.Context, client *ManagedInstanceAdministratorsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ManagedInstanceAdministratorsClient.Delete\", token, client.pl, client.deleteHandleError)\n\tif err != nil 
{\n\t\treturn err\n\t}\n\tpoller := &ManagedInstanceAdministratorsDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *RolesClientDeletePollerResponse) Resume(ctx context.Context, client *RolesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"RolesClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &RolesClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VirtualNetworkLinksClientDeletePollerResponse) Resume(ctx context.Context, client *VirtualNetworkLinksClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VirtualNetworkLinksClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VirtualNetworkLinksClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *AccountsClientDeletePollerResponse) Resume(ctx context.Context, client *AccountsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"AccountsClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &AccountsClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *VolumesClientDeletePollerResponse) Resume(ctx context.Context, client *VolumesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"VolumesClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &VolumesClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn 
err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *PipelineRunsDeletePollerResponse) Resume(ctx context.Context, client *PipelineRunsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"PipelineRunsClient.Delete\", token, client.pl, client.deleteHandleError)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &PipelineRunsDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *KustoPoolsClientDeletePollerResponse) Resume(ctx context.Context, client *KustoPoolsClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"KustoPoolsClient.Delete\", token, client.pl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &KustoPoolsClientDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}", "func (l *ImportPipelinesDeletePollerResponse) Resume(ctx context.Context, client *ImportPipelinesClient, token string) error {\n\tpt, err := armruntime.NewPollerFromResumeToken(\"ImportPipelinesClient.Delete\", token, client.pl, client.deleteHandleError)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpoller := &ImportPipelinesDeletePoller{\n\t\tpt: pt,\n\t}\n\tresp, err := poller.Poll(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.Poller = poller\n\tl.RawResponse = resp\n\treturn nil\n}" ]
[ "0.81951827", "0.81379056", "0.80936027", "0.8085295", "0.8065316", "0.8061482", "0.8058655", "0.8053732", "0.8043651", "0.8019985", "0.8019461", "0.8018657", "0.8012718", "0.8009122", "0.80089366", "0.80024755", "0.7994542", "0.7992985", "0.7992747", "0.79916984", "0.7982053", "0.7979617", "0.79790926", "0.79755", "0.7972869", "0.79666334", "0.79651666", "0.79616094", "0.79581213", "0.7955334", "0.7950527", "0.79475105", "0.7938155", "0.793515", "0.7930524", "0.7928615", "0.792834", "0.792686", "0.7925672", "0.79241973", "0.7921562", "0.7918354", "0.7916962", "0.7912597", "0.79103863", "0.79089284", "0.7908352", "0.7903208", "0.7899493", "0.7897003", "0.7895661", "0.78942573", "0.7891928", "0.78911215", "0.7889311", "0.7888123", "0.7886004", "0.7879758", "0.7878651", "0.7878186", "0.78767467", "0.7876263", "0.7874666", "0.7872863", "0.786761", "0.7865258", "0.78647316", "0.78632045", "0.7858457", "0.7858428", "0.78525233", "0.78518516", "0.7851065", "0.7846215", "0.78455967", "0.78441733", "0.783503", "0.78312564", "0.7827665", "0.7825544", "0.7824201", "0.7821551", "0.7819105", "0.7817929", "0.7807168", "0.77999884", "0.77981687", "0.77953446", "0.77930343", "0.7790284", "0.7784891", "0.7783082", "0.7769912", "0.7766693", "0.77640647", "0.77568394", "0.77552235", "0.77501976", "0.7747319", "0.77406794" ]
0.8414383
0
UnmarshalJSON implements the json.Unmarshaller interface for type SettingsGetResult.
func (s *SettingsGetResult) UnmarshalJSON(data []byte) error { res, err := unmarshalSettingClassification(data) if err != nil { return err } s.SettingClassification = res return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (p *ProductSettingsClientGetResult) UnmarshalJSON(data []byte) error {\n\tres, err := unmarshalSettingsClassification(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\tp.SettingsClassification = res\n\treturn nil\n}", "func (s *SettingsUpdateResult) UnmarshalJSON(data []byte) error {\n\tres, err := unmarshalSettingClassification(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.SettingClassification = res\n\treturn nil\n}", "func (p *ProductSettingsClientUpdateResult) UnmarshalJSON(data []byte) error {\n\tres, err := unmarshalSettingsClassification(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\tp.SettingsClassification = res\n\treturn nil\n}", "func (h *HlsSettings) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", h, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"characteristics\":\n\t\t\terr = unpopulate(val, \"Characteristics\", &h.Characteristics)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"default\":\n\t\t\terr = unpopulate(val, \"Default\", &h.Default)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"forced\":\n\t\t\terr = unpopulate(val, \"Forced\", &h.Forced)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", h, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (a *AccountSKUListResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &a.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (a *ApplicationGatewayAvailableWafRuleSetsResult) 
UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &a.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (h *HostSettingsResponse) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", h, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"BotOpenIdMetadata\":\n\t\t\terr = unpopulate(val, \"BotOpenIDMetadata\", &h.BotOpenIDMetadata)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"OAuthUrl\":\n\t\t\terr = unpopulate(val, \"OAuthURL\", &h.OAuthURL)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"ToBotFromChannelOpenIdMetadataUrl\":\n\t\t\terr = unpopulate(val, \"ToBotFromChannelOpenIDMetadataURL\", &h.ToBotFromChannelOpenIDMetadataURL)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"ToBotFromChannelTokenIssuer\":\n\t\t\terr = unpopulate(val, \"ToBotFromChannelTokenIssuer\", &h.ToBotFromChannelTokenIssuer)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"ToBotFromEmulatorOpenIdMetadataUrl\":\n\t\t\terr = unpopulate(val, \"ToBotFromEmulatorOpenIDMetadataURL\", &h.ToBotFromEmulatorOpenIDMetadataURL)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"ToChannelFromBotLoginUrl\":\n\t\t\terr = unpopulate(val, \"ToChannelFromBotLoginURL\", &h.ToChannelFromBotLoginURL)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"ToChannelFromBotOAuthScope\":\n\t\t\terr = unpopulate(val, \"ToChannelFromBotOAuthScope\", &h.ToChannelFromBotOAuthScope)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"ValidateAuthority\":\n\t\t\terr = unpopulate(val, \"ValidateAuthority\", 
&h.ValidateAuthority)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", h, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (this *ConnectionPoolSettings_HTTPSettings) UnmarshalJSON(b []byte) error {\n\treturn DestinationRuleUnmarshaler.Unmarshal(bytes.NewReader(b), this)\n}", "func (c *ChannelSettings) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"botId\":\n\t\t\terr = unpopulate(val, \"BotID\", &c.BotID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"botIconUrl\":\n\t\t\terr = unpopulate(val, \"BotIconURL\", &c.BotIconURL)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"channelDisplayName\":\n\t\t\terr = unpopulate(val, \"ChannelDisplayName\", &c.ChannelDisplayName)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"channelId\":\n\t\t\terr = unpopulate(val, \"ChannelID\", &c.ChannelID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"disableLocalAuth\":\n\t\t\terr = unpopulate(val, \"DisableLocalAuth\", &c.DisableLocalAuth)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"extensionKey1\":\n\t\t\terr = unpopulate(val, \"ExtensionKey1\", &c.ExtensionKey1)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"extensionKey2\":\n\t\t\terr = unpopulate(val, \"ExtensionKey2\", &c.ExtensionKey2)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"isEnabled\":\n\t\t\terr = unpopulate(val, \"IsEnabled\", &c.IsEnabled)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"requireTermsAgreement\":\n\t\t\terr = unpopulate(val, \"RequireTermsAgreement\", &c.RequireTermsAgreement)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"sites\":\n\t\t\terr = unpopulate(val, \"Sites\", &c.Sites)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (x *CMsgSetTeamFanContentStatusResponse_EResult) 
UnmarshalJSON(b []byte) error {\n\tnum, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = CMsgSetTeamFanContentStatusResponse_EResult(num)\n\treturn nil\n}", "func GetSettings(jsonByteArray []byte) Settings {\n\tvar settings Settings\n\tjson.Unmarshal(jsonByteArray, &settings)\n\treturn settings\n}", "func (a *ApplicationGatewayWafDynamicManifestPropertiesResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"availableRuleSets\":\n\t\t\terr = unpopulate(val, \"AvailableRuleSets\", &a.AvailableRuleSets)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"defaultRuleSet\":\n\t\t\terr = unpopulate(val, \"DefaultRuleSet\", &a.DefaultRuleSet)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (this *ClientTLSSettings) UnmarshalJSON(b []byte) error {\n\treturn DestinationRuleUnmarshaler.Unmarshal(bytes.NewReader(b), this)\n}", "func (c *ConnectionSettingResponseList) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"nextLink\":\n\t\t\terr = unpopulate(val, \"NextLink\", &c.NextLink)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &c.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (a *AlertRuleTemplatesClientGetResult) UnmarshalJSON(data []byte) error {\n\tres, err := 
unmarshalAlertRuleTemplateClassification(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\ta.AlertRuleTemplateClassification = res\n\treturn nil\n}", "func (s *SAPSizingRecommendationResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", s, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"deploymentType\":\n\t\t\terr = unpopulate(val, \"DeploymentType\", &s.DeploymentType)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", s, err)\n\t\t}\n\t}\n\treturn nil\n}", "func UnmarshalUserSettings(b []byte) UserSettings {\n\ta := UserSettings{}\n\tjson.Unmarshal(b, &a)\n\treturn a\n}", "func (l *ListVirtualHubIPConfigurationResults) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", l, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"nextLink\":\n\t\t\terr = unpopulate(val, \"NextLink\", &l.NextLink)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &l.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", l, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (api *AdminApi) RetreiveSettings() (*SettingsResult, error) {\n\t_, body, err := api.SignedCall(\"GET\", \"/admin/v1/settings\", nil, duoapi.UseTimeout)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tret := &SettingsResult{}\n\tif err = json.Unmarshal(body, ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n}", "func (v *VirtualApplianceSKUListResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn 
fmt.Errorf(\"unmarshalling type %T: %v\", v, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"nextLink\":\n\t\t\terr = unpopulate(val, \"NextLink\", &v.NextLink)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &v.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", v, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (s *SingleServerRecommendationResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", s, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"deploymentType\":\n\t\t\terr = unpopulate(val, \"DeploymentType\", &s.DeploymentType)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"vmSku\":\n\t\t\terr = unpopulate(val, \"VMSKU\", &s.VMSKU)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", s, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (t *TriggersClientGetResult) UnmarshalJSON(data []byte) error {\n\tres, err := unmarshalTriggerClassification(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\tt.TriggerClassification = res\n\treturn nil\n}", "func (r *RegionSetting) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", r, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"customsubdomain\":\n\t\t\terr = unpopulate(val, \"Customsubdomain\", &r.Customsubdomain)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"name\":\n\t\t\terr = unpopulate(val, \"Name\", &r.Name)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &r.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn 
fmt.Errorf(\"unmarshalling type %T: %v\", r, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (d *DashSettings) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", d, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"role\":\n\t\t\terr = unpopulate(val, \"Role\", &d.Role)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", d, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (a *AppTemplatesResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"nextLink\":\n\t\t\terr = unpopulate(val, \"NextLink\", &a.NextLink)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &a.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (d *DeploymentScaleSettings) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", d, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"activeCapacity\":\n\t\t\terr = unpopulate(val, \"ActiveCapacity\", &d.ActiveCapacity)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"capacity\":\n\t\t\terr = unpopulate(val, \"Capacity\", &d.Capacity)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"scaleType\":\n\t\t\terr = unpopulate(val, \"ScaleType\", &d.ScaleType)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", d, err)\n\t\t}\n\t}\n\treturn nil\n}", "func 
(s *SignatureOverridesFilterValuesResponse) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", s, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"filterValues\":\n\t\t\terr = unpopulate(val, \"FilterValues\", &s.FilterValues)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", s, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (v *ReconnectSettings) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer9(&r, v)\n\treturn r.Error()\n}", "func (l *ListVPNSitesResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", l, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"nextLink\":\n\t\t\terr = unpopulate(val, \"NextLink\", &l.NextLink)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &l.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", l, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (d *DisplaySettings) UnmarshalJSON(b []byte) error {\n\tvar fields struct {\n\t\tControlType string `json:\"Octopus.ControlType\"`\n\t}\n\tif err := json.Unmarshal(b, &fields); err != nil {\n\t\treturn err\n\t}\n\n\td.ControlType = ControlType(fields.ControlType)\n\n\tvar displaySettings map[string]*json.RawMessage\n\tif err := json.Unmarshal(b, &displaySettings); err != nil {\n\t\treturn err\n\t}\n\n\tif displaySettings[\"Octopus.SelectOptions\"] != nil {\n\t\td.SelectOptions = make([]*SelectOption, 0)\n\n\t\tvar selectOptionsDelimitedString *string\n\t\tif err := 
json.Unmarshal(*displaySettings[\"Octopus.SelectOptions\"], &selectOptionsDelimitedString); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfor _, kv := range strings.Split(*selectOptionsDelimitedString, \"\\n\") {\n\t\t\tpairs := strings.SplitN(kv, \"|\", 2)\n\t\t\tif len(pairs) == 2 { // ignore malformed options; server shouldn't send them anyway\n\t\t\t\td.SelectOptions = append(d.SelectOptions, &SelectOption{Value: pairs[0], DisplayName: pairs[1]})\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (i *InterfaceDNSSettings) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", i, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"appliedDnsServers\":\n\t\t\terr = unpopulate(val, \"AppliedDNSServers\", &i.AppliedDNSServers)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"dnsServers\":\n\t\t\terr = unpopulate(val, \"DNSServers\", &i.DNSServers)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"internalDnsNameLabel\":\n\t\t\terr = unpopulate(val, \"InternalDNSNameLabel\", &i.InternalDNSNameLabel)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"internalDomainNameSuffix\":\n\t\t\terr = unpopulate(val, \"InternalDomainNameSuffix\", &i.InternalDomainNameSuffix)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"internalFqdn\":\n\t\t\terr = unpopulate(val, \"InternalFqdn\", &i.InternalFqdn)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", i, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (r *ResourceSKUListResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", r, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"nextLink\":\n\t\t\terr = unpopulate(val, \"NextLink\", &r.NextLink)\n\t\t\tdelete(rawMsg, 
key)\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &r.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", r, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (p *PolicySettings) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", p, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"customBlockResponseBody\":\n\t\t\terr = unpopulate(val, \"CustomBlockResponseBody\", &p.CustomBlockResponseBody)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"customBlockResponseStatusCode\":\n\t\t\terr = unpopulate(val, \"CustomBlockResponseStatusCode\", &p.CustomBlockResponseStatusCode)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"fileUploadEnforcement\":\n\t\t\terr = unpopulate(val, \"FileUploadEnforcement\", &p.FileUploadEnforcement)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"fileUploadLimitInMb\":\n\t\t\terr = unpopulate(val, \"FileUploadLimitInMb\", &p.FileUploadLimitInMb)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"logScrubbing\":\n\t\t\terr = unpopulate(val, \"LogScrubbing\", &p.LogScrubbing)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"maxRequestBodySizeInKb\":\n\t\t\terr = unpopulate(val, \"MaxRequestBodySizeInKb\", &p.MaxRequestBodySizeInKb)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"mode\":\n\t\t\terr = unpopulate(val, \"Mode\", &p.Mode)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"requestBodyCheck\":\n\t\t\terr = unpopulate(val, \"RequestBodyCheck\", &p.RequestBodyCheck)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"requestBodyEnforcement\":\n\t\t\terr = unpopulate(val, \"RequestBodyEnforcement\", &p.RequestBodyEnforcement)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"requestBodyInspectLimitInKB\":\n\t\t\terr = unpopulate(val, \"RequestBodyInspectLimitInKB\", &p.RequestBodyInspectLimitInKB)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"state\":\n\t\t\terr = unpopulate(val, \"State\", 
&p.State)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", p, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (this *ConnectionPoolSettings) UnmarshalJSON(b []byte) error {\n\treturn DestinationRuleUnmarshaler.Unmarshal(bytes.NewReader(b), this)\n}", "func (m *MultiRegionSettings) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", m, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"regions\":\n\t\t\terr = unpopulate(val, \"Regions\", &m.Regions)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"routingMethod\":\n\t\t\terr = unpopulate(val, \"RoutingMethod\", &m.RoutingMethod)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", m, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (v *DidChangeConfigurationParams) UnmarshalJSONObject(dec *gojay.Decoder, k string) error {\n\tif k == keySettings {\n\t\treturn dec.Interface(&v.Settings)\n\t}\n\treturn nil\n}", "func (l *ListVPNServerConfigurationsResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", l, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"nextLink\":\n\t\t\terr = unpopulate(val, \"NextLink\", &l.NextLink)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &l.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", l, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (a *AvailableDelegationsResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling 
type %T: %v\", a, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"nextLink\":\n\t\t\terr = unpopulate(val, \"NextLink\", &a.NextLink)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &a.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (x *CMsgSocialFeedResponse_Result) UnmarshalJSON(b []byte) error {\n\tnum, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = CMsgSocialFeedResponse_Result(num)\n\treturn nil\n}", "func (s *SetDefinitionListResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", s, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"nextLink\":\n\t\t\terr = unpopulate(val, \"NextLink\", &s.NextLink)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &s.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", s, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (l *ListVPNServerConfigurationPolicyGroupsResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", l, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"nextLink\":\n\t\t\terr = unpopulate(val, \"NextLink\", &l.NextLink)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &l.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", l, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (a *AvailableServiceAliasesResult) 
UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"nextLink\":\n\t\t\terr = unpopulate(val, \"NextLink\", &a.NextLink)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &a.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t\t}\n\t}\n\treturn nil\n}", "func UnmarshalKMSSettings(m map[string]json.RawMessage, result interface{}) (err error) {\n\tobj := new(KMSSettings)\n\terr = core.UnmarshalPrimitive(m, \"location\", &obj.Location)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"encryption_scheme\", &obj.EncryptionScheme)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"resource_group\", &obj.ResourceGroup)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalModel(m, \"primary_crk\", &obj.PrimaryCrk, UnmarshalKMSSettingsPrimaryCrk)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalModel(m, \"secondary_crk\", &obj.SecondaryCrk, UnmarshalKMSSettingsSecondaryCrk)\n\tif err != nil {\n\t\treturn\n\t}\n\treflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))\n\treturn\n}", "func (v *TeamPermissionSettingType) UnmarshalJSON(src []byte) error {\n\tvar value string\n\terr := json.Unmarshal(src, &value)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*v = TeamPermissionSettingType(value)\n\treturn nil\n}", "func (a *ApplicationGatewayWafDynamicManifestResultList) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"nextLink\":\n\t\t\terr = 
unpopulate(val, \"NextLink\", &a.NextLink)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &a.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (t *TroubleshootingResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", t, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"code\":\n\t\t\terr = unpopulate(val, \"Code\", &t.Code)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"endTime\":\n\t\t\terr = unpopulateTimeRFC3339(val, \"EndTime\", &t.EndTime)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"results\":\n\t\t\terr = unpopulate(val, \"Results\", &t.Results)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"startTime\":\n\t\t\terr = unpopulateTimeRFC3339(val, \"StartTime\", &t.StartTime)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", t, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (x *CMsgProfileUpdateResponse_Result) UnmarshalJSON(b []byte) error {\n\tnum, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = CMsgProfileUpdateResponse_Result(num)\n\treturn nil\n}", "func (m *ServerConfigResultEntry) UnmarshalJSON(raw []byte) error {\n\t// AO0\n\tvar aO0 PolicyAbstractConfigResultEntry\n\tif err := swag.ReadJSON(raw, &aO0); err != nil {\n\t\treturn err\n\t}\n\tm.PolicyAbstractConfigResultEntry = aO0\n\n\t// AO1\n\tvar dataAO1 struct {\n\t\tConfigResult *ServerConfigResultRef `json:\"ConfigResult,omitempty\"`\n\t}\n\tif err := swag.ReadJSON(raw, &dataAO1); err != nil {\n\t\treturn err\n\t}\n\n\tm.ConfigResult = dataAO1.ConfigResult\n\n\treturn nil\n}", "func (l *ListVirtualWANsResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg 
map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", l, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"nextLink\":\n\t\t\terr = unpopulate(val, \"NextLink\", &l.NextLink)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &l.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", l, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (a *AddonsClientGetResult) UnmarshalJSON(data []byte) error {\n\tres, err := unmarshalAddonClassification(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\ta.AddonClassification = res\n\treturn nil\n}", "func (d *DNSSettings) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", d, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"enableProxy\":\n\t\t\terr = unpopulate(val, \"EnableProxy\", &d.EnableProxy)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"requireProxyForNetworkRules\":\n\t\t\terr = unpopulate(val, \"RequireProxyForNetworkRules\", &d.RequireProxyForNetworkRules)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"servers\":\n\t\t\terr = unpopulate(val, \"Servers\", &d.Servers)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", d, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (l *ListVirtualHubsResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", l, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"nextLink\":\n\t\t\terr = unpopulate(val, \"NextLink\", &l.NextLink)\n\t\t\tdelete(rawMsg, key)\n\t\tcase 
\"value\":\n\t\t\terr = unpopulate(val, \"Value\", &l.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", l, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (p *PolicySettingsLogScrubbing) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", p, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"scrubbingRules\":\n\t\t\terr = unpopulate(val, \"ScrubbingRules\", &p.ScrubbingRules)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"state\":\n\t\t\terr = unpopulate(val, \"State\", &p.State)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", p, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (a *AlertRulesClientGetResult) UnmarshalJSON(data []byte) error {\n\tres, err := unmarshalAlertRuleClassification(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\ta.AlertRuleClassification = res\n\treturn nil\n}", "func (a *AvailablePrivateEndpointTypesResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"nextLink\":\n\t\t\terr = unpopulate(val, \"NextLink\", &a.NextLink)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &a.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (o *SyntheticsTriggerCITestRunResult) UnmarshalJSON(bytes []byte) (err error) {\n\traw := map[string]interface{}{}\n\tall := struct {\n\t\tDevice *SyntheticsDeviceID `json:\"device,omitempty\"`\n\t\tLocation *int64 `json:\"location,omitempty\"`\n\t\tPublicId 
*string `json:\"public_id,omitempty\"`\n\t\tResultId *string `json:\"result_id,omitempty\"`\n\t}{}\n\terr = json.Unmarshal(bytes, &all)\n\tif err != nil {\n\t\terr = json.Unmarshal(bytes, &raw)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\to.UnparsedObject = raw\n\t\treturn nil\n\t}\n\tif v := all.Device; v != nil && !v.IsValid() {\n\t\terr = json.Unmarshal(bytes, &raw)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\to.UnparsedObject = raw\n\t\treturn nil\n\t}\n\to.Device = all.Device\n\to.Location = all.Location\n\to.PublicId = all.PublicId\n\to.ResultId = all.ResultId\n\treturn nil\n}", "func (t *TenantListResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", t, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"nextLink\":\n\t\t\terr = unpopulate(val, \"NextLink\", &t.NextLink)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &t.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", t, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (d *DefenderSettingsProperties) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", d, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"deviceQuota\":\n\t\t\terr = unpopulate(val, \"DeviceQuota\", &d.DeviceQuota)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"evaluationEndTime\":\n\t\t\terr = unpopulateTimeRFC3339(val, \"EvaluationEndTime\", &d.EvaluationEndTime)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"mdeIntegration\":\n\t\t\terr = unpopulate(val, \"MdeIntegration\", &d.MdeIntegration)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"onboardingKind\":\n\t\t\terr = unpopulate(val, 
\"OnboardingKind\", &d.OnboardingKind)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"sentinelWorkspaceResourceIds\":\n\t\t\terr = unpopulate(val, \"SentinelWorkspaceResourceIDs\", &d.SentinelWorkspaceResourceIDs)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", d, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (o *SyntheticsAPITestResultShortResult) UnmarshalJSON(bytes []byte) (err error) {\n\traw := map[string]interface{}{}\n\tall := struct {\n\t\tPassed *bool `json:\"passed,omitempty\"`\n\t\tTimings *SyntheticsTiming `json:\"timings,omitempty\"`\n\t}{}\n\terr = json.Unmarshal(bytes, &all)\n\tif err != nil {\n\t\terr = json.Unmarshal(bytes, &raw)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\to.UnparsedObject = raw\n\t\treturn nil\n\t}\n\to.Passed = all.Passed\n\tif all.Timings != nil && all.Timings.UnparsedObject != nil && o.UnparsedObject == nil {\n\t\terr = json.Unmarshal(bytes, &raw)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\to.UnparsedObject = raw\n\t}\n\to.Timings = all.Timings\n\treturn nil\n}", "func (u *UsagesListResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", u, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"nextLink\":\n\t\t\terr = unpopulate(val, \"NextLink\", &u.NextLink)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &u.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", u, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (s *SecurityAdminConfigurationListResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", s, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar 
err error\n\t\tswitch key {\n\t\tcase \"nextLink\":\n\t\t\terr = unpopulate(val, \"NextLink\", &s.NextLink)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &s.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", s, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (this *LoadBalancerSettings) UnmarshalJSON(b []byte) error {\n\treturn DestinationRuleUnmarshaler.Unmarshal(bytes.NewReader(b), this)\n}", "func (u *UsageListResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", u, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &u.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", u, err)\n\t\t}\n\t}\n\treturn nil\n}", "func loadSettings(payload []byte) (Settings, error) {\n\tvar settings Settings\n\terr := json.Unmarshal(payload, &settings)\n\tif err != nil {\n\t\treturn Settings{}, err\n\t}\n\treturn settings, nil\n}", "func (a *ApplicationGatewayWafDynamicManifestResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"id\":\n\t\t\terr = unpopulate(val, \"ID\", &a.ID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"name\":\n\t\t\terr = unpopulate(val, \"Name\", &a.Name)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"properties\":\n\t\t\terr = unpopulate(val, \"Properties\", &a.Properties)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"type\":\n\t\t\terr = unpopulate(val, \"Type\", &a.Type)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn 
fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (m *ManagerEffectiveConnectivityConfigurationListResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", m, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"skipToken\":\n\t\t\terr = unpopulate(val, \"SkipToken\", &m.SkipToken)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &m.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", m, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (e *EndpointServiceResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", e, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"id\":\n\t\t\terr = unpopulate(val, \"ID\", &e.ID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"name\":\n\t\t\terr = unpopulate(val, \"Name\", &e.Name)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"type\":\n\t\t\terr = unpopulate(val, \"Type\", &e.Type)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", e, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (l *ListVPNGatewaysResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", l, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"nextLink\":\n\t\t\terr = unpopulate(val, \"NextLink\", &l.NextLink)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &l.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil 
{\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", l, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (b *BgpSettings) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", b, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"asn\":\n\t\t\terr = unpopulate(val, \"Asn\", &b.Asn)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"bgpPeeringAddress\":\n\t\t\terr = unpopulate(val, \"BgpPeeringAddress\", &b.BgpPeeringAddress)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"bgpPeeringAddresses\":\n\t\t\terr = unpopulate(val, \"BgpPeeringAddresses\", &b.BgpPeeringAddresses)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"peerWeight\":\n\t\t\terr = unpopulate(val, \"PeerWeight\", &b.PeerWeight)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", b, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (c *ConnectionSettingParameter) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"key\":\n\t\t\terr = unpopulate(val, \"Key\", &c.Key)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &c.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (f *FeatureResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", f, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"id\":\n\t\t\terr = unpopulate(val, \"ID\", &f.ID)\n\t\t\tdelete(rawMsg, 
key)\n\t\tcase \"name\":\n\t\t\terr = unpopulate(val, \"Name\", &f.Name)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"properties\":\n\t\t\terr = unpopulate(val, \"Properties\", &f.Properties)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"type\":\n\t\t\terr = unpopulate(val, \"Type\", &f.Type)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", f, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (s *StreamingEndpointSKUInfoListResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", s, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &s.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", s, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (t *TestAllRoutesResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", t, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"routes\":\n\t\t\terr = unpopulate(val, \"Routes\", &t.Routes)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", t, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (x *CMsgSocialFeedCommentsResponse_Result) UnmarshalJSON(b []byte) error {\n\tnum, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = CMsgSocialFeedCommentsResponse_Result(num)\n\treturn nil\n}", "func (a *ApplicationGatewayBackendHTTPSettings) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, 
err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"etag\":\n\t\t\terr = unpopulate(val, \"Etag\", &a.Etag)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"id\":\n\t\t\terr = unpopulate(val, \"ID\", &a.ID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"name\":\n\t\t\terr = unpopulate(val, \"Name\", &a.Name)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"properties\":\n\t\t\terr = unpopulate(val, \"Properties\", &a.Properties)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"type\":\n\t\t\terr = unpopulate(val, \"Type\", &a.Type)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (l *ListRoutingIntentResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", l, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"nextLink\":\n\t\t\terr = unpopulate(val, \"NextLink\", &l.NextLink)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &l.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", l, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (l *ListVirtualHubRouteTableV2SResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", l, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"nextLink\":\n\t\t\terr = unpopulate(val, \"NextLink\", &l.NextLink)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &l.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", l, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (a 
*ApplicationGatewayBackendSettings) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"etag\":\n\t\t\terr = unpopulate(val, \"Etag\", &a.Etag)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"id\":\n\t\t\terr = unpopulate(val, \"ID\", &a.ID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"name\":\n\t\t\terr = unpopulate(val, \"Name\", &a.Name)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"properties\":\n\t\t\terr = unpopulate(val, \"Properties\", &a.Properties)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"type\":\n\t\t\terr = unpopulate(val, \"Type\", &a.Type)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (d *DdosSettings) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", d, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"ddosProtectionPlan\":\n\t\t\terr = unpopulate(val, \"DdosProtectionPlan\", &d.DdosProtectionPlan)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"protectionMode\":\n\t\t\terr = unpopulate(val, \"ProtectionMode\", &d.ProtectionMode)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", d, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (x *CMsgGCToClientAllStarVotesSubmitReply_Result) UnmarshalJSON(b []byte) error {\n\tnum, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = CMsgGCToClientAllStarVotesSubmitReply_Result(num)\n\treturn nil\n}", "func (s *SignatureOverridesFilterValuesQuery) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err 
:= json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", s, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"filterName\":\n\t\t\terr = unpopulate(val, \"FilterName\", &s.FilterName)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", s, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (a *AsyncOperationResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"error\":\n\t\t\terr = unpopulate(val, \"Error\", &a.Error)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"name\":\n\t\t\terr = unpopulate(val, \"Name\", &a.Name)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"status\":\n\t\t\terr = unpopulate(val, \"Status\", &a.Status)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (v *SwapAccountsSetting) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson25363b2dDecodeGithubComDarkfoxs96OpenApiV3SdkOkexGoSdkApi35(&r, v)\n\treturn r.Error()\n}", "func (v *VirtualNetworkListUsageResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", v, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"nextLink\":\n\t\t\terr = unpopulate(val, \"NextLink\", &v.NextLink)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &v.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", v, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (s 
*ServiceProviderListResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", s, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"nextLink\":\n\t\t\terr = unpopulate(val, \"NextLink\", &s.NextLink)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &s.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", s, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (t *ThreeTierRecommendationResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", t, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"applicationServerInstanceCount\":\n\t\t\terr = unpopulate(val, \"ApplicationServerInstanceCount\", &t.ApplicationServerInstanceCount)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"applicationServerVmSku\":\n\t\t\terr = unpopulate(val, \"ApplicationServerVMSKU\", &t.ApplicationServerVMSKU)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"centralServerInstanceCount\":\n\t\t\terr = unpopulate(val, \"CentralServerInstanceCount\", &t.CentralServerInstanceCount)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"centralServerVmSku\":\n\t\t\terr = unpopulate(val, \"CentralServerVMSKU\", &t.CentralServerVMSKU)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"databaseInstanceCount\":\n\t\t\terr = unpopulate(val, \"DatabaseInstanceCount\", &t.DatabaseInstanceCount)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"dbVmSku\":\n\t\t\terr = unpopulate(val, \"DbVMSKU\", &t.DbVMSKU)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"deploymentType\":\n\t\t\terr = unpopulate(val, \"DeploymentType\", &t.DeploymentType)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn 
fmt.Errorf(\"unmarshalling type %T: %v\", t, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (a *ApplicationGatewayBackendSettingsPropertiesFormat) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"hostName\":\n\t\t\terr = unpopulate(val, \"HostName\", &a.HostName)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"pickHostNameFromBackendAddress\":\n\t\t\terr = unpopulate(val, \"PickHostNameFromBackendAddress\", &a.PickHostNameFromBackendAddress)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"port\":\n\t\t\terr = unpopulate(val, \"Port\", &a.Port)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"probe\":\n\t\t\terr = unpopulate(val, \"Probe\", &a.Probe)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"protocol\":\n\t\t\terr = unpopulate(val, \"Protocol\", &a.Protocol)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"provisioningState\":\n\t\t\terr = unpopulate(val, \"ProvisioningState\", &a.ProvisioningState)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"timeout\":\n\t\t\terr = unpopulate(val, \"Timeout\", &a.Timeout)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"trustedRootCertificates\":\n\t\t\terr = unpopulate(val, \"TrustedRootCertificates\", &a.TrustedRootCertificates)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (o *SyntheticsTestOptionsRetry) UnmarshalJSON(bytes []byte) (err error) {\n\traw := map[string]interface{}{}\n\tall := struct {\n\t\tCount *int64 `json:\"count,omitempty\"`\n\t\tInterval *float64 `json:\"interval,omitempty\"`\n\t}{}\n\terr = json.Unmarshal(bytes, &all)\n\tif err != nil {\n\t\terr = json.Unmarshal(bytes, &raw)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\to.UnparsedObject = raw\n\t\treturn nil\n\t}\n\to.Count = all.Count\n\to.Interval = all.Interval\n\treturn 
nil\n}", "func (e *EntityQueryTemplatesClientGetResult) UnmarshalJSON(data []byte) error {\n\tres, err := unmarshalEntityQueryTemplateClassification(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\te.EntityQueryTemplateClassification = res\n\treturn nil\n}", "func (s *SKUAvailabilityListResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", s, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &s.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", s, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (client *DeviceSettingsClient) getAlertSettingsHandleResponse(resp *http.Response) (DeviceSettingsClientGetAlertSettingsResponse, error) {\n\tresult := DeviceSettingsClientGetAlertSettingsResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AlertSettings); err != nil {\n\t\treturn DeviceSettingsClientGetAlertSettingsResponse{}, err\n\t}\n\treturn result, nil\n}", "func (c *ConnectionMonitorWorkspaceSettings) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"workspaceResourceId\":\n\t\t\terr = unpopulate(val, \"WorkspaceResourceID\", &c.WorkspaceResourceID)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (b *BackupSecretResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn err\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch 
key {\n\t\tcase \"value\":\n\t\t\terr = runtime.DecodeByteArray(string(val), &b.Value, runtime.Base64URLFormat)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (c *ConfigurationDiagnosticResponse) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"results\":\n\t\t\terr = unpopulate(val, \"Results\", &c.Results)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (l *ListHubRouteTablesResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", l, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"nextLink\":\n\t\t\terr = unpopulate(val, \"NextLink\", &l.NextLink)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &l.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", l, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (l *ListP2SVPNGatewaysResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", l, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"nextLink\":\n\t\t\terr = unpopulate(val, \"NextLink\", &l.NextLink)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &l.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", l, 
err)\n\t\t}\n\t}\n\treturn nil\n}", "func (r *Results) UnmarshalJSON(b []byte) error {\n\tvar o struct {\n\t\tResults []Result `json:\"results,omitempty\"`\n\t\tErr string `json:\"error,omitempty\"`\n\t}\n\n\tdec := json.NewDecoder(bytes.NewBuffer(b))\n\tdec.UseNumber()\n\terr := dec.Decode(&o)\n\tif err != nil {\n\t\treturn err\n\t}\n\tr.Results = o.Results\n\tif o.Err != \"\" {\n\t\tr.Err = errors.New(o.Err)\n\t}\n\treturn nil\n}", "func (x *CMsgActivatePlusFreeTrialResponse_Result) UnmarshalJSON(b []byte) error {\n\tnum, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = CMsgActivatePlusFreeTrialResponse_Result(num)\n\treturn nil\n}", "func (u *UserSubscriptionQuotaListResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", u, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"nextLink\":\n\t\t\terr = unpopulate(val, \"NextLink\", &u.NextLink)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &u.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", u, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (c *ConnectionSettingProperties) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"clientId\":\n\t\t\terr = unpopulate(val, \"ClientID\", &c.ClientID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"clientSecret\":\n\t\t\terr = unpopulate(val, \"ClientSecret\", &c.ClientSecret)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"parameters\":\n\t\t\terr = unpopulate(val, \"Parameters\", &c.Parameters)\n\t\t\tdelete(rawMsg, key)\n\t\tcase 
\"provisioningState\":\n\t\t\terr = unpopulate(val, \"ProvisioningState\", &c.ProvisioningState)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"scopes\":\n\t\t\terr = unpopulate(val, \"Scopes\", &c.Scopes)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"serviceProviderDisplayName\":\n\t\t\terr = unpopulate(val, \"ServiceProviderDisplayName\", &c.ServiceProviderDisplayName)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"serviceProviderId\":\n\t\t\terr = unpopulate(val, \"ServiceProviderID\", &c.ServiceProviderID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"settingId\":\n\t\t\terr = unpopulate(val, \"SettingID\", &c.SettingID)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t\t}\n\t}\n\treturn nil\n}" ]
[ "0.6875413", "0.6686578", "0.62999934", "0.5880715", "0.58680254", "0.5806092", "0.57214934", "0.5703393", "0.5693801", "0.5664528", "0.5656249", "0.5615088", "0.5578815", "0.5578258", "0.55728483", "0.55662054", "0.55587554", "0.55584365", "0.55446297", "0.554454", "0.5529279", "0.55267966", "0.54706895", "0.5465733", "0.5458811", "0.5446573", "0.54421943", "0.5442083", "0.544064", "0.5438225", "0.5433312", "0.5433145", "0.5428149", "0.54186714", "0.54105186", "0.54033804", "0.53947496", "0.53870374", "0.53850806", "0.5383439", "0.53723085", "0.5368691", "0.53574866", "0.5340588", "0.53382415", "0.5337564", "0.5334358", "0.53339887", "0.53097695", "0.5284183", "0.5283137", "0.5282833", "0.52825487", "0.5281488", "0.5274267", "0.52695954", "0.5259728", "0.5258622", "0.5254972", "0.52395207", "0.52378386", "0.52368", "0.5234319", "0.52224535", "0.52114385", "0.52090865", "0.52064574", "0.52017426", "0.519933", "0.518883", "0.51842725", "0.5177548", "0.5173897", "0.51537055", "0.51530004", "0.5149799", "0.51410896", "0.51401013", "0.5131254", "0.5129851", "0.5129432", "0.512519", "0.5114781", "0.5108619", "0.5105351", "0.5095003", "0.50948626", "0.50939745", "0.5090376", "0.5083377", "0.508179", "0.5072719", "0.50700575", "0.5062678", "0.5061529", "0.5057499", "0.50573665", "0.5056378", "0.5053798", "0.5046663" ]
0.7434199
0
UnmarshalJSON implements the json.Unmarshaller interface for type SettingsUpdateResult.
func (s *SettingsUpdateResult) UnmarshalJSON(data []byte) error { res, err := unmarshalSettingClassification(data) if err != nil { return err } s.SettingClassification = res return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (p *ProductSettingsClientUpdateResult) UnmarshalJSON(data []byte) error {\n\tres, err := unmarshalSettingsClassification(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\tp.SettingsClassification = res\n\treturn nil\n}", "func (s *SettingsGetResult) UnmarshalJSON(data []byte) error {\n\tres, err := unmarshalSettingClassification(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.SettingClassification = res\n\treturn nil\n}", "func (s *SecretUpdate) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", s, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"properties\":\n\t\t\terr = unpopulate(val, \"Properties\", &s.Properties)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"systemData\":\n\t\t\terr = unpopulate(val, \"SystemData\", &s.SystemData)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", s, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (x *CMsgProfileUpdateResponse_Result) UnmarshalJSON(b []byte) error {\n\tnum, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = CMsgProfileUpdateResponse_Result(num)\n\treturn nil\n}", "func (p *ProductSettingsClientGetResult) UnmarshalJSON(data []byte) error {\n\tres, err := unmarshalSettingsClassification(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\tp.SettingsClassification = res\n\treturn nil\n}", "func (a *AccountSKUListResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &a.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn 
fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (m *MediaServiceUpdate) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", m, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"identity\":\n\t\t\terr = unpopulate(val, \"Identity\", &m.Identity)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"properties\":\n\t\t\terr = unpopulate(val, \"Properties\", &m.Properties)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"tags\":\n\t\t\terr = unpopulate(val, \"Tags\", &m.Tags)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", m, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (s *ServiceUpdate) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", s, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"properties\":\n\t\t\terr = unpopulate(val, \"Properties\", &s.Properties)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", s, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (s *SyncIdentityProviderUpdate) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", s, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"properties\":\n\t\t\terr = unpopulate(val, \"Properties\", &s.Properties)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"systemData\":\n\t\t\terr = unpopulate(val, \"SystemData\", &s.SystemData)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", s, 
err)\n\t\t}\n\t}\n\treturn nil\n}", "func (s *SyncSetUpdate) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", s, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"properties\":\n\t\t\terr = unpopulate(val, \"Properties\", &s.Properties)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"systemData\":\n\t\t\terr = unpopulate(val, \"SystemData\", &s.SystemData)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", s, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (v *UserUpdate) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjsonD2b7633eDecodeGithubComMailcoursesTechnoparkDbmsForumGeneratedModels1(&r, v)\n\treturn r.Error()\n}", "func (v *VirtualApplianceSKUListResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", v, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"nextLink\":\n\t\t\terr = unpopulate(val, \"NextLink\", &v.NextLink)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &v.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", v, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (mu *MemberUpdate) UnmarshalJSON(body []byte) error {\n\tvar m map[string]*json.RawMessage\n\terr := json.Unmarshal(body, &m)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor k, v := range m {\n\t\tswitch k {\n\t\tcase \"tags\":\n\t\t\tif v != nil {\n\t\t\t\tvar tags map[string]*string\n\t\t\t\terr = json.Unmarshal(*v, &tags)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tmu.Tags = tags\n\t\t\t}\n\t\tcase \"properties\":\n\t\t\tif v != nil 
{\n\t\t\t\tvar memberPropertiesUpdate MemberPropertiesUpdate\n\t\t\t\terr = json.Unmarshal(*v, &memberPropertiesUpdate)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tmu.MemberPropertiesUpdate = &memberPropertiesUpdate\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (aup *AccountUpdateParameters) UnmarshalJSON(body []byte) error {\n\tvar m map[string]*json.RawMessage\n\terr := json.Unmarshal(body, &m)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor k, v := range m {\n\t\tswitch k {\n\t\tcase \"tags\":\n\t\t\tif v != nil {\n\t\t\t\tvar tags map[string]*string\n\t\t\t\terr = json.Unmarshal(*v, &tags)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\taup.Tags = tags\n\t\t\t}\n\t\tcase \"kind\":\n\t\t\tif v != nil {\n\t\t\t\tvar kind Kind\n\t\t\t\terr = json.Unmarshal(*v, &kind)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\taup.Kind = kind\n\t\t\t}\n\t\tcase \"sku\":\n\t\t\tif v != nil {\n\t\t\t\tvar sku Sku\n\t\t\t\terr = json.Unmarshal(*v, &sku)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\taup.Sku = &sku\n\t\t\t}\n\t\tcase \"properties\":\n\t\t\tif v != nil {\n\t\t\t\tvar accountProperties AccountProperties\n\t\t\t\terr = json.Unmarshal(*v, &accountProperties)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\taup.AccountProperties = &accountProperties\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (v *UpdateInfo) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjsonDe066f3DecodeGithubComStudtoolDocumentsServiceModels(&r, v)\n\treturn r.Error()\n}", "func (s *SingleServerRecommendationResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", s, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"deploymentType\":\n\t\t\terr = unpopulate(val, \"DeploymentType\", 
&s.DeploymentType)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"vmSku\":\n\t\t\terr = unpopulate(val, \"VMSKU\", &s.VMSKU)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", s, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (e *EndpointServiceResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", e, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"id\":\n\t\t\terr = unpopulate(val, \"ID\", &e.ID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"name\":\n\t\t\terr = unpopulate(val, \"Name\", &e.Name)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"type\":\n\t\t\terr = unpopulate(val, \"Type\", &e.Type)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", e, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (v *ReconnectSettings) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer9(&r, v)\n\treturn r.Error()\n}", "func (m *ServerConfigResultEntry) UnmarshalJSON(raw []byte) error {\n\t// AO0\n\tvar aO0 PolicyAbstractConfigResultEntry\n\tif err := swag.ReadJSON(raw, &aO0); err != nil {\n\t\treturn err\n\t}\n\tm.PolicyAbstractConfigResultEntry = aO0\n\n\t// AO1\n\tvar dataAO1 struct {\n\t\tConfigResult *ServerConfigResultRef `json:\"ConfigResult,omitempty\"`\n\t}\n\tif err := swag.ReadJSON(raw, &dataAO1); err != nil {\n\t\treturn err\n\t}\n\n\tm.ConfigResult = dataAO1.ConfigResult\n\n\treturn nil\n}", "func (c *ChannelSettings) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"botId\":\n\t\t\terr = 
unpopulate(val, \"BotID\", &c.BotID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"botIconUrl\":\n\t\t\terr = unpopulate(val, \"BotIconURL\", &c.BotIconURL)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"channelDisplayName\":\n\t\t\terr = unpopulate(val, \"ChannelDisplayName\", &c.ChannelDisplayName)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"channelId\":\n\t\t\terr = unpopulate(val, \"ChannelID\", &c.ChannelID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"disableLocalAuth\":\n\t\t\terr = unpopulate(val, \"DisableLocalAuth\", &c.DisableLocalAuth)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"extensionKey1\":\n\t\t\terr = unpopulate(val, \"ExtensionKey1\", &c.ExtensionKey1)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"extensionKey2\":\n\t\t\terr = unpopulate(val, \"ExtensionKey2\", &c.ExtensionKey2)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"isEnabled\":\n\t\t\terr = unpopulate(val, \"IsEnabled\", &c.IsEnabled)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"requireTermsAgreement\":\n\t\t\terr = unpopulate(val, \"RequireTermsAgreement\", &c.RequireTermsAgreement)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"sites\":\n\t\t\terr = unpopulate(val, \"Sites\", &c.Sites)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (v *ShadowUpdateMsgSt) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjsonB7ed31d3DecodeMevericcoreMccommon1(&r, v)\n\treturn r.Error()\n}", "func (v *PostUpdate) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjsonD2b7633eDecodeGithubComMailcoursesTechnoparkDbmsForumGeneratedModels6(&r, v)\n\treturn r.Error()\n}", "func (t *TroubleshootingResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", t, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"code\":\n\t\t\terr = 
unpopulate(val, \"Code\", &t.Code)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"endTime\":\n\t\t\terr = unpopulateTimeRFC3339(val, \"EndTime\", &t.EndTime)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"results\":\n\t\t\terr = unpopulate(val, \"Results\", &t.Results)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"startTime\":\n\t\t\terr = unpopulateTimeRFC3339(val, \"StartTime\", &t.StartTime)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", t, err)\n\t\t}\n\t}\n\treturn nil\n}", "func UnmarshalUserSettings(b []byte) UserSettings {\n\ta := UserSettings{}\n\tjson.Unmarshal(b, &a)\n\treturn a\n}", "func (v *DidChangeConfigurationParams) UnmarshalJSONObject(dec *gojay.Decoder, k string) error {\n\tif k == keySettings {\n\t\treturn dec.Interface(&v.Settings)\n\t}\n\treturn nil\n}", "func (a *AvailableServiceAliasesResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"nextLink\":\n\t\t\terr = unpopulate(val, \"NextLink\", &a.NextLink)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &a.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (h *HostSettingsResponse) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", h, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"BotOpenIdMetadata\":\n\t\t\terr = unpopulate(val, \"BotOpenIDMetadata\", &h.BotOpenIDMetadata)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"OAuthUrl\":\n\t\t\terr = unpopulate(val, \"OAuthURL\", 
&h.OAuthURL)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"ToBotFromChannelOpenIdMetadataUrl\":\n\t\t\terr = unpopulate(val, \"ToBotFromChannelOpenIDMetadataURL\", &h.ToBotFromChannelOpenIDMetadataURL)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"ToBotFromChannelTokenIssuer\":\n\t\t\terr = unpopulate(val, \"ToBotFromChannelTokenIssuer\", &h.ToBotFromChannelTokenIssuer)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"ToBotFromEmulatorOpenIdMetadataUrl\":\n\t\t\terr = unpopulate(val, \"ToBotFromEmulatorOpenIDMetadataURL\", &h.ToBotFromEmulatorOpenIDMetadataURL)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"ToChannelFromBotLoginUrl\":\n\t\t\terr = unpopulate(val, \"ToChannelFromBotLoginURL\", &h.ToChannelFromBotLoginURL)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"ToChannelFromBotOAuthScope\":\n\t\t\terr = unpopulate(val, \"ToChannelFromBotOAuthScope\", &h.ToChannelFromBotOAuthScope)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"ValidateAuthority\":\n\t\t\terr = unpopulate(val, \"ValidateAuthority\", &h.ValidateAuthority)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", h, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (h *HlsSettings) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", h, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"characteristics\":\n\t\t\terr = unpopulate(val, \"Characteristics\", &h.Characteristics)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"default\":\n\t\t\terr = unpopulate(val, \"Default\", &h.Default)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"forced\":\n\t\t\terr = unpopulate(val, \"Forced\", &h.Forced)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", h, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (sfu *ServerForUpdate) UnmarshalJSON(body []byte) error {\n var m map[string]*json.RawMessage\n err 
:= json.Unmarshal(body, &m)\n if err != nil {\n return err\n }\n for k, v := range m {\n switch k {\n case \"sku\":\n if v != nil {\n var sku Sku\n err = json.Unmarshal(*v, &sku)\n if err != nil {\n return err\n }\n sfu.Sku = &sku\n }\n case \"properties\":\n if v != nil {\n var serverPropertiesForUpdate ServerPropertiesForUpdate\n err = json.Unmarshal(*v, &serverPropertiesForUpdate)\n if err != nil {\n return err\n }\n sfu.ServerPropertiesForUpdate = &serverPropertiesForUpdate\n }\n case \"tags\":\n if v != nil {\n var tags map[string]*string\n err = json.Unmarshal(*v, &tags)\n if err != nil {\n return err\n }\n sfu.Tags = tags\n }\n }\n }\n\n return nil\n }", "func (v *updateByQuery) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson390b7126DecodeGithubComChancedPicker2(&r, v)\n\treturn r.Error()\n}", "func (a *AppTemplatesResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"nextLink\":\n\t\t\terr = unpopulate(val, \"NextLink\", &a.NextLink)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &a.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (a *ApplicationGatewayWafDynamicManifestPropertiesResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"availableRuleSets\":\n\t\t\terr = unpopulate(val, \"AvailableRuleSets\", &a.AvailableRuleSets)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"defaultRuleSet\":\n\t\t\terr = 
unpopulate(val, \"DefaultRuleSet\", &a.DefaultRuleSet)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (i *ImageTemplateWindowsUpdateCustomizer) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn err\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"filters\":\n\t\t\terr = unpopulate(val, &i.Filters)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"name\":\n\t\t\terr = unpopulate(val, &i.Name)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"searchCriteria\":\n\t\t\terr = unpopulate(val, &i.SearchCriteria)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"type\":\n\t\t\terr = unpopulate(val, &i.Type)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"updateLimit\":\n\t\t\terr = unpopulate(val, &i.UpdateLimit)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (v *TradeUpdate) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson3e8ab7adDecodeGithubComAlpacahqAlpacaTradeApiGoV3Alpaca9(&r, v)\n\treturn r.Error()\n}", "func (s *SAPSizingRecommendationResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", s, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"deploymentType\":\n\t\t\terr = unpopulate(val, \"DeploymentType\", &s.DeploymentType)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", s, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (usap *UpdateStorageAccountParameters) UnmarshalJSON(body []byte) error {\n\tvar m map[string]*json.RawMessage\n\terr := json.Unmarshal(body, &m)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor k, v := range m {\n\t\tswitch 
k {\n\t\tcase \"properties\":\n\t\t\tif v != nil {\n\t\t\t\tvar updateStorageAccountProperties UpdateStorageAccountProperties\n\t\t\t\terr = json.Unmarshal(*v, &updateStorageAccountProperties)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tusap.UpdateStorageAccountProperties = &updateStorageAccountProperties\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (l *ListVirtualHubIPConfigurationResults) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", l, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"nextLink\":\n\t\t\terr = unpopulate(val, \"NextLink\", &l.NextLink)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &l.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", l, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (m *MultipleActivationKeyUpdate) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", m, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"tags\":\n\t\t\terr = unpopulate(val, \"Tags\", &m.Tags)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", m, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (v *ShadowUpdateRPCMsgSt) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson5bd79fa1DecodeMevericcoreMcplantainer(&r, v)\n\treturn r.Error()\n}", "func (a *AssignmentUpdate) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar 
err error\n\t\tswitch key {\n\t\tcase \"identity\":\n\t\t\terr = unpopulate(val, \"Identity\", &a.Identity)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"location\":\n\t\t\terr = unpopulate(val, \"Location\", &a.Location)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (l *ListVirtualWANsResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", l, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"nextLink\":\n\t\t\terr = unpopulate(val, \"NextLink\", &l.NextLink)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &l.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", l, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (r *ResourceSKUListResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", r, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"nextLink\":\n\t\t\terr = unpopulate(val, \"NextLink\", &r.NextLink)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &r.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", r, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (a *ApplicationGatewayAvailableWafRuleSetsResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"value\":\n\t\t\terr = 
unpopulate(val, \"Value\", &a.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (i *InterfaceDNSSettings) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", i, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"appliedDnsServers\":\n\t\t\terr = unpopulate(val, \"AppliedDNSServers\", &i.AppliedDNSServers)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"dnsServers\":\n\t\t\terr = unpopulate(val, \"DNSServers\", &i.DNSServers)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"internalDnsNameLabel\":\n\t\t\terr = unpopulate(val, \"InternalDNSNameLabel\", &i.InternalDNSNameLabel)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"internalDomainNameSuffix\":\n\t\t\terr = unpopulate(val, \"InternalDomainNameSuffix\", &i.InternalDomainNameSuffix)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"internalFqdn\":\n\t\t\terr = unpopulate(val, \"InternalFqdn\", &i.InternalFqdn)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", i, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (x *MetadataUpdateType) UnmarshalJSON(b []byte) error {\n\tnum, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = MetadataUpdateType(num)\n\treturn nil\n}", "func (x *CMsgSetTeamFanContentStatusResponse_EResult) UnmarshalJSON(b []byte) error {\n\tnum, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = CMsgSetTeamFanContentStatusResponse_EResult(num)\n\treturn nil\n}", "func (v *SwapAccountsSetting) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson25363b2dDecodeGithubComDarkfoxs96OpenApiV3SdkOkexGoSdkApi35(&r, v)\n\treturn r.Error()\n}", "func 
UnmarshalUpdateChannelResponse(m map[string]json.RawMessage, result interface{}) (err error) {\n\tobj := new(UpdateChannelResponse)\n\terr = core.UnmarshalPrimitive(m, \"channel_id\", &obj.ChannelID)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"status_code\", &obj.StatusCode)\n\tif err != nil {\n\t\treturn\n\t}\n\treflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))\n\treturn\n}", "func (u *UpdateDataLakeStoreAccountProperties) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", u, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"defaultGroup\":\n\t\t\terr = unpopulate(val, \"DefaultGroup\", &u.DefaultGroup)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"encryptionConfig\":\n\t\t\terr = unpopulate(val, \"EncryptionConfig\", &u.EncryptionConfig)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"firewallAllowAzureIps\":\n\t\t\terr = unpopulate(val, \"FirewallAllowAzureIPs\", &u.FirewallAllowAzureIPs)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"firewallRules\":\n\t\t\terr = unpopulate(val, \"FirewallRules\", &u.FirewallRules)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"firewallState\":\n\t\t\terr = unpopulate(val, \"FirewallState\", &u.FirewallState)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"newTier\":\n\t\t\terr = unpopulate(val, \"NewTier\", &u.NewTier)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"trustedIdProviderState\":\n\t\t\terr = unpopulate(val, \"TrustedIDProviderState\", &u.TrustedIDProviderState)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"trustedIdProviders\":\n\t\t\terr = unpopulate(val, \"TrustedIDProviders\", &u.TrustedIDProviders)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"virtualNetworkRules\":\n\t\t\terr = unpopulate(val, \"VirtualNetworkRules\", &u.VirtualNetworkRules)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", u, 
err)\n\t\t}\n\t}\n\treturn nil\n}", "func (c *ChannelUpdateParametersProperties) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"expirationTimeIfNotActivatedUtc\":\n\t\t\terr = unpopulateTimeRFC3339(val, \"ExpirationTimeIfNotActivatedUTC\", &c.ExpirationTimeIfNotActivatedUTC)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"partnerDestinationInfo\":\n\t\t\tc.PartnerDestinationInfo, err = unmarshalPartnerUpdateDestinationInfoClassification(val)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"partnerTopicInfo\":\n\t\t\terr = unpopulate(val, \"PartnerTopicInfo\", &c.PartnerTopicInfo)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (x *UpdateType) UnmarshalJSON(b []byte) error {\n\tnum, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = UpdateType(num)\n\treturn nil\n}", "func (v *PostUpdate) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjsonD2b7633eDecodeGithubComDbProjectPkgModels8(&r, v)\n\treturn r.Error()\n}", "func (d *DashSettings) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", d, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"role\":\n\t\t\terr = unpopulate(val, \"Role\", &d.Role)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", d, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (l *ListVPNServerConfigurationsResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, 
&rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", l, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"nextLink\":\n\t\t\terr = unpopulate(val, \"NextLink\", &l.NextLink)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &l.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", l, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (d *DeploymentScaleSettings) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", d, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"activeCapacity\":\n\t\t\terr = unpopulate(val, \"ActiveCapacity\", &d.ActiveCapacity)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"capacity\":\n\t\t\terr = unpopulate(val, \"Capacity\", &d.Capacity)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"scaleType\":\n\t\t\terr = unpopulate(val, \"ScaleType\", &d.ScaleType)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", d, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (v *EventApplicationPermissionUpdate) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjson4c51a5cdDecodeGithubComOvhCdsSdk10(&r, v)\n\treturn r.Error()\n}", "func (c *ConnectionSettingResponseList) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"nextLink\":\n\t\t\terr = unpopulate(val, \"NextLink\", &c.NextLink)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &c.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil 
{\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", c, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (w *WebhookUpdatePartnerDestinationInfo) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", w, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"endpointType\":\n\t\t\terr = unpopulate(val, \"EndpointType\", &w.EndpointType)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"properties\":\n\t\t\terr = unpopulate(val, \"Properties\", &w.Properties)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", w, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (a *ApplicationGatewayWafDynamicManifestResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"id\":\n\t\t\terr = unpopulate(val, \"ID\", &a.ID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"name\":\n\t\t\terr = unpopulate(val, \"Name\", &a.Name)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"properties\":\n\t\t\terr = unpopulate(val, \"Properties\", &a.Properties)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"type\":\n\t\t\terr = unpopulate(val, \"Type\", &a.Type)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (s *SecurityAdminConfigurationListResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", s, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"nextLink\":\n\t\t\terr = unpopulate(val, \"NextLink\", 
&s.NextLink)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &s.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", s, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (d *DefenderSettingsProperties) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", d, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"deviceQuota\":\n\t\t\terr = unpopulate(val, \"DeviceQuota\", &d.DeviceQuota)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"evaluationEndTime\":\n\t\t\terr = unpopulateTimeRFC3339(val, \"EvaluationEndTime\", &d.EvaluationEndTime)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"mdeIntegration\":\n\t\t\terr = unpopulate(val, \"MdeIntegration\", &d.MdeIntegration)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"onboardingKind\":\n\t\t\terr = unpopulate(val, \"OnboardingKind\", &d.OnboardingKind)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"sentinelWorkspaceResourceIds\":\n\t\t\terr = unpopulate(val, \"SentinelWorkspaceResourceIDs\", &d.SentinelWorkspaceResourceIDs)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", d, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (s *SignatureOverridesFilterValuesResponse) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", s, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"filterValues\":\n\t\t\terr = unpopulate(val, \"FilterValues\", &s.FilterValues)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", s, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (d *DNSSettings) UnmarshalJSON(data []byte) error {\n\tvar 
rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", d, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"enableProxy\":\n\t\t\terr = unpopulate(val, \"EnableProxy\", &d.EnableProxy)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"requireProxyForNetworkRules\":\n\t\t\terr = unpopulate(val, \"RequireProxyForNetworkRules\", &d.RequireProxyForNetworkRules)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"servers\":\n\t\t\terr = unpopulate(val, \"Servers\", &d.Servers)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", d, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (l *ListVPNServerConfigurationPolicyGroupsResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", l, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"nextLink\":\n\t\t\terr = unpopulate(val, \"NextLink\", &l.NextLink)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &l.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", l, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (m *ManagerDeploymentStatusListResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", m, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"skipToken\":\n\t\t\terr = unpopulate(val, \"SkipToken\", &m.SkipToken)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &m.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", 
m, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (u *UpdateFirewallRuleParameters) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", u, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"properties\":\n\t\t\terr = unpopulate(val, \"Properties\", &u.Properties)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", u, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (v *VirtualNetworkListUsageResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", v, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"nextLink\":\n\t\t\terr = unpopulate(val, \"NextLink\", &v.NextLink)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &v.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", v, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (x *CMsgSocialFeedResponse_Result) UnmarshalJSON(b []byte) error {\n\tnum, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = CMsgSocialFeedResponse_Result(num)\n\treturn nil\n}", "func (u *UpdateFirewallRuleProperties) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", u, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"endIpAddress\":\n\t\t\terr = unpopulate(val, \"EndIPAddress\", &u.EndIPAddress)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"startIpAddress\":\n\t\t\terr = unpopulate(val, \"StartIPAddress\", 
&u.StartIPAddress)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", u, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (l *ListVPNSitesResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", l, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"nextLink\":\n\t\t\terr = unpopulate(val, \"NextLink\", &l.NextLink)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &l.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", l, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (l *ListVirtualHubsResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", l, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"nextLink\":\n\t\t\terr = unpopulate(val, \"NextLink\", &l.NextLink)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &l.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", l, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (s *SingleQueryResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", s, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"description\":\n\t\t\terr = unpopulate(val, \"Description\", &s.Description)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"destinationPorts\":\n\t\t\terr = unpopulate(val, \"DestinationPorts\", &s.DestinationPorts)\n\t\t\tdelete(rawMsg, key)\n\t\tcase 
\"direction\":\n\t\t\terr = unpopulate(val, \"Direction\", &s.Direction)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"group\":\n\t\t\terr = unpopulate(val, \"Group\", &s.Group)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"inheritedFromParentPolicy\":\n\t\t\terr = unpopulate(val, \"InheritedFromParentPolicy\", &s.InheritedFromParentPolicy)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"lastUpdated\":\n\t\t\terr = unpopulate(val, \"LastUpdated\", &s.LastUpdated)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"mode\":\n\t\t\terr = unpopulate(val, \"Mode\", &s.Mode)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"protocol\":\n\t\t\terr = unpopulate(val, \"Protocol\", &s.Protocol)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"severity\":\n\t\t\terr = unpopulate(val, \"Severity\", &s.Severity)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"signatureId\":\n\t\t\terr = unpopulate(val, \"SignatureID\", &s.SignatureID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"sourcePorts\":\n\t\t\terr = unpopulate(val, \"SourcePorts\", &s.SourcePorts)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", s, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (v *VirtualNetworkDdosProtectionStatusResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", v, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"nextLink\":\n\t\t\terr = unpopulate(val, \"NextLink\", &v.NextLink)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &v.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", v, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (ujp *UpdateJobProperties) UnmarshalJSON(body []byte) error {\n\tvar m map[string]*json.RawMessage\n\terr := json.Unmarshal(body, &m)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor k, v := range m 
{\n\t\tswitch k {\n\t\tcase \"details\":\n\t\t\tif v != nil {\n\t\t\t\tvar details UpdateJobDetails\n\t\t\t\terr = json.Unmarshal(*v, &details)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tujp.Details = &details\n\t\t\t}\n\t\tcase \"destinationAccountDetails\":\n\t\t\tif v != nil {\n\t\t\t\tdestinationAccountDetails, err := unmarshalBasicDestinationAccountDetailsArray(*v)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tujp.DestinationAccountDetails = &destinationAccountDetails\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (s *StreamingEndpointSKUInfoListResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", s, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"value\":\n\t\t\terr = unpopulate(val, \"Value\", &s.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", s, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (this *ConnectionPoolSettings_HTTPSettings) UnmarshalJSON(b []byte) error {\n\treturn DestinationRuleUnmarshaler.Unmarshal(bytes.NewReader(b), this)\n}", "func (v *PostUpdate) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjsonC80ae7adDecodeGithubComDeiklovTechDbRomanovAndrGolangModels8(&r, v)\n\treturn r.Error()\n}", "func (p *PolicySettings) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", p, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"customBlockResponseBody\":\n\t\t\terr = unpopulate(val, \"CustomBlockResponseBody\", &p.CustomBlockResponseBody)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"customBlockResponseStatusCode\":\n\t\t\terr = unpopulate(val, 
\"CustomBlockResponseStatusCode\", &p.CustomBlockResponseStatusCode)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"fileUploadEnforcement\":\n\t\t\terr = unpopulate(val, \"FileUploadEnforcement\", &p.FileUploadEnforcement)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"fileUploadLimitInMb\":\n\t\t\terr = unpopulate(val, \"FileUploadLimitInMb\", &p.FileUploadLimitInMb)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"logScrubbing\":\n\t\t\terr = unpopulate(val, \"LogScrubbing\", &p.LogScrubbing)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"maxRequestBodySizeInKb\":\n\t\t\terr = unpopulate(val, \"MaxRequestBodySizeInKb\", &p.MaxRequestBodySizeInKb)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"mode\":\n\t\t\terr = unpopulate(val, \"Mode\", &p.Mode)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"requestBodyCheck\":\n\t\t\terr = unpopulate(val, \"RequestBodyCheck\", &p.RequestBodyCheck)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"requestBodyEnforcement\":\n\t\t\terr = unpopulate(val, \"RequestBodyEnforcement\", &p.RequestBodyEnforcement)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"requestBodyInspectLimitInKB\":\n\t\t\terr = unpopulate(val, \"RequestBodyInspectLimitInKB\", &p.RequestBodyInspectLimitInKB)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"state\":\n\t\t\terr = unpopulate(val, \"State\", &p.State)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", p, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (this *ClientTLSSettings) UnmarshalJSON(b []byte) error {\n\treturn DestinationRuleUnmarshaler.Unmarshal(bytes.NewReader(b), this)\n}", "func (a *ApplicationGatewayWafDynamicManifestResultList) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"nextLink\":\n\t\t\terr = unpopulate(val, \"NextLink\", &a.NextLink)\n\t\t\tdelete(rawMsg, key)\n\t\tcase 
\"value\":\n\t\t\terr = unpopulate(val, \"Value\", &a.Value)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (client AppsClient) UpdateSettingsResponder(resp *http.Response) (result OperationStatus, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (b *BackupSecretResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn err\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"value\":\n\t\t\terr = runtime.DecodeByteArray(string(val), &b.Value, runtime.Base64URLFormat)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (a *ActivityReportVulnerabilityTypesUpdated) UnmarshalJSON(b []byte) error {\n\tvar helper activityReportVulnerabilityTypesUpdatedUnmarshalHelper\n\tif err := json.Unmarshal(b, &helper); err != nil {\n\t\treturn err\n\t}\n\ta.OldVulnerabilityTypes = helper.Relationships.OldVulnerabilityTypes.Data\n\ta.NewVulnerabilityTypes = helper.Relationships.NewVulnerabilityTypes.Data\n\treturn nil\n}", "func (v *ShadowUpdateMsgStateSt) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjsonB7ed31d3DecodeMevericcoreMccommon(&r, v)\n\treturn r.Error()\n}", "func (a *ApplicationGatewayBackendSettings) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"etag\":\n\t\t\terr = unpopulate(val, \"Etag\", 
&a.Etag)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"id\":\n\t\t\terr = unpopulate(val, \"ID\", &a.ID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"name\":\n\t\t\terr = unpopulate(val, \"Name\", &a.Name)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"properties\":\n\t\t\terr = unpopulate(val, \"Properties\", &a.Properties)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"type\":\n\t\t\terr = unpopulate(val, \"Type\", &a.Type)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t\t}\n\t}\n\treturn nil\n}", "func UnmarshalKMSSettings(m map[string]json.RawMessage, result interface{}) (err error) {\n\tobj := new(KMSSettings)\n\terr = core.UnmarshalPrimitive(m, \"location\", &obj.Location)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"encryption_scheme\", &obj.EncryptionScheme)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"resource_group\", &obj.ResourceGroup)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalModel(m, \"primary_crk\", &obj.PrimaryCrk, UnmarshalKMSSettingsPrimaryCrk)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalModel(m, \"secondary_crk\", &obj.SecondaryCrk, UnmarshalKMSSettingsSecondaryCrk)\n\tif err != nil {\n\t\treturn\n\t}\n\treflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))\n\treturn\n}", "func (r *ResourceUpdateParameters) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", r, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"tags\":\n\t\t\terr = unpopulate(val, \"Tags\", &r.Tags)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", r, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (future *PrivateEndpointConnectionsUpdateFuture) UnmarshalJSON(body []byte) error {\n\tvar azFuture azure.Future\n\tif 
err := json.Unmarshal(body, &azFuture); err != nil {\n\t\treturn err\n\t}\n\tfuture.FutureAPI = &azFuture\n\tfuture.Result = future.result\n\treturn nil\n}", "func (k *KustoPoolDatabasesClientUpdateResult) UnmarshalJSON(data []byte) error {\n\tres, err := unmarshalDatabaseClassification(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\tk.DatabaseClassification = res\n\treturn nil\n}", "func (o *OpenShiftClusterUpdate) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", o, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"properties\":\n\t\t\terr = unpopulate(val, \"Properties\", &o.Properties)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"systemData\":\n\t\t\terr = unpopulate(val, \"SystemData\", &o.SystemData)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"tags\":\n\t\t\terr = unpopulate(val, \"Tags\", &o.Tags)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", o, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (v *UpdatePasswordData) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjsonD2b7633eDecode20191OPGPlus2InternalPkgModels4(&r, v)\n\treturn r.Error()\n}", "func (m *MachinePoolUpdate) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", m, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"properties\":\n\t\t\terr = unpopulate(val, \"Properties\", &m.Properties)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"systemData\":\n\t\t\terr = unpopulate(val, \"SystemData\", &m.SystemData)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", m, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (v *ThreadUpdate) 
UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\teasyjsonD2b7633eDecodeGithubComDbProjectPkgModels4(&r, v)\n\treturn r.Error()\n}", "func ParseUpdateMRNSettingsResponse(rsp *http.Response) (*UpdateMRNSettingsResponse, error) {\n\tbodyBytes, err := io.ReadAll(rsp.Body)\n\tdefer func() { _ = rsp.Body.Close() }()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponse := &UpdateMRNSettingsResponse{\n\t\tBody: bodyBytes,\n\t\tHTTPResponse: rsp,\n\t}\n\n\treturn response, nil\n}", "func (d *DebugSendResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", d, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"failure\":\n\t\t\terr = unpopulate(val, \"Failure\", &d.Failure)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"results\":\n\t\t\terr = unpopulate(val, \"Results\", &d.Results)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"success\":\n\t\t\terr = unpopulate(val, \"Success\", &d.Success)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", d, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (a *AsyncOperationResult) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"error\":\n\t\t\terr = unpopulate(val, \"Error\", &a.Error)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"name\":\n\t\t\terr = unpopulate(val, \"Name\", &a.Name)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"status\":\n\t\t\terr = unpopulate(val, \"Status\", &a.Status)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (ShareUpdated) Unmarshal(v []byte) 
(interface{}, error) {\n\te := ShareUpdated{}\n\terr := json.Unmarshal(v, &e)\n\treturn e, err\n}", "func (p *PolicySettingsLogScrubbing) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", p, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"scrubbingRules\":\n\t\t\terr = unpopulate(val, \"ScrubbingRules\", &p.ScrubbingRules)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"state\":\n\t\t\terr = unpopulate(val, \"State\", &p.State)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", p, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (s *SignatureOverridesFilterValuesQuery) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", s, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"filterName\":\n\t\t\terr = unpopulate(val, \"FilterName\", &s.FilterName)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", s, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (u *UpdateFirewallRuleWithAccountParameters) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", u, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"name\":\n\t\t\terr = unpopulate(val, \"Name\", &u.Name)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"properties\":\n\t\t\terr = unpopulate(val, \"Properties\", &u.Properties)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", u, err)\n\t\t}\n\t}\n\treturn nil\n}" ]
[ "0.7052551", "0.66646963", "0.6281283", "0.627407", "0.61887354", "0.61412156", "0.6101325", "0.6062967", "0.60167694", "0.60028285", "0.59863514", "0.5935965", "0.58691716", "0.58244735", "0.58145964", "0.5798269", "0.579495", "0.5794271", "0.57862407", "0.578138", "0.57628447", "0.5760283", "0.57323545", "0.57244456", "0.5701484", "0.5697525", "0.5690969", "0.56883395", "0.568647", "0.5684794", "0.5683759", "0.56690705", "0.566446", "0.5661881", "0.5653744", "0.5651588", "0.56494427", "0.56471175", "0.5643201", "0.5642057", "0.56379074", "0.5624862", "0.5618221", "0.5605639", "0.56016845", "0.5594342", "0.5585047", "0.5579404", "0.55777436", "0.5573984", "0.55703664", "0.5566469", "0.5557856", "0.5553933", "0.5552427", "0.5547533", "0.55331045", "0.55316347", "0.5519837", "0.55122304", "0.55121917", "0.5501679", "0.5500214", "0.54985195", "0.54965913", "0.54891497", "0.5489077", "0.5487824", "0.54877007", "0.5484259", "0.548183", "0.54775494", "0.5474719", "0.54734856", "0.5468226", "0.54625744", "0.5452355", "0.5450624", "0.5449557", "0.5448725", "0.5446574", "0.5441014", "0.5438359", "0.5432684", "0.5424162", "0.54224885", "0.5421806", "0.54179615", "0.54169023", "0.54166305", "0.5416503", "0.5410799", "0.54106075", "0.5406457", "0.5403829", "0.53992754", "0.53977257", "0.53883237", "0.53834224", "0.53787917" ]
0.74987465
0
Test that status transitions from Inactive to Active once the start time is reached
func TestActivation(t *testing.T) { mockclock := clock.NewMock() setClock(mockclock) // replace clock with mock for speedy testing now := mockclock.Now() reset() p, _ := New("Promo1", now.Add(1*time.Hour), now.Add(24*time.Hour)) runtime.Gosched() if res := p.AllowDisplay(ip); res != false { t.Errorf("Bad Promo status, got: %v want %v", res, false) } // wind clock forward until after start time; enter display period mockclock.Add(2 * time.Hour) if res := p.AllowDisplay(ip); res != true { t.Errorf("Bad Promo status, got: %v want %v", res, true) } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (pas *PodAutoscalerStatus) inStatusFor(status corev1.ConditionStatus, now time.Time) time.Duration {\n\tcond := pas.GetCondition(PodAutoscalerConditionActive)\n\tif cond == nil || cond.Status != status {\n\t\treturn -1\n\t}\n\treturn now.Sub(cond.LastTransitionTime.Inner.Time)\n}", "func (v *ObservabilityVerifier) ExpectPreparingStatus(g Gomega) {\n\tg.Expect(time.Now().UTC().Sub(v.Shoot.Status.Credentials.Rotation.Observability.LastInitiationTime.Time.UTC())).To(BeNumerically(\"<=\", time.Minute))\n}", "func TestOpenAfterFailures(t *testing.T){\n breaker := NewBreaker(5 * time.Second, 2, 2)\n\n breaker.open()\n\n evaluateCondition(t, breaker.State == OpenState, \"TestOpenAfterFailures\")\n}", "func (suite *HealthCheckTestSuite) TestGetAgentStateActive() {\n\t// UpdateEmptyInstanceInformation will return active in the h.ping() function.\n\tsuite.serviceMock.On(\"UpdateEmptyInstanceInformation\", mock.Anything, version.Version, AgentName).Return(nil, nil)\n\tagentState, err := suite.healthCheck.GetAgentState()\n\t// Assert the status is Active and the error is nil.\n\tassert.Equal(suite.T(), agentState, Active, \"agent state should be active\")\n\tassert.Nil(suite.T(), err, \"GatAgentState function should always return nil as error\")\n}", "func (s *InMemorySuite) TestRecordStatus(c *C) {\n\ttest.WithTimeout(func(ctx context.Context) {\n\t\ttimeline := s.newTimeline()\n\t\tnode := \"test-node\"\n\t\told := &pb.NodeStatus{Name: node, Status: pb.NodeStatus_Running}\n\t\texpected := []*pb.TimelineEvent{history.NewNodeRecovered(s.clock.Now(), node)}\n\n\t\tc.Assert(timeline.RecordStatus(ctx, old), IsNil)\n\n\t\tactual, err := timeline.GetEvents(ctx, nil)\n\t\tc.Assert(err, IsNil)\n\t\tc.Assert(actual, test.DeepCompare, expected, Commentf(\"Expected the status to be recorded.\"))\n\t})\n}", "func TestHalfOpenAfterTimeout(t *testing.T){\n breaker := NewBreaker(2 * time.Second, 2, 2)\n\n breaker.halfOpen()\n\n evaluateCondition(t, breaker.State == HalfOpenState, 
\"TestHalfOpenAfterTimeout\")\n}", "func TestInProgress(t *testing.T) {\n\ttestCase := func(c chan<- fs.Event) {\n\t\tevents.Default.Log(events.ItemStarted, map[string]string{\n\t\t\t\"item\": \"inprogress\",\n\t\t})\n\t\tsleepMs(100)\n\t\tc <- fs.Event{Name: \"inprogress\", Type: fs.NonRemove}\n\t\tsleepMs(1000)\n\t\tevents.Default.Log(events.ItemFinished, map[string]interface{}{\n\t\t\t\"item\": \"inprogress\",\n\t\t})\n\t\tsleepMs(100)\n\t\tc <- fs.Event{Name: \"notinprogress\", Type: fs.NonRemove}\n\t\tsleepMs(800)\n\t}\n\n\texpectedBatches := []expectedBatch{\n\t\t{[][]string{{\"notinprogress\"}}, 2000, 3500},\n\t}\n\n\ttestScenario(t, \"InProgress\", testCase, expectedBatches)\n}", "func (TransferStatus) Started() TransferStatus { return TransferStatus(1) }", "func (s *Stopwatch) active() bool {\n\treturn s.stop.IsZero()\n}", "func (pas *PodAutoscalerStatus) ActiveFor(now time.Time) time.Duration {\n\treturn pas.inStatusFor(corev1.ConditionTrue, now)\n}", "func TestUnitToContainerStatus(t *testing.T) {\n\ttestCases := []struct {\n\t\tunitState api.UnitState\n\t}{\n\t\t{\n\t\t\tunitState: api.UnitState{\n\t\t\t\tWaiting: &api.UnitStateWaiting{\n\t\t\t\t\tReason: \"waiting to start\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tunitState: api.UnitState{\n\t\t\t\tRunning: &api.UnitStateRunning{\n\t\t\t\t\tStartedAt: api.Now(),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tunitState: api.UnitState{\n\t\t\t\tTerminated: &api.UnitStateTerminated{\n\t\t\t\t\tExitCode: int32(rand.Intn(256)),\n\t\t\t\t\tFinishedAt: api.Now(),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tfor _, tc := range testCases {\n\t\tus := api.UnitStatus{\n\t\t\tName: \"myunit\",\n\t\t\tRestartCount: 0,\n\t\t\tImage: \"elotl/myimage\",\n\t\t\tState: tc.unitState,\n\t\t}\n\t\tcs := unitToContainerStatus(us)\n\t\tif us.State.Waiting != nil {\n\t\t\tassert.NotNil(t, cs.State.Waiting)\n\t\t\tassert.Nil(t, cs.State.Running)\n\t\t\tassert.Nil(t, cs.State.Terminated)\n\t\t\tassert.Equal(t, 
us.State.Waiting.Reason, cs.State.Waiting.Reason)\n\t\t}\n\t\tif us.State.Running != nil {\n\t\t\tassert.NotNil(t, cs.State.Running)\n\t\t\tassert.Nil(t, cs.State.Waiting)\n\t\t\tassert.Nil(t, cs.State.Terminated)\n\t\t\tassert.Equal(\n\t\t\t\tt,\n\t\t\t\tus.State.Running.StartedAt.Time,\n\t\t\t\tcs.State.Running.StartedAt.Time)\n\t\t}\n\t\tif us.State.Terminated != nil {\n\t\t\tassert.NotNil(t, cs.State.Terminated)\n\t\t\tassert.Nil(t, cs.State.Running)\n\t\t\tassert.Nil(t, cs.State.Waiting)\n\t\t\tassert.Equal(\n\t\t\t\tt,\n\t\t\t\tus.State.Terminated.ExitCode,\n\t\t\t\tcs.State.Terminated.ExitCode)\n\t\t\tassert.Equal(\n\t\t\t\tt,\n\t\t\t\tus.State.Terminated.FinishedAt.Time,\n\t\t\t\tcs.State.Terminated.FinishedAt.Time)\n\t\t}\n\t}\n}", "func (m *MockUpstreamIntf) ActiveInPastSeconds(arg0 time.Duration) bool {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ActiveInPastSeconds\", arg0)\n\tret0, _ := ret[0].(bool)\n\treturn ret0\n}", "func TestRequestUpFromStop(t *testing.T) {\n\t// setup\n\tdwController := setup(t, 2, Stopped, []common.PiPin{common.OpenerUp})\n\n\t// test\n\tdwController.SetRequestedFloor(3)\n\twaitForStatus(t, 2, 3, Up, dwController, 3*time.Second)\n}", "func JudgeContestStatus(cst *models.Contest, t time.Time) int {\n\tif t.Before(cst.StartTime) {\n\t\treturn -1\n\t}\n\tif t.After(cst.EndTime) {\n\t\treturn 1\n\t}\n\treturn 0\n}", "func (px *Paxos) Status(seq int) (bool, interface{}) {\n\t// Your code here.\n\tcurMin := px.Min()\n\tpx.mu.Lock()\n\tdefer px.mu.Unlock()\n\n\ttargetIns := px.instances[seq]\n\tif seq < curMin || targetIns == nil {\n\t\treturn false, nil\n\t} else {\n\t\t//debug\n\t\t// fmt.Printf(\"Status: isDecided=%t, va=%v, me=%d, seq %d\\n\", targetIns.isDecided, targetIns.vDecided, px.me, seq)\n\t\treturn targetIns.isDecided, targetIns.vDecided\n\t}\n}", "func vgStatusCheck(id string, powerClient *v.IBMPIVolumeGroupClient) {\n\tfor start := time.Now(); time.Since(start) < time.Second*30; {\n\t\ttime.Sleep(10 * 
time.Second)\n\t\tvg, err := powerClient.GetDetails(id)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tif vg.Status == \"available\" {\n\t\t\tbreak\n\t\t}\n\t}\n}", "func TestNetworkStatus(t *testing.T) {\n\tedgeNode := tc.GetEdgeNode(tc.WithTest(t))\n\n\targs := flag.Args()\n\tif len(args) == 0 {\n\t\tt.Fatalf(\"Usage: %s [options] state vol_name...\\n\", os.Args[0])\n\t} else {\n\t\tsecs := int(timewait.Seconds())\n\t\tstate := args[0]\n\t\tt.Log(utils.AddTimestamp(fmt.Sprintf(\"networks: '%s' expected state: '%s' secs: %d\\n\",\n\t\t\targs[1:], state, secs)))\n\n\t\tnws := args[1:]\n\t\tif nws[len(nws)-1] == \"&\" {\n\t\t\tnws = nws[:len(nws)-1]\n\t\t}\n\t\tstates = make(map[string][]nwState)\n\t\tfor _, el := range nws {\n\t\t\tstates[el] = []nwState{{state: \"no info from controller\", timestamp: time.Now()}}\n\t\t}\n\n\t\tif !*newitems {\n\t\t\t// observe existing info object and feed them into eveState object\n\t\t\tif err := tc.GetController().InfoLastCallback(edgeNode.GetID(), nil, eveState.InfoCallback()); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t}\n\n\t\t// we are done if our eveState object is in required state\n\t\tif ready := checkState(eveState, state, nws); ready == nil {\n\n\t\t\ttc.AddProcInfo(edgeNode, checkNet(state, nws))\n\n\t\t\tcallback := func() {\n\t\t\t\tt.Errorf(\"ASSERTION FAILED (%s): expected networks %s in %s state\", time.Now().Format(time.RFC3339Nano), nws, state)\n\t\t\t\tfor k, v := range states {\n\t\t\t\t\tt.Errorf(\"\\tactual %s: %s\", k, v[len(v)-1].state)\n\t\t\t\t\tif checkNewLastState(k, state) {\n\t\t\t\t\t\tt.Errorf(\"\\thistory of states for %s:\", k)\n\t\t\t\t\t\tfor _, st := range v {\n\t\t\t\t\t\t\tt.Errorf(\"\\t\\tstate: %s received in: %s\", st.state, st.timestamp.Format(time.RFC3339Nano))\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\ttc.WaitForProcWithErrorCallback(secs, callback)\n\n\t\t} else {\n\t\t\tt.Log(utils.AddTimestamp(ready.Error()))\n\t\t}\n\n\t\t// sleep to reduce concurrency 
effects\n\t\ttime.Sleep(1 * time.Second)\n\t}\n}", "func checkrequestStatus(d *schema.ResourceData, config Config, requestID string, timeOut int) error {\n\ttimeout := time.After(time.Duration(timeOut) * time.Second)\n\tfor {\n\t\tselect {\n\t\tcase <-time.After(1 * time.Second):\n\t\t\tstatus, state, err := checkServiceRequestStatus(config, requestID)\n\t\t\tif err == nil {\n\t\t\t\tif state == \"finished\" && status == \"Ok\" {\n\t\t\t\t\tlog.Println(\"[DEBUG] Service order added SUCCESSFULLY\")\n\t\t\t\t\td.SetId(requestID)\n\t\t\t\t\treturn nil\n\t\t\t\t} else if status == \"Error\" {\n\t\t\t\t\tlog.Println(\"[ERROR] Failed\")\n\t\t\t\t\treturn fmt.Errorf(\"[Error] Failed execution\")\n\t\t\t\t} else {\n\t\t\t\t\tlog.Println(\"[DEBUG] Request state is :\", state)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase <-timeout:\n\t\t\tlog.Println(\"[DEBUG] Timeout occured\")\n\t\t\treturn fmt.Errorf(\"[ERROR] Timeout\")\n\t\t}\n\t}\n}", "func waitForTableToBeActiveWithRandomSleep(tableName string, client *dynamodb.DynamoDB, maxRetries int, sleepBetweenRetriesMin time.Duration, sleepBetweenRetriesMax time.Duration, terragruntOptions *options.TerragruntOptions) error {\n\tfor i := 0; i < maxRetries; i++ {\n\t\ttableReady, err := LockTableExistsAndIsActive(tableName, client)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif tableReady {\n\t\t\tterragruntOptions.Logger.Debugf(\"Success! Table %s is now in active state.\", tableName)\n\t\t\treturn nil\n\t\t}\n\n\t\tsleepBetweenRetries := util.GetRandomTime(sleepBetweenRetriesMin, sleepBetweenRetriesMax)\n\t\tterragruntOptions.Logger.Debugf(\"Table %s is not yet in active state. 
Will check again after %s.\", tableName, sleepBetweenRetries)\n\t\ttime.Sleep(sleepBetweenRetries)\n\t}\n\n\treturn errors.WithStackTrace(TableActiveRetriesExceeded{TableName: tableName, Retries: maxRetries})\n}", "func checkStatus(currentStatus, wantedStatus int8) (err error) {\n\tswitch currentStatus {\n\tcase constant.RUNNING_STATUS_PREPARING:\n\t\terr = ErrSchedulerBeingInitilated\n\tcase constant.RUNNING_STATUS_STARTING:\n\t\terr = ErrSchedulerBeingStarted\n\tcase constant.RUNNING_STATUS_STOPPING:\n\t\terr = ErrSchedulerBeingStopped\n\tcase constant.RUNNING_STATUS_PAUSING:\n\t\terr = ErrSchedulerBeingPaused\n\t}\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif currentStatus == constant.RUNNING_STATUS_UNPREPARED &&\n\t\twantedStatus != constant.RUNNING_STATUS_PREPARING {\n\t\terr = ErrSchedulerNotInitialized\n\t\treturn\n\t}\n\n\tif currentStatus == constant.RUNNING_STATUS_STOPPED {\n\t\terr = ErrSchedulerStopped\n\t\treturn\n\t}\n\n\tswitch wantedStatus {\n\tcase constant.RUNNING_STATUS_PREPARING:\n\t\tif currentStatus != constant.RUNNING_STATUS_UNPREPARED {\n\t\t\terr = ErrSchedulerInitialized\n\t\t}\n\tcase constant.RUNNING_STATUS_STARTING:\n\t\tif currentStatus != constant.RUNNING_STATUS_PREPARED {\n\t\t\terr = ErrSchedulerStarted\n\t\t}\n\tcase constant.RUNNING_STATUS_PAUSING:\n\t\tif currentStatus != constant.RUNNING_STATUS_STARTED {\n\t\t\terr = ErrSchedulerNotStarted\n\t\t}\n\tcase constant.RUNNING_STATUS_STOPPING:\n\t\tif currentStatus != constant.RUNNING_STATUS_STARTED &&\n\t\t\tcurrentStatus != constant.RUNNING_STATUS_PAUSED {\n\t\t\terr = ErrSchedulerNotStarted\n\t\t}\n\tdefault:\n\t\terr = ErrStatusUnsupported\n\t}\n\treturn\n}", "func (v Value) IsActiveForTime(time int64) bool {\n\treturn time >= v.StartSeconds && time < v.EndSeconds\n}", "func (r *SubscriptionReconciler) checkStatusActive(subscription *eventingv1alpha1.Subscription) (statusChanged, retry bool, err error) {\n\tif subscription.Status.EmsSubscriptionStatus.SubscriptionStatus == 
string(types.SubscriptionStatusActive) {\n\t\tif len(subscription.Status.FailedActivation) > 0 {\n\t\t\tsubscription.Status.FailedActivation = \"\"\n\t\t\treturn true, false, nil\n\t\t}\n\t\treturn false, false, nil\n\t}\n\tt1 := time.Now()\n\tif len(subscription.Status.FailedActivation) == 0 {\n\t\t// it's the first time\n\t\tsubscription.Status.FailedActivation = t1.Format(time.RFC3339)\n\t\treturn true, true, nil\n\t}\n\t// check the timeout\n\tif t0, er := time.Parse(time.RFC3339, subscription.Status.FailedActivation); er != nil {\n\t\terr = er\n\t} else if t1.Sub(t0) > timeoutRetryActiveEmsStatus {\n\t\terr = fmt.Errorf(\"timeout waiting for the subscription to be active: %v\", subscription.Name)\n\t} else {\n\t\tretry = true\n\t}\n\treturn false, retry, err\n}", "func (gt *myGoTickle) Active() bool {\n\treturn gt.avtive\n}", "func waitForTableToBeActive(tableName string, client *dynamodb.DynamoDB, maxRetries int, sleepBetweenRetries time.Duration, terragruntOptions *options.TerragruntOptions) error {\n\treturn waitForTableToBeActiveWithRandomSleep(tableName, client, maxRetries, sleepBetweenRetries, sleepBetweenRetries, terragruntOptions)\n}", "func initGetStatus(dev *globals.BleDev) {\n\tgetStatus := GetStatus{Client: dev.BleClient,\n\t\ttopic: fmt.Sprintf(mappercommon.TopicStateUpdate, dev.Instance.ID)}\n\ttimer := mappercommon.Timer{Function: getStatus.Run, Duration: 1 * time.Second, Times: 0}\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\ttimer.Start()\n\t}()\n}", "func (pas *PodAutoscalerStatus) InactiveFor(now time.Time) time.Duration {\n\treturn pas.inStatusFor(corev1.ConditionFalse, now)\n}", "func printStakeStatus(bigStatus *big.Int, started *big.Int) {\n\t//0-not Staked, 1=Staked, 2=LockedForWithdraw 3= OnDispute\n\tstatus := bigStatus.Uint64()\n\tstakeTime := time.Unix(started.Int64(), 0)\n\tswitch status {\n\tcase 0:\n\t\tfmt.Printf(\"Not currently staked\\n\")\n\tcase 1:\n\t\tfmt.Printf(\"Staked in good standing since %s\\n\", 
stakeTime.UTC())\n\tcase 2:\n\t\tstartedRound := started.Int64()\n\t\tstartedRound = ((startedRound + 86399) / 86400) * 86400\n\t\ttarget := time.Unix(startedRound, 0)\n\t\ttimePassed := time.Now().Sub(target)\n\t\tdelta := timePassed - (time.Hour * 24 * 7)\n\t\tif delta > 0 {\n\t\t\tfmt.Printf(\"Stake has been eligbile to withdraw for %s\\n\", delta)\n\t\t} else {\n\t\t\tfmt.Printf(\"Stake will be eligible to withdraw in %s\\n\", -delta)\n\t\t}\n\tcase 3:\n\t\tfmt.Printf(\"Stake is currently under dispute\")\n\t}\n}", "func (px *Paxos) Status(seq int) (Fate, interface{}) {\n\t// Your code here.\n\tif seq < px.Min() {\n\t\treturn Forgotten, nil\n\t}\n\n\tnode, ok := px.prepareStatus.Find(seq)\n\tif ok && node.State.Done {\n\t\treturn Decided, node.State.VA\n\t}\n\treturn Pending, nil\n}", "func StatefulsetStatusCheck(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, appsUnderTest []experimentTypes.ApplicationUnderTest, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {\n\n\t//Record start timestamp\n\tChaosStartTimeStamp := time.Now().Unix()\n\tisFailed := false\n\n\terr = retry.\n\t\tTimes(uint(experimentsDetails.ChaosDuration / experimentsDetails.Delay)).\n\t\tWait(time.Duration(experimentsDetails.Delay) * time.Second).\n\t\tTry(func(attempt uint) error {\n\t\t\tfor _, app := range appsUnderTest {\n\t\t\t\tstatefulset, err := appsv1StatefulsetClient.Get(app.AppName, metav1.GetOptions{})\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn errors.Errorf(\"Unable to find the statefulset with name %v, err: %v\", app.AppName, err)\n\t\t\t\t}\n\t\t\t\tlog.Infof(\"Statefulset's Ready Replica Count is: %v\", statefulset.Status.ReadyReplicas)\n\t\t\t\tif int(statefulset.Status.ReadyReplicas) != experimentsDetails.Replicas {\n\t\t\t\t\tisFailed = true\n\t\t\t\t\treturn errors.Errorf(\"Application is not scaled yet, err: %v\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tisFailed = false\n\t\t\treturn 
nil\n\t\t})\n\n\tif isFailed {\n\t\terr = AutoscalerRecoveryInStatefulset(experimentsDetails, clients, appsUnderTest)\n\t\tif err != nil {\n\t\t\treturn errors.Errorf(\"Unable to perform autoscaling, err: %v\", err)\n\t\t}\n\t\treturn errors.Errorf(\"Failed to scale the application\")\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// run the probes during chaos\n\tif len(resultDetails.ProbeDetails) != 0 {\n\t\tif err = probe.RunProbes(chaosDetails, clients, resultDetails, \"DuringChaos\", eventsDetails); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t//ChaosCurrentTimeStamp contains the current timestamp\n\tChaosCurrentTimeStamp := time.Now().Unix()\n\tif int(ChaosCurrentTimeStamp-ChaosStartTimeStamp) <= experimentsDetails.ChaosDuration {\n\t\tlog.Info(\"[Wait]: Waiting for completion of chaos duration\")\n\t\ttime.Sleep(time.Duration(experimentsDetails.ChaosDuration-int(ChaosCurrentTimeStamp-ChaosStartTimeStamp)) * time.Second)\n\t}\n\n\treturn nil\n}", "func (timeout *Timeout) IsActive() bool {\n\treturn timeout.state == Active\n}", "func (c *Client) BecomeActive() {\n\tc.addProvisional()\n\tfor !c.killed() {\n\t\tc.addSelf()\n\t\tstartTime := time.Now()\n\t\ttimeout := time.Second * 2\n\t\tfor time.Since(startTime) < timeout {\n\t\t\tvar tempActive bool\n\t\t\tc.mu.Lock()\n\t\t\t// this is set when client receives configuration update\n\t\t\ttempActive = c.active\n\t\t\tc.mu.Unlock()\n\n\t\t\tif tempActive {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\ttime.Sleep(50 * time.Millisecond)\n\t\t}\n\t}\n}", "func TestAssessRunStatusUpdateResult(t *testing.T) {\n\tf := newFixture(t)\n\tdefer f.Close()\n\tc, _, _ := f.newController(noResyncPeriodFunc)\n\trun := &v1alpha1.AnalysisRun{\n\t\tSpec: v1alpha1.AnalysisRunSpec{\n\t\t\tMetrics: []v1alpha1.Metric{\n\t\t\t\t{\n\t\t\t\t\tName: \"sleep-infinity\",\n\t\t\t\t\tProvider: v1alpha1.MetricProvider{\n\t\t\t\t\t\tJob: &v1alpha1.JobMetric{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: 
\"fail-after-30\",\n\t\t\t\t\tProvider: v1alpha1.MetricProvider{\n\t\t\t\t\t\tJob: &v1alpha1.JobMetric{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tStatus: v1alpha1.AnalysisRunStatus{\n\t\t\tPhase: v1alpha1.AnalysisPhaseRunning,\n\t\t\tMetricResults: []v1alpha1.MetricResult{\n\t\t\t\t{\n\t\t\t\t\tName: \"sleep-infinity\",\n\t\t\t\t\tPhase: v1alpha1.AnalysisPhaseRunning,\n\t\t\t\t\tMeasurements: []v1alpha1.Measurement{{\n\t\t\t\t\t\tPhase: v1alpha1.AnalysisPhaseRunning,\n\t\t\t\t\t\tStartedAt: timePtr(metav1.NewTime(time.Now().Add(-60 * time.Second))),\n\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"fail-after-30\",\n\t\t\t\t\tCount: 1,\n\t\t\t\t\tFailed: 1,\n\t\t\t\t\tPhase: v1alpha1.AnalysisPhaseRunning, // This should flip to Failed\n\t\t\t\t\tMeasurements: []v1alpha1.Measurement{{\n\t\t\t\t\t\tPhase: v1alpha1.AnalysisPhaseFailed,\n\t\t\t\t\t\tStartedAt: timePtr(metav1.NewTime(time.Now().Add(-60 * time.Second))),\n\t\t\t\t\t\tFinishedAt: timePtr(metav1.NewTime(time.Now().Add(-60 * time.Second))),\n\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tstatus, message := c.assessRunStatus(run, run.Spec.Metrics, map[string]bool{})\n\tassert.Equal(t, v1alpha1.AnalysisPhaseRunning, status)\n\tassert.Equal(t, \"\", message)\n\tassert.Equal(t, v1alpha1.AnalysisPhaseFailed, run.Status.MetricResults[1].Phase)\n}", "func waitForScanStatus(t *testing.T, f *framework.Framework, namespace, name string, targetStaus complianceoperatorv1alpha1.ComplianceScanStatusPhase) error {\n\texampleComplianceScan := &complianceoperatorv1alpha1.ComplianceScan{}\n\tvar lastErr error\n\t// retry and ignore errors until timeout\n\ttimeouterr := wait.Poll(retryInterval, timeout, func() (bool, error) {\n\t\tlastErr = f.Client.Get(goctx.TODO(), types.NamespacedName{Name: name, Namespace: namespace}, exampleComplianceScan)\n\t\tif lastErr != nil {\n\t\t\tif apierrors.IsNotFound(lastErr) {\n\t\t\t\tt.Logf(\"Waiting for availability of %s compliancescan\\n\", name)\n\t\t\t\treturn 
false, nil\n\t\t\t}\n\t\t\tt.Logf(\"Retrying. Got error: %v\\n\", lastErr)\n\t\t\treturn false, nil\n\t\t}\n\n\t\tif exampleComplianceScan.Status.Phase == targetStaus {\n\t\t\treturn true, nil\n\t\t}\n\t\tt.Logf(\"Waiting for run of %s compliancescan (%s)\\n\", name, exampleComplianceScan.Status.Phase)\n\t\treturn false, nil\n\t})\n\t// Error in function call\n\tif lastErr != nil {\n\t\treturn lastErr\n\t}\n\t// Timeout\n\tif timeouterr != nil {\n\t\treturn timeouterr\n\t}\n\tt.Logf(\"ComplianceScan ready (%s)\\n\", exampleComplianceScan.Status.Phase)\n\treturn nil\n}", "func (t *Track) StepActive(step uint16) bool {\n\treturn getBit(step, t.measure)\n}", "func (px *Paxos) Status(seq int) (Fate, interface{}) {\n\t// Your code here\n\n\t//log.Printf(\"judge status of %d\\n\", seq)\n\tif seq < px.Min() {\n\t\t//\tlog.Printf(\"forgotten\\n\")\n\t\treturn Forgotten, nil\n\t}\n\t//log.Printf(\"no forgotten\\n\")\n\tpx.mu.Lock()\n\tdefer px.mu.Unlock()\n\n\t_, exist := px.acceptor[seq]\n\n\tif exist {\n\t\treturn px.acceptor[seq].state, px.acceptor[seq].decided.Value\n\t}\n\treturn Pending, nil\n}", "func (ut *utilizationTracker) CheckStarted(longRunning bool) {\n\tut.Lock()\n\n\tut.isRunningLongCheck = longRunning\n\tut.checkStart = ut.clock.Now()\n\n\tut.Unlock()\n}", "func (control *Control) Status(waived string) (status string) {\n\tstatus = ResultStatusPassed\n\tfor _, result := range control.Results {\n\t\tif result.Status == ResultStatusFailed {\n\t\t\tstatus = ResultStatusFailed\n\t\t\tbreak\n\t\t} else if result.Status == ResultStatusSkipped {\n\t\t\tstatus = ResultStatusSkipped\n\t\t}\n\t}\n\tif waived == ControlWaivedStrYesRun || waived == ControlWaivedStrYes {\n\t\tstatus = ResultStatusWaived\n\t}\n\treturn status\n}", "func (px *Paxos) Status(seq int) (Fate, interface{}) {\n \n if seq < px.Min() {\n return Forgotten, nil\n }\n\n instance := px.getInstance(seq)\n return instance.Fate, instance.Va\n}", "func (tc *MXController) updateStatusSingle(mxjob 
*mxv1.MXJob, rtype mxv1.MXReplicaType, replicas int, restart, schedulerCompleted bool) error {\n\tmxjobKey, err := KeyFunc(mxjob)\n\tif err != nil {\n\t\tutilruntime.HandleError(fmt.Errorf(\"couldn't get key for mxjob object %#v: %v\", mxjob, err))\n\t\treturn err\n\t}\n\n\t// Expect to have `replicas - succeeded` pods alive.\n\texpected := replicas - int(mxjob.Status.MXReplicaStatuses[rtype].Succeeded)\n\trunning := int(mxjob.Status.MXReplicaStatuses[rtype].Active)\n\tfailed := int(mxjob.Status.MXReplicaStatuses[rtype].Failed)\n\n\tmxlogger.LoggerForJob(mxjob).Infof(\"MXJob=%s, ReplicaType=%s expected=%d, running=%d, failed=%d\",\n\t\tmxjob.Name, rtype, expected, running, failed)\n\t// set StartTime.\n\tif mxjob.Status.StartTime == nil {\n\t\tnow := metav1.Now()\n\t\tmxjob.Status.StartTime = &now\n\t\t// enqueue a sync to check if job past ActiveDeadlineSeconds\n\t\tif mxjob.Spec.ActiveDeadlineSeconds != nil {\n\t\t\tmxlogger.LoggerForJob(mxjob).Infof(\"Job with ActiveDeadlineSeconds will sync after %d seconds\", *mxjob.Spec.ActiveDeadlineSeconds)\n\t\t\ttc.WorkQueue.AddAfter(mxjobKey, time.Duration(*mxjob.Spec.ActiveDeadlineSeconds)*time.Second)\n\t\t}\n\t}\n\n\tif ContainSchedulerSpec(mxjob) {\n\t\tif rtype == mxv1.MXReplicaTypeScheduler {\n\t\t\tif running > 0 {\n\t\t\t\tmsg := fmt.Sprintf(\"MXJob %s is running.\", mxjob.Name)\n\t\t\t\terr := updateMXJobConditions(mxjob, mxv1.MXJobRunning, mxJobRunningReason, msg)\n\t\t\t\tif err != nil {\n\t\t\t\t\tmxlogger.LoggerForJob(mxjob).Infof(\"Append mxjob condition error: %v\", err)\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\tif expected == 0 {\n\t\t\t\tmsg := fmt.Sprintf(\"MXJob %s is successfully completed.\", mxjob.Name)\n\t\t\t\tif mxjob.Status.CompletionTime == nil {\n\t\t\t\t\tnow := metav1.Now()\n\t\t\t\t\tmxjob.Status.CompletionTime = &now\n\t\t\t\t}\n\t\t\t\terr := updateMXJobConditions(mxjob, mxv1.MXJobSucceeded, mxJobSucceededReason, msg)\n\t\t\t\tif err != nil 
{\n\t\t\t\t\tmxlogger.LoggerForJob(mxjob).Infof(\"Append mxjob condition error: %v\", err)\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else {\n\t\tif rtype == mxv1.MXReplicaTypeWorker || rtype == mxv1.MXReplicaTypeTuner {\n\t\t\t// All workers are succeeded or scheduler completed, leave a succeeded condition.\n\t\t\tif expected == 0 || schedulerCompleted {\n\t\t\t\tmsg := fmt.Sprintf(\"MXJob %s is successfully completed.\", mxjob.Name)\n\t\t\t\tif mxjob.Status.CompletionTime == nil {\n\t\t\t\t\tnow := metav1.Now()\n\t\t\t\t\tmxjob.Status.CompletionTime = &now\n\t\t\t\t}\n\t\t\t\terr := updateMXJobConditions(mxjob, mxv1.MXJobSucceeded, mxJobSucceededReason, msg)\n\t\t\t\tif err != nil {\n\t\t\t\t\tmxlogger.LoggerForJob(mxjob).Infof(\"Append mxjob condition error: %v\", err)\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t} else if running > 0 {\n\t\t\t\t// Some workers are still running, leave a running condition.\n\t\t\t\tmsg := fmt.Sprintf(\"MXJob %s is running.\", mxjob.Name)\n\t\t\t\terr := updateMXJobConditions(mxjob, mxv1.MXJobRunning, mxJobRunningReason, msg)\n\t\t\t\tif err != nil {\n\t\t\t\t\tmxlogger.LoggerForJob(mxjob).Infof(\"Append mxjob condition error: %v\", err)\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif failed > 0 {\n\t\tif restart {\n\t\t\tmsg := fmt.Sprintf(\"MXJob %s is restarting.\", mxjob.Name)\n\t\t\terr := updateMXJobConditions(mxjob, mxv1.MXJobRestarting, mxJobRestartingReason, msg)\n\t\t\tif err != nil {\n\t\t\t\tmxlogger.LoggerForJob(mxjob).Infof(\"Append mxjob condition error: %v\", err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\tmsg := fmt.Sprintf(\"MXJob %s is failed.\", mxjob.Name)\n\t\t\tif mxjob.Status.CompletionTime == nil {\n\t\t\t\tnow := metav1.Now()\n\t\t\t\tmxjob.Status.CompletionTime = &now\n\t\t\t}\n\t\t\terr := updateMXJobConditions(mxjob, mxv1.MXJobFailed, mxJobFailedReason, msg)\n\t\t\tif err != nil {\n\t\t\t\tmxlogger.LoggerForJob(mxjob).Infof(\"Append mxjob condition error: %v\", 
err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func TestSucceedsAfterTimeout(t *testing.T){\n breaker := NewBreaker(2 * time.Second, 2, 2)\n\n breaker.halfOpen()\n\n _, err := breaker.Run(alwaysSucceedsFunc)\n\n evaluateCondition(t, err == nil, \"TestSucceedsAfterTimeout\")\n}", "func (expStatus *ExperimentStatus) InitialExperimentStatus(experimentDetails *ExperimentDetails) {\n\texpStatus.Name = experimentDetails.JobName\n\texpStatus.Status = \"Waiting for Job Creation\"\n\texpStatus.Verdict = \"Waiting\"\n\texpStatus.LastUpdateTime = metav1.Now()\n}", "func (official CompetitionOfficial) Active() bool {\n\treturn time.Now().Before(official.EffectiveUntil) && time.Now().After(official.EffectiveFrom)\n}", "func WaitUntilInState(client SkytapClient, desiredStates []string, r RunstateAwareResource, requireStateChange bool) (RunstateAwareResource, error) {\n\tlog.WithFields(log.Fields{\"desiredStates\": desiredStates, \"resource\": r}).Info(\"Waiting until resource is in desired state\")\n\tstart := time.Now()\n\n\tcurrent, err := r.Refresh(client)\n\tif err != nil {\n\t\treturn current, err\n\t}\n\n\thasChanged := !requireStateChange || current.RunstateStr() != r.RunstateStr()\n\n\tmaxBusyWaitPeriods := 20\n\twaitPeriod := 10 * time.Second\n\tfor i := 0; i < maxBusyWaitPeriods && !(hasChanged && stringInSlice(current.RunstateStr(), desiredStates)); i++ {\n\t\ttime.Sleep(waitPeriod)\n\t\tcurrent, err = r.Refresh(client)\n\t\tif err != nil {\n\t\t\treturn current, err\n\t\t}\n\t\thasChanged = hasChanged || current.RunstateStr() != r.RunstateStr()\n\t}\n\tif !stringInSlice(current.RunstateStr(), desiredStates) {\n\t\treturn current, errors.New(fmt.Sprintf(\"Didn't achieve any desired runstate in %s after %d seconds, resource is in runstate %s\", desiredStates, time.Now().Unix()-start.Unix(), current.RunstateStr()))\n\t}\n\treturn current, err\n}", "func (m *Member) IsActive() bool { return m.State == \"active\" }", "func (m *ScheduleItem) 
SetStatus(value *FreeBusyStatus)() {\n err := m.GetBackingStore().Set(\"status\", value)\n if err != nil {\n panic(err)\n }\n}", "func statusChanged(status PingerTaskStatus) bool {\n\treturn status.Consecutive == 1 && status.LatestResult.Status != ping.StatusUnknown\n}", "func (px *Paxos) Status(seq int) (Fate, interface{}) {\n\n\tif seq < px.Min() {\n\t\treturn Forgotten, nil\n\t}\n\n\tpx.mu.Lock()\n\tdefer px.mu.Unlock()\n\n\tif val, ok := px.decidedVals[seq]; ok {\n\t\treturn Decided, val\n\t}\n\treturn Pending, nil\n}", "func toggleStatus(writer http.ResponseWriter, req *http.Request) {\n\tvars := mux.Vars(req)\n\tcreatedAt, err := time.Parse(time.RFC3339, vars[\"createdAt\"]) //converts requested time into correct format\n\tif err != nil {\n\t\tlog.Print(\"error:\", err)\n\t\thttp.Error(writer, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tdatabase, err := loadJsonFile()\n\tif err != nil {\n\t\tlog.Print(\"error:\", err)\n\t\thttp.Error(writer, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tfor i, _ := range database.Tasks {\n\t\tif database.Tasks[i].CreatedAt.Equal(createdAt) {\n\t\t\tdatabase.Tasks[i].Completed = !database.Tasks[i].Completed\n\t\t\treturnJson(database, writer)\n\t\t\treturn\n\t\t}\n\t}\n\t//this code runs only if no taks was found with the correct createdAt timestamp\n\thttp.Error(writer, err.Error(), http.StatusBadRequest)\n}", "func (s Status) Initial() bool {\n\treturn s == StatusStarting || s == StatusRetrying\n}", "func (s *TiFlashSpec) Status(ctx context.Context, timeout time.Duration, tlsCfg *tls.Config, pdList ...string) string {\n\tstoreAddr := utils.JoinHostPort(s.Host, s.FlashServicePort)\n\tstate := checkStoreStatus(ctx, storeAddr, tlsCfg, pdList...)\n\tif s.Offline && strings.ToLower(state) == \"offline\" {\n\t\tstate = \"Pending Offline\" // avoid misleading\n\t}\n\treturn state\n}", "func TestLobbyReadyStatus(t *testing.T) {\n\tlobbySvc := NewLobbyService()\n\tlobby := 
lobbySvc.CreateLobby()\n\n\tres, found := lobbySvc.IsReady(lobby.ID)\n\tassert.True(t, found)\n\tassert.False(t, res)\n\n\t_, err := lobbySvc.JoinLobby(pogo.LobbyJoinMsg{LobbyID: lobby.ID, PlayerID: \"Player1\"})\n\tassert.NoError(t, err)\n\n\tres, found = lobbySvc.IsReady(lobby.ID)\n\tassert.True(t, found)\n\tassert.False(t, res)\n\n\t_, err = lobbySvc.ReadyPlayer(lobby.ID, pogo.PlayerReadyMsg{PlayerName: \"Player1\", Ready: true})\n\tassert.NoError(t, err)\n\n\tres, found = lobbySvc.IsReady(lobby.ID)\n\tassert.True(t, found)\n\tassert.False(t, res)\n\n\t_, err = lobbySvc.JoinLobby(pogo.LobbyJoinMsg{LobbyID: lobby.ID, PlayerID: \"Player2\"})\n\tassert.NoError(t, err)\n\n\t_, err = lobbySvc.JoinLobby(pogo.LobbyJoinMsg{LobbyID: lobby.ID, PlayerID: \"Player3\"})\n\tassert.NoError(t, err)\n\n\tres, found = lobbySvc.IsReady(lobby.ID)\n\tassert.True(t, found)\n\tassert.False(t, res)\n\n\t_, err = lobbySvc.ReadyPlayer(lobby.ID, pogo.PlayerReadyMsg{PlayerName: \"Player2\", Ready: true})\n\tassert.NoError(t, err)\n\n\tres, found = lobbySvc.IsReady(lobby.ID)\n\tassert.True(t, found)\n\tassert.False(t, res)\n\n\t_, err = lobbySvc.ReadyPlayer(lobby.ID, pogo.PlayerReadyMsg{PlayerName: \"Player3\", Ready: true})\n\tassert.NoError(t, err)\n\n\tres, found = lobbySvc.IsReady(lobby.ID)\n\tassert.True(t, found)\n\tassert.True(t, res)\n}", "func (t *StatusTracker) UpdateStatus() {\n\tif t.failures < t.halfOpenThreshold {\n\t\tt.status = Closed\n\t} else if t.failures < t.openThreshold {\n\t\tt.status = HalfOpen\n\t} else {\n\t\tt.status = Open\n\t}\n}", "func (c *Client) doWaitForStatus(eniID string, checkNum, checkInterval int, finalStatus string) error {\n\tfor i := 0; i < checkNum; i++ {\n\t\ttime.Sleep(time.Second * time.Duration(checkInterval))\n\t\tenis, err := c.queryENI(eniID, \"\", \"\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, eni := range enis {\n\t\t\tif *eni.NetworkInterfaceId == eniID {\n\t\t\t\tswitch *eni.State {\n\t\t\t\tcase 
ENI_STATUS_AVAILABLE:\n\t\t\t\t\tswitch finalStatus {\n\t\t\t\t\tcase ENI_STATUS_ATTACHED:\n\t\t\t\t\t\tif eni.Attachment != nil && eni.Attachment.InstanceId != nil {\n\t\t\t\t\t\t\tblog.Infof(\"eni %s is attached\", eniID)\n\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t}\n\t\t\t\t\t\tblog.Infof(\"eni %s is not attached\", eniID)\n\t\t\t\t\tcase ENI_STATUS_DETACHED:\n\t\t\t\t\t\tif eni.Attachment == nil {\n\t\t\t\t\t\t\tblog.Infof(\"eni %s is detached\", eniID)\n\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t}\n\t\t\t\t\t\tblog.Infof(\"eni %s is not detached\", eniID)\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tblog.Infof(\"eni %s is %s now\", eniID, *eni.State)\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\t\t\t\t\tbreak\n\t\t\t\tcase ENI_STATUS_PENDING, ENI_STATUS_ATTACHING, ENI_STATUS_DETACHING, ENI_STATUS_DELETING:\n\t\t\t\t\tblog.Infof(\"eni %s is %s\", eniID, *eni.State)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tblog.Errorf(\"timeout when wait for eni %s\", eniID)\n\treturn fmt.Errorf(\"timeout when wait for eni %s\", eniID)\n}", "func assertSystemdActiveState(unitName string) error {\n\tfetchSystemdActiveState := func() error {\n\t\tus, err := cAPI.UnitState(unitName)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error getting unit state of %s: %v\", unitName, err)\n\t\t}\n\n\t\t// Get systemd state and check the state is active & loaded.\n\t\tif us.SystemdActiveState != \"active\" || us.SystemdLoadState != \"loaded\" {\n\t\t\treturn fmt.Errorf(\"Failed to find an active unit %s\", unitName)\n\t\t}\n\t\treturn nil\n\t}\n\n\ttimeout, err := waitForState(fetchSystemdActiveState)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to find an active unit %s within %v, err: %v\",\n\t\t\tunitName, timeout, err)\n\t}\n\n\treturn nil\n}", "func TestUpDownStartStop(t *testing.T) {\n\tvar sl SkipLog\n\tlog.SetOutput(sl)\n\n\tseq := make(map[string][]bool)\n\tseq[\"h1\"] = []bool{true, false, false, false, true, true, false}\n\tseq[\"h2\"] = []bool{false, false, true, true, false, true, 
true}\n\tseq[\"h3\"] = []bool{true, true, true, false, true, true, false}\n\tseq[\"h4\"] = []bool{false, false, true, true, false, false, false}\n\tload := []string{\"h1\", \"h2\"}\n\n\tresultSeq := []HostStatus{\n\t\tHostStatus{\"h2\", true}, // h2 goes down first\n\t\tHostStatus{\"h1\", true}, // h1 follows\n\t\tHostStatus{\"h2\", false}, // h2 goes up\n\t\tHostStatus{\"h1\", false}, // h1 goes up\n\t\tHostStatus{\"h1\", true}, // h2 goes down\n\t\tHostStatus{\"h2\", true}, // h1 goes down\n\t}\n\n\tstartHostChFW, stopHostChFW, notifyChFW := createTestPool(seq, load)\n\n\t// Test expected events for h1 and h2\n\tfor _, expected := range resultSeq {\n\t\tevent := <-notifyChFW\n\t\tif event.Host != expected.Host || event.Down != expected.Down {\n\t\t\tt.Errorf(\"Got event: %s %t, expected: %s %t \\n\", event.Host, event.Down, expected.Host, expected.Down)\n\t\t}\n\t}\n\n\tstartHostChFW <- HostStatus{\"h3\", false} // start h3 as UP\n\tstartHostChFW <- HostStatus{\"h4\", true} // start h4 as DOWN\n\n\t// Expect h4 to come UP (down=false) first\n\tevent := <-notifyChFW\n\tif event.Host != \"h4\" || event.Down != false {\n\t\tt.Errorf(\"Got event: %s %t, expected: %s %t \\n\", event.Host, event.Down, \"h4\", false)\n\t}\n\n\t// Stop monitoring h4\n\tstopHostChFW <- event\n\t// Expect h3 will eventually go DOWN (down=true)\n\tevent = <-notifyChFW\n\tif event.Host != \"h3\" || event.Down != true {\n\t\tt.Errorf(\"Got event: %s %t, expected: %s %t \\n\", event.Host, event.Down, \"h3\", true)\n\t}\n\n\t// There should be no more events,\n\t// close channel and wait, which will\n\t// panic if more events come in\n\tclose(notifyChFW)\n\n\ttime.Sleep(time.Millisecond * 20)\n}", "func (me TQualificationTypeStatus) IsActive() bool { return me.String() == \"Active\" }", "func init_time() {\n\tSTARTEDON = time.Now()\n}", "func (px *Paxos) Status(seq int) (Fate, interface{}) {\n\t// Your code here.\n\t// check if seq number is less than min, return forgotten if it is\n\t// check 
map for struct for given seq, if agreed value is nil, return pending with agreed value\n\t// otherwise return decided and agreed value for seq\n\n\tmin := px.Min()\n\tpx.mu.Lock()\n\tdefer px.mu.Unlock()\n\tif seq < min {\n\t\treturn Forgotten, nil\n\t} else if px.instances[seq] != nil {\n\t\tif px.instances[seq].decided {\n\t\t\treturn Decided, px.instances[seq].va\n\t\t}\n\t}\n\treturn Pending, nil\n}", "func TestStatus() {\n\tfmt.Println(\"\")\n\tfmt.Println(\"=============================================================================================\")\n\tfmt.Printf(\"TOTAL TEST SUITES : %d\\n\", counters.suitesCount)\n\tfmt.Printf(\"TOTAL TEST CASES : %d\\n\", counters.testCaseCount)\n\tfmt.Printf(\"TOTAL TEST METHODS : %d\\n\", counters.methodsCount)\n\tfmt.Printf(\"TOTAL TEST METHODS PASS : %d\\n\", counters.methodsPassedCount)\n\tfmt.Printf(\"TOTAL TEST METHODS FAIL : %d\\n\", counters.methodsFailedCount)\n\tfmt.Println(\"\")\n\tif TestPassed {\n\t\tfmt.Println(\"TEST STATUS : PASS\")\n\t} else {\n\t\tfmt.Println(\"TEST STATUS : FAIL\")\n\t}\n\tfmt.Println(\"=============================================================================================\")\n\tfmt.Println(\"\")\n}", "func (a *ActivityMonitor) Active() bool {\n\treturn a.NumActiveRequests() > 0\n}", "func TestInitialDateTimeChecks3(t *testing.T) {\n\n\texpected := false\n\n\tcurrDate := time.Now().Format(\"2006-01-02\")\n\tbeforeRestaurantOpenCheck, _ := restaurantBooking.InitialDateTimeChecks(currDate, \"12:00\")\n\tafterRestaurantOpenCheck, _ := restaurantBooking.InitialDateTimeChecks(currDate, \"22:00\")\n\n\tif (beforeRestaurantOpenCheck && afterRestaurantOpenCheck) != expected {\n\t\tt.Fail()\n\t}\n}", "func (batch *eventBatch) isActive(targetTimestampMs *int64) bool {\n\t// new log event batch\n\tif batch.minTimestampMs == 0 || batch.maxTimestampMs == 0 {\n\t\treturn true\n\t}\n\tif *targetTimestampMs-batch.minTimestampMs > 24*3600*1e3 {\n\t\treturn false\n\t}\n\tif 
batch.maxTimestampMs-*targetTimestampMs > 24*3600*1e3 {\n\t\treturn false\n\t}\n\treturn true\n}", "func TestIncompleteBirdShowStatus(t *testing.T) {\n\tout := \"1011-Router ID is 192.168.1.9\\n\" +\n\t\t\" Current server time is 2018-12-27 12:15:01\\n\" +\n\t\t\" Last reboot on 2018-12-21 12:35:11\\n\"\n\tcompleted := containsActionCompletedCode([]byte(out))\n\n\tassert.False(\"'show status' successfully completed\", completed, t)\n}", "func (m *Master) checkComplete(taskType string, taskNum int) {\n\n\ttime.Sleep(10 * time.Second)\n\tm.mu.Lock()\n\tif taskType == \"map\" {\n\t\tif m.mapState[taskNum].Value == \"in-progress\" {\n\t\t\tm.mapState[taskNum].Value = \"idle\"\n\t\t}\n\t} else {\n\t\tif m.reduceState[taskNum] == \"in-progress\" {\n\t\t\tm.reduceState[taskNum] = \"idle\"\n\t\t}\n\t}\n\tm.mu.Unlock()\n}", "func (v *ObservabilityVerifier) ExpectCompletingStatus(g Gomega) {}", "func MarkActive(r *v1alpha1.Revision) {\n\tr.Status.MarkActive()\n}", "func (px *Paxos) Status(seq int) (bool, interface{}) {\n\t// Your code here.\n\treturn px.Lslots[seq].Decided, px.Lslots[seq].V\n}", "func TestFailWithOpenState(t *testing.T){\n breaker := NewBreaker(5 * time.Second, 2, 2)\n\n breaker.open()\n\n _, err := breaker.Run(alwaysSucceedsFunc)\n\n evaluateCondition(t, err != nil && err.Error() == OpenStateError, \"TestFailWithOpenState\")\n}", "func (px *Paxos) Status(seq int) (bool, interface{}) {\n\t//DPrintf(\"Status(%d)\\n\", seq)\n\n\tif pi, ok := px.instances[seq]; ok {\n\t\treturn pi.Decided, pi.V_a\n\t}\n\n\t// instace for the seq no does not exist yet\n\tif seq > px.nseq {\n\t\tpx.nseq = seq\n\t}\n\n\treturn false, -1\n}", "func (s *TrackerSuite) TestStartNotStopped() {\n\n\tevent := s.service.StartNew()\n\tassert.Equal(s.T(), ErrorStart, s.service.Start(event))\n}", "func TestOpensAfterHalfOpenFailure(t *testing.T){\n breaker := NewBreaker(2 * time.Second, 2, 2)\n\n breaker.halfOpen()\n breaker.Run(alwaysFailsFunc)\n\n evaluateCondition(t, breaker.State == 
OpenState, \"TestOpensAfterHalfOpenFailure\")\n}", "func (px *Paxos) Status(seq int) (Fate, interface{}) {\n\t// Your code here.\n\tif seq < px.min {\n\t\treturn Forgotten, nil\n\t}\n\n\tinstance, ok := px.getInstance(seq)\n\tif !ok {\n\t\treturn Unknown, nil\n\t}\n\treturn instance.status, instance.aValue\n}", "func DUTActive(ctx context.Context, servoInst *servo.Servo) (bool, error) {\n\tstate, err := servoInst.GetECSystemPowerState(ctx)\n\tif err != nil {\n\t\treturn false, errors.Wrap(err, \"failed to get ec_system_power_state\")\n\t}\n\ttesting.ContextLog(ctx, \"state: \", state)\n\tif state == \"S0\" {\n\t\treturn true, nil\n\t}\n\treturn false, nil\n}", "func (TransferStatus) NotStarted() TransferStatus { return TransferStatus(0) }", "func TestCurrentTraffic(t *testing.T) {\n\tvar (\n\t\tbackoff = time.Hour * 5\n\t\twithinBackoff = testTime.Add(time.Hour * -1)\n\t\toutsideBackoff = testTime.Add(backoff * -2)\n\n\t\tsuccess = []*loopdb.LoopEvent{\n\t\t\t{\n\t\t\t\tSwapStateData: loopdb.SwapStateData{\n\t\t\t\t\tState: loopdb.StateSuccess,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tfailedInBackoff = []*loopdb.LoopEvent{\n\t\t\t{\n\t\t\t\tSwapStateData: loopdb.SwapStateData{\n\t\t\t\t\tState: loopdb.StateFailOffchainPayments,\n\t\t\t\t},\n\t\t\t\tTime: withinBackoff,\n\t\t\t},\n\t\t}\n\n\t\tfailedOutsideBackoff = []*loopdb.LoopEvent{\n\t\t\t{\n\t\t\t\tSwapStateData: loopdb.SwapStateData{\n\t\t\t\t\tState: loopdb.StateFailOffchainPayments,\n\t\t\t\t},\n\t\t\t\tTime: outsideBackoff,\n\t\t\t},\n\t\t}\n\n\t\tfailedTimeoutInBackoff = []*loopdb.LoopEvent{\n\t\t\t{\n\t\t\t\tSwapStateData: loopdb.SwapStateData{\n\t\t\t\t\tState: loopdb.StateFailTimeout,\n\t\t\t\t},\n\t\t\t\tTime: withinBackoff,\n\t\t\t},\n\t\t}\n\n\t\tfailedTimeoutOutsideBackoff = []*loopdb.LoopEvent{\n\t\t\t{\n\t\t\t\tSwapStateData: loopdb.SwapStateData{\n\t\t\t\t\tState: loopdb.StateFailTimeout,\n\t\t\t\t},\n\t\t\t\tTime: outsideBackoff,\n\t\t\t},\n\t\t}\n\t)\n\n\ttests := []struct {\n\t\tname 
string\n\t\tloopOut []*loopdb.LoopOut\n\t\tloopIn []*loopdb.LoopIn\n\t\texpected *swapTraffic\n\t}{\n\t\t{\n\t\t\tname: \"completed swaps ignored\",\n\t\t\tloopOut: []*loopdb.LoopOut{\n\t\t\t\t{\n\t\t\t\t\tLoop: loopdb.Loop{\n\t\t\t\t\t\tEvents: success,\n\t\t\t\t\t},\n\t\t\t\t\tContract: &loopdb.LoopOutContract{},\n\t\t\t\t},\n\t\t\t},\n\t\t\tloopIn: []*loopdb.LoopIn{\n\t\t\t\t{\n\t\t\t\t\tLoop: loopdb.Loop{\n\t\t\t\t\t\tEvents: success,\n\t\t\t\t\t},\n\t\t\t\t\tContract: &loopdb.LoopInContract{},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpected: newSwapTraffic(),\n\t\t},\n\t\t{\n\t\t\t// No events indicates that the swap is still pending.\n\t\t\tname: \"pending swaps included\",\n\t\t\tloopOut: []*loopdb.LoopOut{\n\t\t\t\t{\n\t\t\t\t\tContract: &loopdb.LoopOutContract{\n\t\t\t\t\t\tOutgoingChanSet: []uint64{\n\t\t\t\t\t\t\tchanID1.ToUint64(),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tloopIn: []*loopdb.LoopIn{\n\t\t\t\t{\n\t\t\t\t\tContract: &loopdb.LoopInContract{\n\t\t\t\t\t\tLastHop: &peer2,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpected: &swapTraffic{\n\t\t\t\tongoingLoopOut: map[lnwire.ShortChannelID]bool{\n\t\t\t\t\tchanID1: true,\n\t\t\t\t},\n\t\t\t\tongoingLoopIn: map[route.Vertex]bool{\n\t\t\t\t\tpeer2: true,\n\t\t\t\t},\n\t\t\t\t// Make empty maps so that we can assert equal.\n\t\t\t\tfailedLoopOut: make(\n\t\t\t\t\tmap[lnwire.ShortChannelID]time.Time,\n\t\t\t\t),\n\t\t\t\tfailedLoopIn: make(map[route.Vertex]time.Time),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"failure backoff included\",\n\t\t\tloopOut: []*loopdb.LoopOut{\n\t\t\t\t{\n\t\t\t\t\tContract: &loopdb.LoopOutContract{\n\t\t\t\t\t\tOutgoingChanSet: []uint64{\n\t\t\t\t\t\t\tchanID1.ToUint64(),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tLoop: loopdb.Loop{\n\t\t\t\t\t\tEvents: failedInBackoff,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tContract: &loopdb.LoopOutContract{\n\t\t\t\t\t\tOutgoingChanSet: 
[]uint64{\n\t\t\t\t\t\t\tchanID2.ToUint64(),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tLoop: loopdb.Loop{\n\t\t\t\t\t\tEvents: failedOutsideBackoff,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tloopIn: []*loopdb.LoopIn{\n\t\t\t\t{\n\t\t\t\t\tContract: &loopdb.LoopInContract{\n\t\t\t\t\t\tLastHop: &peer1,\n\t\t\t\t\t},\n\t\t\t\t\tLoop: loopdb.Loop{\n\t\t\t\t\t\tEvents: failedTimeoutInBackoff,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tContract: &loopdb.LoopInContract{\n\t\t\t\t\t\tLastHop: &peer2,\n\t\t\t\t\t},\n\t\t\t\t\tLoop: loopdb.Loop{\n\t\t\t\t\t\tEvents: failedTimeoutOutsideBackoff,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpected: &swapTraffic{\n\t\t\t\tongoingLoopOut: make(\n\t\t\t\t\tmap[lnwire.ShortChannelID]bool,\n\t\t\t\t),\n\t\t\t\tongoingLoopIn: make(map[route.Vertex]bool),\n\t\t\t\tfailedLoopOut: map[lnwire.ShortChannelID]time.Time{\n\t\t\t\t\tchanID1: withinBackoff,\n\t\t\t\t},\n\t\t\t\tfailedLoopIn: map[route.Vertex]time.Time{\n\t\t\t\t\tpeer1: withinBackoff,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, testCase := range tests {\n\t\tcfg, _ := newTestConfig()\n\t\tm := NewManager(cfg)\n\n\t\tparams := m.GetParameters()\n\t\tparams.FailureBackOff = backoff\n\t\trequire.NoError(t, m.setParameters(context.Background(), params))\n\n\t\tactual := m.currentSwapTraffic(testCase.loopOut, testCase.loopIn)\n\t\trequire.Equal(t, testCase.expected, actual)\n\t}\n}", "func (runner *McRunner) updateStatus() {\n\trunner.WaitGroup.Add(1)\n\tdefer runner.WaitGroup.Done()\n\tfor {\n\t\tselect {\n\t\tcase <-runner.StatusRequestChannel:\n\t\t\tif runner.State != Running {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tstatus := new(Status)\n\t\t\tstatus.Name = runner.Settings.Name\n\t\t\tstatus.PlayerMax = runner.Settings.MaxPlayers\n\t\t\tswitch runner.State {\n\t\t\tcase NotRunning:\n\t\t\t\tstatus.Status = \"Not Running\"\n\t\t\tcase Starting:\n\t\t\t\tstatus.Status = \"Starting\"\n\t\t\tcase Running:\n\t\t\t\tstatus.Status = 
\"Running\"\n\t\t\t}\n\t\t\tstatus.ActiveTime = int(time.Since(runner.startTime).Seconds())\n\n\t\t\tproc, _ := process.NewProcess(int32(runner.cmd.Process.Pid))\n\t\t\tmemInfo, _ := proc.MemoryInfo()\n\t\t\tstatus.MemoryMax = runner.Settings.MaxRAM\n\t\t\tstatus.Memory = int(memInfo.RSS / (1024 * 1024))\n\n\t\t\tworldPath := filepath.Join(McServerPath(), \"world\")\n\t\t\tusage, _ := disk.Usage(worldPath)\n\t\t\tstatus.Storage = usage.Used / (1024 * 1024)\n\t\t\tstatus.StorageMax = usage.Total / (1024 * 1024)\n\n\t\t\trunner.executeCommand(\"list\")\n\t\t\tstatus.PlayerCount = <-runner.playerChannel\n\n\t\t\ttpsMap := make(map[int]float32)\n\t\t\trunner.executeCommand(\"forge tps\")\n\t\tloop:\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase m := <-runner.tpsChannel:\n\t\t\t\t\tfor k, v := range m {\n\t\t\t\t\t\ttpsMap[k] = v\n\t\t\t\t\t}\n\t\t\t\tcase <-time.After(1 * time.Second):\n\t\t\t\t\tbreak loop\n\t\t\t\t}\n\t\t\t}\n\t\t\tvar tpsStrBuilder strings.Builder\n\t\t\ttpsStrBuilder.WriteString(\"{ \")\n\t\t\tfor k, v := range tpsMap {\n\t\t\t\ttpsStrBuilder.WriteString(fmt.Sprintf(\"\\\"%d\\\": %f, \", k, v))\n\t\t\t}\n\t\t\ttpsStr := tpsStrBuilder.String()[:tpsStrBuilder.Len()-3]\n\t\t\ttpsStrBuilder.Reset()\n\t\t\ttpsStrBuilder.WriteString(tpsStr)\n\t\t\ttpsStrBuilder.WriteString(\"}\")\n\t\t\ttpsStr = tpsStrBuilder.String()\n\t\t\tstatus.TPS = []byte(tpsStr)\n\n\t\t\trunner.StatusChannel <- status\n\t\tcase <-runner.killChannel:\n\t\t\treturn\n\t\t}\n\t}\n}", "func (sv *Unit) Active() unit.Activation {\n\tlog.WithField(\"sv\", sv).Debugf(\"sv.Active\")\n\n\t// based of Systemd transtition table found in https://goo.gl/oEjikJ\n\tswitch sv.Sub() {\n\tcase dead:\n\t\treturn unit.Inactive\n\tcase failed:\n\t\treturn unit.Failed\n\tcase reload:\n\t\treturn unit.Reloading\n\tcase running, exited:\n\t\treturn unit.Active\n\tcase start, startPre, startPost, autoRestart:\n\t\treturn unit.Activating\n\tcase stop, stopSigabrt, stopPost, stopSigkill, stopSigterm, 
finalSigkill, finalSigterm:\n\t\treturn unit.Deactivating\n\tdefault:\n\t\tpanic(\"Unknown service sub state\")\n\t}\n}", "func TestResume(t *testing.T) {\n\tdefer test.Guard(t)()\n\n\tt.Run(\"not expired\", func(t *testing.T) {\n\t\ttestResume(t, false, false, true)\n\t})\n\tt.Run(\"expired not revealed\", func(t *testing.T) {\n\t\ttestResume(t, true, false, false)\n\t})\n\tt.Run(\"expired revealed\", func(t *testing.T) {\n\t\ttestResume(t, true, true, true)\n\t})\n}", "func predateLastSeenActiveAt(t testing.TB, db testdb.DB, shardName, nodeName string, amount time.Duration) {\n\tt.Helper()\n\n\t_, err := db.Exec(`\nUPDATE node_status SET last_seen_active_at = last_seen_active_at - INTERVAL '1 MICROSECOND' * $1\nWHERE shard_name = $2 AND node_name = $3`, amount.Microseconds(), shardName, nodeName,\n\t)\n\n\trequire.NoError(t, err)\n}", "func (px *Paxos) Status(seq int) (Fate, interface{}) {\n\tfate := Pending\n\tvar retValue interface{}\n\tpx.mu.Lock()\n\tif seq < px.minSeq {\n\t\tfate = Forgotten\n\t} else if px.isDecided(seq) {\n\t\tfate = Decided\n\t\tretValue = px.getDecidedValue(seq)\n\t}\n\tpx.mu.Unlock()\n\treturn fate, retValue\n}", "func (s *taskService) checkBasicStatus(c context.Context, date time.Time) (ok bool, err error) {\n\treturn s.dp.SendBasicDataRequest(c, fmt.Sprintf(\"{\\\"select\\\": [], \\\"where\\\":{\\\"job_name\\\":{\\\"in\\\":[\\\"ucs_%s\\\"]}}}\", date.Format(\"20060102\")))\n}", "func TestProgressResumeByHeartbeatResp(t *testing.T) {\n\tr := newTestRaft(1, []uint64{1, 2}, 5, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(r)\n\tr.becomeCandidate()\n\tr.becomeLeader()\n\tr.prs[2].Paused = true\n\n\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgBeat})\n\tif !r.prs[2].Paused {\n\t\tt.Errorf(\"paused = %v, want true\", r.prs[2].Paused)\n\t}\n\n\tr.prs[2].becomeReplicate()\n\tr.Step(pb.Message{From: 2, To: 1, Type: pb.MsgHeartbeatResp})\n\tif r.prs[2].Paused {\n\t\tt.Errorf(\"paused = %v, want false\", r.prs[2].Paused)\n\t}\n}", "func 
IsActive(n OsqueryNode, inactive int64) bool {\n\tnow := time.Now()\n\t// Check status if not empty/zero\n\tif !n.LastStatus.IsZero() {\n\t\tif n.LastStatus.Sub(now).Hours() < math.Abs(float64(inactive)) {\n\t\t\treturn true\n\t\t}\n\t}\n\t// Check result if not empty/zero\n\tif !n.LastResult.IsZero() {\n\t\tif n.LastResult.Sub(now).Hours() < math.Abs(float64(inactive)) {\n\t\t\treturn true\n\t\t}\n\t}\n\t// Check config if not empty/zero\n\tif !n.LastConfig.IsZero() {\n\t\tif n.LastConfig.Sub(now).Hours() < math.Abs(float64(inactive)) {\n\t\t\treturn true\n\t\t}\n\t}\n\t// Check query read if not empty/zero\n\tif !n.LastQueryRead.IsZero() {\n\t\tif n.LastQueryRead.Sub(now).Hours() < math.Abs(float64(inactive)) {\n\t\t\treturn true\n\t\t}\n\t}\n\t// Check query write if not empty/zero\n\tif !n.LastQueryWrite.IsZero() {\n\t\tif n.LastQueryWrite.Sub(now).Hours() < math.Abs(float64(inactive)) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func ChangeStatus(ID string) {\n\tvalue, _ := table.Load(ID)\n\tnewvalue := value.(membership)\n\ttable.Store(ID, membership{\n\t\tCounter: newvalue.Counter,\n\t\tTime: newvalue.Time,\n\t\tStatus: false,\n\t})\n}", "func (timeout *Timeout) Start() {\n\ttimeout.state = Active\n\ttimeout.start = time.Now()\n}", "func (m *MockPostForkBlock) setStatus(arg0 choices.Status) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"setStatus\", arg0)\n}", "func TestIdleInconsistency(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"short testing requested\")\n\t}\n\n\tr := new(MeterRegistry)\n\tm1 := r.Get(\"first\")\n\tm2 := r.Get(\"second\")\n\tm3 := r.Get(\"third\")\n\n\tm1.Mark(10)\n\tm2.Mark(20)\n\tm3.Mark(30)\n\n\t// make m1 and m3 go idle\n\tfor i := 0; i < 30; i++ {\n\t\ttime.Sleep(time.Second)\n\t\tm2.Mark(1)\n\t}\n\n\ttime.Sleep(time.Second)\n\n\t// re-activate m3\n\tm3.Mark(20)\n\ttime.Sleep(time.Second + time.Millisecond)\n\n\t// check the totals\n\tif total := r.Get(\"first\").Snapshot().Total; total != 10 
{\n\t\tt.Errorf(\"expected first total to be 10, got %d\", total)\n\t}\n\n\tif total := r.Get(\"second\").Snapshot().Total; total != 50 {\n\t\tt.Errorf(\"expected second total to be 50, got %d\", total)\n\t}\n\n\tif total := r.Get(\"third\").Snapshot().Total; total != 50 {\n\t\tt.Errorf(\"expected third total to be 50, got %d\", total)\n\t}\n}", "func Active() bool {\n\treturn isActive\n}", "func doInactivateHalt(ctx *zedmanagerContext, uuidStr string,\n\tconfig types.AppInstanceConfig, status *types.AppInstanceStatus) bool {\n\n\tlog.Infof(\"doInactivateHalt for %s\\n\", uuidStr)\n\tchanged := false\n\n\t// Check AppNetworkStatus\n\tns := lookupAppNetworkStatus(ctx, uuidStr)\n\tif ns == nil {\n\t\tlog.Infof(\"Waiting for AppNetworkStatus for %s\\n\", uuidStr)\n\t\treturn changed\n\t}\n\tupdateAppNetworkStatus(status, ns)\n\tif ns.Pending() {\n\t\tlog.Infof(\"Waiting for AppNetworkStatus !Pending for %s\\n\", uuidStr)\n\t\treturn changed\n\t}\n\t// XXX should we make it not Activated?\n\tif ns.Error != \"\" {\n\t\tlog.Errorf(\"Received error from zedrouter for %s: %s\\n\",\n\t\t\tuuidStr, ns.Error)\n\t\tstatus.Error = ns.Error\n\t\tstatus.ErrorSource = pubsub.TypeToName(types.AppNetworkStatus{})\n\t\tstatus.ErrorTime = ns.ErrorTime\n\t\tchanged = true\n\t\treturn changed\n\t} else if status.ErrorSource == pubsub.TypeToName(types.AppNetworkStatus{}) {\n\t\tlog.Infof(\"Clearing zedrouter error %s\\n\", status.Error)\n\t\tstatus.Error = \"\"\n\t\tstatus.ErrorSource = \"\"\n\t\tstatus.ErrorTime = time.Time{}\n\t\tchanged = true\n\t}\n\tlog.Debugf(\"Done with AppNetworkStatus for %s\\n\", uuidStr)\n\n\t// Make sure we have a DomainConfig. 
Clears dc.Activate based\n\t// on the AppInstanceConfig's Activate\n\terr := MaybeAddDomainConfig(ctx, config, ns)\n\tif err != nil {\n\t\tlog.Errorf(\"Error from MaybeAddDomainConfig for %s: %s\\n\",\n\t\t\tuuidStr, err)\n\t\tstatus.Error = fmt.Sprintf(\"%s\", err)\n\t\tstatus.ErrorTime = time.Now()\n\t\tchanged = true\n\t\tlog.Infof(\"Waiting for DomainStatus Activated for %s\\n\",\n\t\t\tuuidStr)\n\t\treturn changed\n\t}\n\n\t// Check DomainStatus; update AppInstanceStatus if error\n\tds := lookupDomainStatus(ctx, uuidStr)\n\tif ds == nil {\n\t\tlog.Infof(\"Waiting for DomainStatus for %s\\n\", uuidStr)\n\t\treturn changed\n\t}\n\tif status.DomainName != ds.DomainName {\n\t\tstatus.DomainName = ds.DomainName\n\t\tchanged = true\n\t}\n\tif status.BootTime != ds.BootTime {\n\t\tstatus.BootTime = ds.BootTime\n\t\tchanged = true\n\t}\n\tif ds.State != status.State {\n\t\tswitch status.State {\n\t\tcase types.RESTARTING, types.PURGING:\n\t\t\t// Leave unchanged\n\t\tdefault:\n\t\t\tlog.Infof(\"Set State from DomainStatus from %d to %d\\n\",\n\t\t\t\tstatus.State, ds.State)\n\t\t\tstatus.State = ds.State\n\t\t\tchanged = true\n\t\t}\n\t}\n\t// Ignore errors during a halt\n\tif ds.LastErr != \"\" {\n\t\tlog.Warnf(\"doInactivateHalt sees error from domainmgr for %s: %s\\n\",\n\t\t\tuuidStr, ds.LastErr)\n\t}\n\tif status.ErrorSource == pubsub.TypeToName(types.DomainStatus{}) {\n\t\tlog.Infof(\"Clearing domainmgr error %s\\n\", status.Error)\n\t\tstatus.Error = \"\"\n\t\tstatus.ErrorSource = \"\"\n\t\tstatus.ErrorTime = time.Time{}\n\t\tchanged = true\n\t}\n\tif ds.Pending() {\n\t\tlog.Infof(\"Waiting for DomainStatus !Pending for %s\\n\", uuidStr)\n\t\treturn changed\n\t}\n\tif ds.Activated {\n\t\tlog.Infof(\"Waiting for Not Activated for DomainStatus %s\\n\",\n\t\t\tuuidStr)\n\t\treturn changed\n\t}\n\t// XXX network is still around! 
Need to call doInactivate in doRemove?\n\t// XXX fix assymetry?\n\tstatus.Activated = false\n\tstatus.ActivateInprogress = false\n\tchanged = true\n\tlog.Infof(\"doInactivateHalt done for %s\\n\", uuidStr)\n\treturn changed\n}", "func (client *Client) ServiceStatus(request *ServiceStatusRequest) (response *ServiceStatusResponse, err error) {\nresponse = CreateServiceStatusResponse()\nerr = client.DoAction(request, response)\nreturn\n}", "func (t *Task) ChangeStatus(s int) {\n\tt.Status = s\n}", "func (o *Status) Update() {\n o.Time = time.Now()\n}", "func changeTicketStatus(ticket *jira.Issue, status string, transitions []jira.Transition, fields map[string]jira.TransitionField) error {\n\tfmt.Printf(\"Changing %s status to %s with fields %s\\n\", ticket.Key, status, fields)\n\tvar found *jira.Transition\n\n\tfor _, transition := range transitions {\n\t\tif status == transition.Name {\n\t\t\tfound = &transition\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif found == nil {\n\t\treturn fmt.Errorf(\"Transition for %s to %s not found\", ticket.Key, status)\n\t}\n\n\tjiraClient.Transition.Create(ticket.ID, found.ID, fields)\n\n\treturn nil\n}", "func (m *IosUpdateConfiguration) SetActiveHoursStart(value *i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.TimeOnly)() {\n err := m.GetBackingStore().Set(\"activeHoursStart\", value)\n if err != nil {\n panic(err)\n }\n}", "func StartEnvironmentStatusUpdate(environmentStatus *constants.EnvironmentStatus, tickerDuration time.Duration) {\n\tticker := time.NewTicker(tickerDuration)\n\n\tgo func() {\n\t\tfor {\n\t\t\t<-ticker.C\n\n\t\t\tboxInstalled, boxInstalledErr := Installed()\n\t\t\tif boxInstalledErr != nil {\n\t\t\t\tlog.Error(\"Could not query whether VM is installed: %v\", boxInstalledErr)\n\t\t\t} else {\n\t\t\t\tenvironmentStatus.BoxInstalled = (boxInstalledErr == nil) && boxInstalled\n\t\t\t}\n\n\t\t\tboxRunning, boxRunningErr := Running()\n\t\t\tif boxRunningErr != nil {\n\t\t\t\tlog.Error(\"Could not query whether 
VM is running: %v\", boxRunningErr)\n\t\t\t} else {\n\t\t\t\tenvironmentStatus.BoxRunning = (boxRunningErr == nil) && boxRunning\n\t\t\t}\n\t\t}\n\t}()\n}", "func WaitForTableToBecomeActive(dynamo *dynamodb.DynamoDBService, tableName string, timeout uint, interval uint) error {\n\n\tnow := time.Now()\n\n\tfor !DoesTableExist(dynamo, tableName) {\n\n\t\tif time.Since(now) > time.Millisecond*time.Duration(timeout) {\n\t\t\treturn errors.New(\"\")\n\t\t}\n\t\ttime.Sleep(time.Millisecond * time.Duration(interval))\n\t}\n\treturn nil\n}", "func (pas *PodAutoscalerStatus) MarkActive() {\n\tpodCondSet.Manage(pas).MarkTrue(PodAutoscalerConditionActive)\n}", "func TestHostWorkingStatus(t *testing.T) {\n\tif testing.Short() || !build.VLONG {\n\t\tt.SkipNow()\n\t}\n\tt.Parallel()\n\n\tht, err := newHostTester(t.Name())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer ht.Close()\n\n\t// TODO: this causes an ndf, because it relies on the host tester starting up\n\t// and fully returning faster than the first check, which isnt always the\n\t// case. 
This check is disabled for now, but can be fixed by using the\n\t// Disrupt() pattern.\n\t// if ht.host.WorkingStatus() != modules.HostWorkingStatusChecking {\n\t// \tt.Fatal(\"expected working state to initially be modules.HostWorkingStatusChecking\")\n\t// }\n\n\tfor i := 0; i < 5; i++ {\n\t\t// Simulate some setting calls, and see if the host picks up on it.\n\t\tatomic.AddUint64(&ht.host.atomicSettingsCalls, workingStatusThreshold+1)\n\n\t\tsuccess := false\n\t\tfor start := time.Now(); time.Since(start) < 30*time.Second; time.Sleep(time.Millisecond * 10) {\n\t\t\tif ht.host.WorkingStatus() == modules.HostWorkingStatusWorking {\n\t\t\t\tsuccess = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !success {\n\t\t\tt.Fatal(\"expected working state to flip to HostWorkingStatusWorking after incrementing settings calls\")\n\t\t}\n\n\t\t// make no settings calls, host should flip back to NotWorking\n\t\tsuccess = false\n\t\tfor start := time.Now(); time.Since(start) < 30*time.Second; time.Sleep(time.Millisecond * 10) {\n\t\t\tif ht.host.WorkingStatus() == modules.HostWorkingStatusNotWorking {\n\t\t\t\tsuccess = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !success {\n\t\t\tt.Fatal(\"expected working state to flip to HostStatusNotWorking if no settings calls occur\")\n\t\t}\n\t}\n}", "func (handler *BotHandler) updateStatus() {\n\thandler.McRunner.WaitGroup.Add(1)\n\tdefer handler.McRunner.WaitGroup.Done()\n\tfor {\n\t\tselect {\n\t\tcase <-time.After(60 * time.Second):\n\t\t\thandler.McRunner.StatusRequestChannel <- true\n\n\t\t\tselect {\n\t\t\tcase status := <-handler.McRunner.StatusChannel:\n\t\t\t\tstatusJSON, _ := json.Marshal(status)\n\t\t\t\theader := header{Type: \"status\", Data: statusJSON}\n\t\t\t\thandler.sock.WriteJSON(header)\n\t\t\tcase <-time.After(10 * time.Second):\n\t\t\t\tfmt.Println(\"Failed to receive status update from runner, might be deadlocked.\")\n\t\t\t}\n\t\tcase <-handler.killChannel:\n\t\t\treturn\n\t\t}\n\n\t}\n}", "func (ct *Cointop) 
setRefreshStatus() {\n\tct.debuglog(\"setRefreshStatus()\")\n\tgo func() {\n\t\tct.loadingTicks(\"refreshing\", 900)\n\t\tct.RowChanged()\n\t}()\n}" ]
[ "0.6315328", "0.5751217", "0.5707817", "0.57021505", "0.5679295", "0.5671448", "0.56645894", "0.56333077", "0.5627383", "0.56199175", "0.56196296", "0.55565387", "0.5556034", "0.553161", "0.5514312", "0.54835093", "0.54337984", "0.53858835", "0.5380496", "0.537922", "0.53719425", "0.53661984", "0.5361009", "0.53583574", "0.53460485", "0.53406847", "0.5329454", "0.53218246", "0.5292565", "0.526667", "0.5265498", "0.52608913", "0.52255666", "0.52143323", "0.5201289", "0.51997304", "0.5185201", "0.51691043", "0.51652193", "0.5157601", "0.51515085", "0.5148765", "0.5145518", "0.51423764", "0.51409525", "0.514025", "0.51385903", "0.51316696", "0.5124399", "0.5124184", "0.5117852", "0.5110857", "0.51087934", "0.5108261", "0.5105011", "0.51043254", "0.5091219", "0.50845057", "0.5057654", "0.50518936", "0.5050551", "0.5046221", "0.5044896", "0.5039117", "0.5038918", "0.503027", "0.50263155", "0.5022662", "0.50197434", "0.50179243", "0.50159323", "0.5011497", "0.50017846", "0.50006384", "0.4994581", "0.4983081", "0.49739766", "0.49717957", "0.49662852", "0.49657416", "0.49518356", "0.4947483", "0.49463212", "0.49370185", "0.4931936", "0.49213946", "0.49174035", "0.49159044", "0.49155742", "0.49145266", "0.49139357", "0.49129954", "0.4911661", "0.49114364", "0.49088168", "0.48999596", "0.48995858", "0.4891841", "0.4890579", "0.4886133" ]
0.5440705
16
Test that status transitions from Active to Expired, once the end time is reached
func TestExpiration(t *testing.T) { mockclock := clock.NewMock() setClock(mockclock) // replace clock with mock for speedy testing now := mockclock.Now() reset() p, _ := New("Promo1", now, now.Add(1*time.Hour)) runtime.Gosched() if res := p.AllowDisplay(ip); res != true { t.Errorf("Bad Promo status, got: %v want %v", res, true) } // wind clock forward until after display period mockclock.Add(1*time.Hour + 1*time.Second) if res := p.AllowDisplay(ip); res != false { t.Errorf("Bad Promo status, got: %v want %v", res, false) } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (_TrialRulesAbstract *TrialRulesAbstractTransactor) Expired(opts *bind.TransactOpts, caseId [32]byte, status uint8) (*types.Transaction, error) {\n\treturn _TrialRulesAbstract.contract.Transact(opts, \"expired\", caseId, status)\n}", "func TestExpiration(t *testing.T) {\n\tr := NewRegistrar()\n\tr.Add(session)\n\ttime.Sleep(expireDuration)\n\tif r.Validate(user) {\n\t\tt.Error(\"The token has expired, but the user was still validated!\")\n\t}\n\tt.Log(\"The token expired, and the user was succesfully reported Invalid.\")\n}", "func (c Choco) Expired() bool {\n\treturn time.Since(c.TimeStamp) > time.Second\n}", "func checkrequestStatus(d *schema.ResourceData, config Config, requestID string, timeOut int) error {\n\ttimeout := time.After(time.Duration(timeOut) * time.Second)\n\tfor {\n\t\tselect {\n\t\tcase <-time.After(1 * time.Second):\n\t\t\tstatus, state, err := checkServiceRequestStatus(config, requestID)\n\t\t\tif err == nil {\n\t\t\t\tif state == \"finished\" && status == \"Ok\" {\n\t\t\t\t\tlog.Println(\"[DEBUG] Service order added SUCCESSFULLY\")\n\t\t\t\t\td.SetId(requestID)\n\t\t\t\t\treturn nil\n\t\t\t\t} else if status == \"Error\" {\n\t\t\t\t\tlog.Println(\"[ERROR] Failed\")\n\t\t\t\t\treturn fmt.Errorf(\"[Error] Failed execution\")\n\t\t\t\t} else {\n\t\t\t\t\tlog.Println(\"[DEBUG] Request state is :\", state)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase <-timeout:\n\t\t\tlog.Println(\"[DEBUG] Timeout occured\")\n\t\t\treturn fmt.Errorf(\"[ERROR] Timeout\")\n\t\t}\n\t}\n}", "func (timeout *Timeout) CheckExpire() bool {\n\tif timeout.state == Active && time.Since(timeout.start) > timeout.d {\n\t\ttimeout.state = Expired\n\t}\n\tif timeout.state == Expired {\n\t\treturn true\n\t}\n\treturn false\n}", "func (exp *ControlleeExpectations) isExpired() bool {\n\treturn clock.RealClock{}.Since(exp.timestamp) > ExpectationsTimeout\n}", "func (s) TestEDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) 
{\n\toverrideFedEnvVar(t)\n\tmgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{})\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to spin up the xDS management server: %v\", err)\n\t}\n\tdefer mgmtServer.Stop()\n\n\t// Create an xDS client talking to the above management server.\n\tnodeID := uuid.New().String()\n\tclient, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{\n\t\tXDSServer: xdstestutils.ServerConfigForAddress(t, mgmtServer.Address),\n\t\tNodeProto: &v3corepb.Node{Id: nodeID},\n\t}, defaultTestWatchExpiryTimeout, time.Duration(0))\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create xds client: %v\", err)\n\t}\n\tdefer close()\n\n\t// Register a watch for an endpoint resource and have the watch callback\n\t// push the received update on to a channel.\n\tew := newEndpointsWatcher()\n\tedsCancel := xdsresource.WatchEndpoints(client, edsName, ew)\n\tdefer edsCancel()\n\n\t// Configure the management server to return a single endpoint resource,\n\t// corresponding to the one we registered a watch for.\n\tresources := e2e.UpdateOptions{\n\t\tNodeID: nodeID,\n\t\tEndpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(edsName, edsHost1, []uint32{edsPort1})},\n\t\tSkipValidation: true,\n\t}\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\tif err := mgmtServer.Update(ctx, resources); err != nil {\n\t\tt.Fatalf(\"Failed to update management server with resources: %v, err: %v\", resources, err)\n\t}\n\n\t// Verify the contents of the received update.\n\twantUpdate := endpointsUpdateErrTuple{\n\t\tupdate: xdsresource.EndpointsUpdate{\n\t\t\tLocalities: []xdsresource.Locality{\n\t\t\t\t{\n\t\t\t\t\tEndpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf(\"%s:%d\", edsHost1, edsPort1), Weight: 1}},\n\t\t\t\t\tID: internal.LocalityID{\n\t\t\t\t\t\tRegion: \"region-1\",\n\t\t\t\t\t\tZone: \"zone-1\",\n\t\t\t\t\t\tSubZone: \"subzone-1\",\n\t\t\t\t\t},\n\t\t\t\t\tPriority: 
0,\n\t\t\t\t\tWeight: 1,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tif err := verifyEndpointsUpdate(ctx, ew.updateCh, wantUpdate); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Wait for the watch expiry timer to fire, and verify that the callback is\n\t// not invoked.\n\t<-time.After(defaultTestWatchExpiryTimeout)\n\tif err := verifyNoEndpointsUpdate(ctx, ew.updateCh); err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func (s *Suite) TestAttemptExpiration(c *check.C) {\n\tworkSpec, worker := s.makeWorkSpecAndWorker(c)\n\tworkUnit, err := workSpec.AddWorkUnit(\"a\", map[string]interface{}{}, 0.0)\n\tc.Assert(err, check.IsNil)\n\n\tattempts, err := worker.RequestAttempts(coordinate.AttemptRequest{})\n\tc.Assert(err, check.IsNil)\n\tc.Assert(attempts, check.HasLen, 1)\n\tattempt := attempts[0]\n\n\tstatus, err := attempt.Status()\n\tc.Assert(err, check.IsNil)\n\tc.Check(status, check.Equals, coordinate.Pending)\n\n\tattempts, err = worker.RequestAttempts(coordinate.AttemptRequest{})\n\tc.Assert(err, check.IsNil)\n\tc.Check(attempts, check.HasLen, 0)\n\n\t// There is a default expiration of 15 minutes (checked elsewhere)\n\t// So if we wait for, say, 20 minutes we should become expired\n\ts.Clock.Add(time.Duration(20) * time.Minute)\n\tstatus, err = attempt.Status()\n\tc.Assert(err, check.IsNil)\n\tc.Check(status, check.Equals, coordinate.Expired)\n\n\t// The work unit should be \"available\" for all purposes\n\tmeta, err := workSpec.Meta(true)\n\tc.Assert(err, check.IsNil)\n\tc.Check(meta.AvailableCount, check.Equals, 1)\n\tc.Check(meta.PendingCount, check.Equals, 0)\n\n\tuStatus, err := workUnit.Status()\n\tc.Assert(err, check.IsNil)\n\tc.Check(uStatus, check.Equals, coordinate.AvailableUnit)\n\n\t// If we request more attempts we should get back the expired\n\t// unit again\n\tattempts, err = worker.RequestAttempts(coordinate.AttemptRequest{})\n\tc.Assert(err, check.IsNil)\n\tc.Assert(attempts, check.HasLen, 1)\n\n\tstatus, err = attempts[0].Status()\n\tc.Assert(err, 
check.IsNil)\n\tc.Check(status, check.Equals, coordinate.Pending)\n}", "func TestHalfOpenAfterTimeout(t *testing.T){\n breaker := NewBreaker(2 * time.Second, 2, 2)\n\n breaker.halfOpen()\n\n evaluateCondition(t, breaker.State == HalfOpenState, \"TestHalfOpenAfterTimeout\")\n}", "func (s) TestEDSWatch_ExpiryTimerFiresBeforeResponse(t *testing.T) {\n\toverrideFedEnvVar(t)\n\tmgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{})\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to spin up the xDS management server: %v\", err)\n\t}\n\tdefer mgmtServer.Stop()\n\n\tclient, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{\n\t\tXDSServer: xdstestutils.ServerConfigForAddress(t, mgmtServer.Address),\n\t\tNodeProto: &v3corepb.Node{},\n\t}, defaultTestWatchExpiryTimeout, time.Duration(0))\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create xds client: %v\", err)\n\t}\n\tdefer close()\n\n\t// Register a watch for a resource which is expected to fail with an error\n\t// after the watch expiry timer fires.\n\tew := newEndpointsWatcher()\n\tedsCancel := xdsresource.WatchEndpoints(client, edsName, ew)\n\tdefer edsCancel()\n\n\t// Wait for the watch expiry timer to fire.\n\t<-time.After(defaultTestWatchExpiryTimeout)\n\n\t// Verify that an empty update with the expected error is received.\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\twantErr := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, \"\")\n\tif err := verifyEndpointsUpdate(ctx, ew.updateCh, endpointsUpdateErrTuple{err: wantErr}); err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func TestSucceedsAfterTimeout(t *testing.T){\n breaker := NewBreaker(2 * time.Second, 2, 2)\n\n breaker.halfOpen()\n\n _, err := breaker.Run(alwaysSucceedsFunc)\n\n evaluateCondition(t, err == nil, \"TestSucceedsAfterTimeout\")\n}", "func (_TrialRulesAbstract *TrialRulesAbstractTransactorSession) Expired(caseId [32]byte, status uint8) (*types.Transaction, 
error) {\n\treturn _TrialRulesAbstract.Contract.Expired(&_TrialRulesAbstract.TransactOpts, caseId, status)\n}", "func (_TrialRulesAbstract *TrialRulesAbstractSession) Expired(caseId [32]byte, status uint8) (*types.Transaction, error) {\n\treturn _TrialRulesAbstract.Contract.Expired(&_TrialRulesAbstract.TransactOpts, caseId, status)\n}", "func TestSendENIStateChangeExpired(t *testing.T) {\n\tmockCtrl := gomock.NewController(t)\n\tdefer mockCtrl.Finish()\n\n\tmockStateManager := mock_dockerstate.NewMockTaskEngineState(mockCtrl)\n\teventChannel := make(chan statechange.Event)\n\tctx := context.TODO()\n\n\tgomock.InOrder(\n\t\tmockStateManager.EXPECT().ENIByMac(randomMAC).Return(&ni.ENIAttachment{\n\t\t\tAttachmentInfo: attachmentinfo.AttachmentInfo{\n\t\t\t\tAttachStatusSent: false,\n\t\t\t\tExpiresAt: time.Now().Add(expirationTimeSubtraction),\n\t\t\t},\n\t\t\tMACAddress: randomMAC,\n\t\t}, true),\n\t\tmockStateManager.EXPECT().RemoveENIAttachment(randomMAC),\n\t)\n\n\twatcher := setupWatcher(ctx, nil, mockStateManager, eventChannel, primaryMAC)\n\n\tassert.Error(t, watcher.sendENIStateChange(randomMAC))\n}", "func (cc *CurrentConditions) Expired() bool {\n\tnow := time.Now()\n\texpired := now.After(cc.RequestTimestamp.Add(UpdateInterval))\n\treturn expired\n}", "func (s *Suite) TestAttemptExpiration() {\n\tsts := SimpleTestSetup{\n\t\tNamespaceName: \"TestAttemptExpiration\",\n\t\tWorkerName: \"worker\",\n\t\tWorkSpecName: \"spec\",\n\t\tWorkUnitName: \"a\",\n\t}\n\tsts.SetUp(s)\n\tdefer sts.TearDown(s)\n\n\tattempts, err := sts.Worker.RequestAttempts(coordinate.AttemptRequest{})\n\tif !(s.NoError(err) && s.Len(attempts, 1)) {\n\t\treturn\n\t}\n\tattempt := attempts[0]\n\n\tstatus, err := attempt.Status()\n\tif s.NoError(err) {\n\t\ts.Equal(coordinate.Pending, status)\n\t}\n\n\tsts.RequestNoAttempts(s)\n\n\t// There is a default expiration of 15 minutes (checked elsewhere)\n\t// So if we wait for, say, 20 minutes we should become 
expired\n\ts.Clock.Add(time.Duration(20) * time.Minute)\n\tstatus, err = attempt.Status()\n\tif s.NoError(err) {\n\t\ts.Equal(coordinate.Expired, status)\n\t}\n\n\t// The work unit should be \"available\" for all purposes\n\tmeta, err := sts.WorkSpec.Meta(true)\n\tif s.NoError(err) {\n\t\ts.Equal(1, meta.AvailableCount)\n\t\ts.Equal(0, meta.PendingCount)\n\t}\n\tsts.CheckUnitStatus(s, coordinate.AvailableUnit)\n\n\t// If we request more attempts we should get back the expired\n\t// unit again\n\tattempt = sts.RequestOneAttempt(s)\n\ts.AttemptStatus(coordinate.Pending, attempt)\n}", "func TestOpenAfterFailures(t *testing.T){\n breaker := NewBreaker(5 * time.Second, 2, 2)\n\n breaker.open()\n\n evaluateCondition(t, breaker.State == OpenState, \"TestOpenAfterFailures\")\n}", "func (pas *PodAutoscalerStatus) inStatusFor(status corev1.ConditionStatus, now time.Time) time.Duration {\n\tcond := pas.GetCondition(PodAutoscalerConditionActive)\n\tif cond == nil || cond.Status != status {\n\t\treturn -1\n\t}\n\treturn now.Sub(cond.LastTransitionTime.Inner.Time)\n}", "func (s *InMemorySuite) TestRecordStatus(c *C) {\n\ttest.WithTimeout(func(ctx context.Context) {\n\t\ttimeline := s.newTimeline()\n\t\tnode := \"test-node\"\n\t\told := &pb.NodeStatus{Name: node, Status: pb.NodeStatus_Running}\n\t\texpected := []*pb.TimelineEvent{history.NewNodeRecovered(s.clock.Now(), node)}\n\n\t\tc.Assert(timeline.RecordStatus(ctx, old), IsNil)\n\n\t\tactual, err := timeline.GetEvents(ctx, nil)\n\t\tc.Assert(err, IsNil)\n\t\tc.Assert(actual, test.DeepCompare, expected, Commentf(\"Expected the status to be recorded.\"))\n\t})\n}", "func (s *status) ended() error { return s.set(\"ended\") }", "func (r *resourceRecord) isCloseToExpiring() bool {\n\telapsed := (r.initialTimeToLive - r.remainingTimeToLive).Seconds()\n\n\t// RFC 6762 section 10 specifies that records should be refreshed when more than 80% of\n\t// their initial time-to-live has elapsed.\n\treturn (elapsed / 
r.initialTimeToLive.Seconds()) > 0.8\n}", "func (dc *DelegatedCredential) isExpired(start, now time.Time) bool {\n\tend := start.Add(dc.cred.validTime)\n\treturn !now.Before(end)\n}", "func (r *SubscriptionReconciler) checkStatusActive(subscription *eventingv1alpha1.Subscription) (statusChanged, retry bool, err error) {\n\tif subscription.Status.EmsSubscriptionStatus.SubscriptionStatus == string(types.SubscriptionStatusActive) {\n\t\tif len(subscription.Status.FailedActivation) > 0 {\n\t\t\tsubscription.Status.FailedActivation = \"\"\n\t\t\treturn true, false, nil\n\t\t}\n\t\treturn false, false, nil\n\t}\n\tt1 := time.Now()\n\tif len(subscription.Status.FailedActivation) == 0 {\n\t\t// it's the first time\n\t\tsubscription.Status.FailedActivation = t1.Format(time.RFC3339)\n\t\treturn true, true, nil\n\t}\n\t// check the timeout\n\tif t0, er := time.Parse(time.RFC3339, subscription.Status.FailedActivation); er != nil {\n\t\terr = er\n\t} else if t1.Sub(t0) > timeoutRetryActiveEmsStatus {\n\t\terr = fmt.Errorf(\"timeout waiting for the subscription to be active: %v\", subscription.Name)\n\t} else {\n\t\tretry = true\n\t}\n\treturn false, retry, err\n}", "func (state *RuntimeState) CheckRenewalStatus() {\n\tfor {\n\t\t//To avoid concurrent map iteration and map write\n\t\trenewalinfoMap := state.Renewalinfo\n\t\tfor key, value := range renewalinfoMap {\n\t\t\tif time.Now().Unix() > value.RenewCertAfterTime {\n\t\t\t\tfmt.Println(\"Hey I'm renewing\")\n\t\t\t\tstate.RenewCertsforDomain(key)\n\t\t\t}\n\t\t}\n\t\ttime.Sleep(time.Minute * 1)\n\t}\n}", "func (t *timer) checkExpiration() bool {\n\t// Transition to fully disabled state if we're just consuming an\n\t// orphaned timer.\n\tif t.state == timerStateOrphaned {\n\t\tt.state = timerStateDisabled\n\t\treturn false\n\t}\n\n\t// The timer is enabled, but it may have expired early. 
Check if that's\n\t// the case, and if so, reset the runtime timer to the correct time.\n\tnow := t.clock.NowMonotonic()\n\tif now.Before(t.target) {\n\t\tt.clockTarget = t.target\n\t\tt.timer.Reset(t.target.Sub(now))\n\t\treturn false\n\t}\n\n\t// The timer has actually expired, disable it for now and inform the\n\t// caller.\n\tt.state = timerStateDisabled\n\treturn true\n}", "func StatefulsetStatusCheck(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, appsUnderTest []experimentTypes.ApplicationUnderTest, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {\n\n\t//Record start timestamp\n\tChaosStartTimeStamp := time.Now().Unix()\n\tisFailed := false\n\n\terr = retry.\n\t\tTimes(uint(experimentsDetails.ChaosDuration / experimentsDetails.Delay)).\n\t\tWait(time.Duration(experimentsDetails.Delay) * time.Second).\n\t\tTry(func(attempt uint) error {\n\t\t\tfor _, app := range appsUnderTest {\n\t\t\t\tstatefulset, err := appsv1StatefulsetClient.Get(app.AppName, metav1.GetOptions{})\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn errors.Errorf(\"Unable to find the statefulset with name %v, err: %v\", app.AppName, err)\n\t\t\t\t}\n\t\t\t\tlog.Infof(\"Statefulset's Ready Replica Count is: %v\", statefulset.Status.ReadyReplicas)\n\t\t\t\tif int(statefulset.Status.ReadyReplicas) != experimentsDetails.Replicas {\n\t\t\t\t\tisFailed = true\n\t\t\t\t\treturn errors.Errorf(\"Application is not scaled yet, err: %v\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tisFailed = false\n\t\t\treturn nil\n\t\t})\n\n\tif isFailed {\n\t\terr = AutoscalerRecoveryInStatefulset(experimentsDetails, clients, appsUnderTest)\n\t\tif err != nil {\n\t\t\treturn errors.Errorf(\"Unable to perform autoscaling, err: %v\", err)\n\t\t}\n\t\treturn errors.Errorf(\"Failed to scale the application\")\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// run the probes during chaos\n\tif len(resultDetails.ProbeDetails) != 0 {\n\t\tif 
err = probe.RunProbes(chaosDetails, clients, resultDetails, \"DuringChaos\", eventsDetails); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t//ChaosCurrentTimeStamp contains the current timestamp\n\tChaosCurrentTimeStamp := time.Now().Unix()\n\tif int(ChaosCurrentTimeStamp-ChaosStartTimeStamp) <= experimentsDetails.ChaosDuration {\n\t\tlog.Info(\"[Wait]: Waiting for completion of chaos duration\")\n\t\ttime.Sleep(time.Duration(experimentsDetails.ChaosDuration-int(ChaosCurrentTimeStamp-ChaosStartTimeStamp)) * time.Second)\n\t}\n\n\treturn nil\n}", "func (a *Assembler) Expired() bool {\n\treturn time.Now().After(a.deadline)\n}", "func testHealth(service *bridge.Service, client fargo.EurekaConnection, elbReg *fargo.Instance) {\n\tcontainerID := service.Origin.ContainerID\n\n\t// Get actual eureka status and lookup previous logical registration status\n\teurekaStatus := getELBStatus(client, elbReg)\n\tlog.Debugf(\"Eureka status check gave: %v\", eurekaStatus)\n\tlast := getPreviousStatus(containerID)\n\n\t// Work out an appropriate registration status given previous and current values\n\tstatusChange := determineNewEurekaStatus(containerID, eurekaStatus, last)\n\tsetPreviousStatus(containerID, statusChange.newStatus)\n\telbReg.Status = statusChange.registrationStatus\n\tlog.Debugf(\"Status health check returned prev: %v registration: %v\", last, elbReg.Status)\n}", "func checkTimer(name string, t *testing.T, upd timerUpdate, active bool, next time.Duration) {\n\tif upd.active != active {\n\t\tt.Fatalf(\"%s: expected timer active=%v\", name, active)\n\t}\n\tif active && upd.next != next {\n\t\tt.Fatalf(\"%s: expected timer to be %v, got %v\", name, next, upd.next)\n\t}\n}", "func (p *UserPendingPermissions) Expired(ttl time.Duration, now time.Time) bool {\n\treturn !now.Before(p.UpdatedAt.Add(ttl))\n}", "func (d *Driver) Expired() bool {\n\tif d.Expiration == 0 {\n\t\treturn false\n\t}\n\treturn time.Now().UnixNano() > d.Expiration\n}", "func (v *ObservabilityVerifier) 
ExpectCompletingStatus(g Gomega) {}", "func vgStatusCheck(id string, powerClient *v.IBMPIVolumeGroupClient) {\n\tfor start := time.Now(); time.Since(start) < time.Second*30; {\n\t\ttime.Sleep(10 * time.Second)\n\t\tvg, err := powerClient.GetDetails(id)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tif vg.Status == \"available\" {\n\t\t\tbreak\n\t\t}\n\t}\n}", "func (task *Task) IsExpired() bool {\n\tswitch task.Schedule.Regularity {\n\tcase apiModels.OneTime, apiModels.Trigger:\n\t\treturn common.ValidTime(time.Now().UTC(), task.RunTimeUTC)\n\tcase apiModels.Recurrent:\n\t\treturn !common.ValidTime(task.Schedule.EndRunTime.UTC(), task.RunTimeUTC)\n\t}\n\treturn true\n}", "func (s *TiFlashSpec) Status(ctx context.Context, timeout time.Duration, tlsCfg *tls.Config, pdList ...string) string {\n\tstoreAddr := utils.JoinHostPort(s.Host, s.FlashServicePort)\n\tstate := checkStoreStatus(ctx, storeAddr, tlsCfg, pdList...)\n\tif s.Offline && strings.ToLower(state) == \"offline\" {\n\t\tstate = \"Pending Offline\" // avoid misleading\n\t}\n\treturn state\n}", "func (suite *HealthCheckTestSuite) TestGetAgentStateActive() {\n\t// UpdateEmptyInstanceInformation will return active in the h.ping() function.\n\tsuite.serviceMock.On(\"UpdateEmptyInstanceInformation\", mock.Anything, version.Version, AgentName).Return(nil, nil)\n\tagentState, err := suite.healthCheck.GetAgentState()\n\t// Assert the status is Active and the error is nil.\n\tassert.Equal(suite.T(), agentState, Active, \"agent state should be active\")\n\tassert.Nil(suite.T(), err, \"GatAgentState function should always return nil as error\")\n}", "func TestUptimeCalculator(t *testing.T) {\n\n\tcalcFunc := CreateUpdateCalculationFunction(time.Now().Unix(), 10*time.Second)\n\tstart := time.Now().Unix()\n\tWaitForGracePeriod(10*time.Second, calcFunc, false)\n\tend := time.Now().Unix()\n\tdiff := end - start\n\tassert.True(t, diff >= 10)\n\n}", "func (v *ObservabilityVerifier) ExpectPreparingStatus(g Gomega) 
{\n\tg.Expect(time.Now().UTC().Sub(v.Shoot.Status.Credentials.Rotation.Observability.LastInitiationTime.Time.UTC())).To(BeNumerically(\"<=\", time.Minute))\n}", "func TestResume(t *testing.T) {\n\tdefer test.Guard(t)()\n\n\tt.Run(\"not expired\", func(t *testing.T) {\n\t\ttestResume(t, false, false, true)\n\t})\n\tt.Run(\"expired not revealed\", func(t *testing.T) {\n\t\ttestResume(t, true, false, false)\n\t})\n\tt.Run(\"expired revealed\", func(t *testing.T) {\n\t\ttestResume(t, true, true, true)\n\t})\n}", "func (r *Reservation) Expired() bool {\n\treturn time.Until(r.DataReservation.ExpirationReservation.Time) <= 0\n}", "func (i *info) expired(now int64) bool {\n\treturn i.TTLStamp <= now\n}", "func (m *ScheduleItem) SetStatus(value *FreeBusyStatus)() {\n err := m.GetBackingStore().Set(\"status\", value)\n if err != nil {\n panic(err)\n }\n}", "func IsExpired(targetDate time.Time, timeAdded time.Duration) bool {\n\treturn time.Since(targetDate.Add(timeAdded)) > 0\n}", "func (c *Client) doWaitForStatus(eniID string, checkNum, checkInterval int, finalStatus string) error {\n\tfor i := 0; i < checkNum; i++ {\n\t\ttime.Sleep(time.Second * time.Duration(checkInterval))\n\t\tenis, err := c.queryENI(eniID, \"\", \"\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, eni := range enis {\n\t\t\tif *eni.NetworkInterfaceId == eniID {\n\t\t\t\tswitch *eni.State {\n\t\t\t\tcase ENI_STATUS_AVAILABLE:\n\t\t\t\t\tswitch finalStatus {\n\t\t\t\t\tcase ENI_STATUS_ATTACHED:\n\t\t\t\t\t\tif eni.Attachment != nil && eni.Attachment.InstanceId != nil {\n\t\t\t\t\t\t\tblog.Infof(\"eni %s is attached\", eniID)\n\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t}\n\t\t\t\t\t\tblog.Infof(\"eni %s is not attached\", eniID)\n\t\t\t\t\tcase ENI_STATUS_DETACHED:\n\t\t\t\t\t\tif eni.Attachment == nil {\n\t\t\t\t\t\t\tblog.Infof(\"eni %s is detached\", eniID)\n\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t}\n\t\t\t\t\t\tblog.Infof(\"eni %s is not detached\", 
eniID)\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tblog.Infof(\"eni %s is %s now\", eniID, *eni.State)\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\t\t\t\t\tbreak\n\t\t\t\tcase ENI_STATUS_PENDING, ENI_STATUS_ATTACHING, ENI_STATUS_DETACHING, ENI_STATUS_DELETING:\n\t\t\t\t\tblog.Infof(\"eni %s is %s\", eniID, *eni.State)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tblog.Errorf(\"timeout when wait for eni %s\", eniID)\n\treturn fmt.Errorf(\"timeout when wait for eni %s\", eniID)\n}", "func (c deploymentChecker) EndTime() uint64 {\n\treturn c.deployment.ExpireTime\n}", "func TestFsmTimeout(t *testing.T) {\n\tfsm := NewFSM(NewLoginHandler(), 1e5)\n\n\tfsm.Send(&LoginPayload{\"yadda\"})\n\tfsm.Send(&PreparePayload{\"foo\", \"bar\"})\n\tfsm.Send(&LoginPayload{\"yaddaA\"})\n\tfsm.Send(&LoginPayload{\"yaddaB\"})\n\n\ttime.Sleep(1e8)\n\n\tfsm.Send(&LoginPayload{\"yaddaC\"})\n\tfsm.Send(&LoginPayload{\"yaddaD\"})\n\tfsm.Send(&UnlockPayload{})\n\tfsm.Send(&LoginPayload{\"bar\"})\n\n\ttime.Sleep(1e7)\n\n\tt.Logf(\"Status: '%v'.\", fsm.State())\n}", "func (m *MockUpstreamIntf) ActiveInPastSeconds(arg0 time.Duration) bool {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ActiveInPastSeconds\", arg0)\n\tret0, _ := ret[0].(bool)\n\treturn ret0\n}", "func TestPreExpired(t *testing.T) {\n\tvar tm time.Time // initial value is in the past\n\treset()\n\tp, _ := New(\"Promo1\", tm, tm)\n\tif res := p.AllowDisplay(ip); res != false {\n\t\tt.Errorf(\"Bad Promo status, got: %v want %v\", res, false)\n\t}\n}", "func (c Certificate) Expired(now time.Time, skew time.Duration) bool {\n\treturn c.IssuedAt.After(now) || c.ExpiresAt.Before(now)\n}", "func (me TEventType) IsHITExpired() bool { return me.String() == \"HITExpired\" }", "func Test_Complete_Flow(t *testing.T) {\n\tassert := assert.New(t)\n\n\tdao := dao.NewMetricDaoMemoryImpl(3) // Setting the TTL in 3 seconds for testing purpose\n\tsrv := service.NewMetricsServiceImpl(dao)\n\n\t// T0\n\terr := srv.AddMetric(\"metric1\", 
1)\n\tassert.Nil(err)\n\n\terr = srv.AddMetric(\"metric2\", 3)\n\tassert.Nil(err)\n\n\terr = srv.AddMetric(\"metric2\", 1)\n\tassert.Nil(err)\n\n\terr = srv.AddMetric(\"metric3\", -2)\n\tassert.Nil(err)\n\n\t// Checking the metrics\n\tval, err := srv.SumMetric(\"metric1\")\n\tassert.Nil(err)\n\tassert.Equal(1, val)\n\n\tval, err = srv.SumMetric(\"metric2\")\n\tassert.Nil(err)\n\tassert.Equal(4, val)\n\n\tval, err = srv.SumMetric(\"metric3\")\n\tassert.Nil(err)\n\tassert.Equal(-2, val)\n\n\t// sleeping 3 secs\n\ttime.Sleep(time.Second * 2)\n\n\t// T1 - adding more values to the metrics\n\terr = srv.AddMetric(\"metric1\", 10)\n\tassert.Nil(err)\n\n\terr = srv.AddMetric(\"metric2\", -2)\n\tassert.Nil(err)\n\n\terr = srv.AddMetric(\"metric3\", 10)\n\tassert.Nil(err)\n\n\terr = srv.AddMetric(\"metric3\", 22)\n\tassert.Nil(err)\n\n\t// Checking the metrics again\n\tval, err = srv.SumMetric(\"metric1\")\n\tassert.Nil(err)\n\tassert.Equal(11, val)\n\n\tval, err = srv.SumMetric(\"metric2\")\n\tassert.Nil(err)\n\tassert.Equal(2, val)\n\n\tval, err = srv.SumMetric(\"metric3\")\n\tassert.Nil(err)\n\tassert.Equal(30, val)\n\n\t// sleeping 3 more seconds and the metrics added on T0 should be removed\n\ttime.Sleep(time.Second * 2)\n\n\t// T2 - Checking the metrics again\n\tval, err = srv.SumMetric(\"metric1\")\n\tassert.Nil(err)\n\tassert.Equal(10, val)\n\n\tval, err = srv.SumMetric(\"metric2\")\n\tassert.Nil(err)\n\tassert.Equal(-2, val)\n\n\tval, err = srv.SumMetric(\"metric3\")\n\tassert.Nil(err)\n\tassert.Equal(32, val)\n\n\t// sleeping 5 more seconds and there shouldn't be more metrics\n\ttime.Sleep(time.Second * 3)\n\n\tval, err = srv.SumMetric(\"metric1\")\n\tassert.Nil(err)\n\tassert.Equal(0, val)\n\n\tval, err = srv.SumMetric(\"metric2\")\n\tassert.Nil(err)\n\tassert.Equal(0, val)\n\n\tval, err = srv.SumMetric(\"metric3\")\n\tassert.Nil(err)\n\tassert.Equal(0, val)\n}", "func (i *Info) expired(now int64) bool {\n\treturn i.TTLStamp <= now\n}", "func TestExpiry(t 
*testing.T) {\n\t_, privateBytes, err := GenerateKeyPair()\n\trequire.NoError(t, err)\n\tprivateKey, err := utils.ParsePrivateKey(privateBytes)\n\trequire.NoError(t, err)\n\n\tclock := clockwork.NewFakeClockAt(time.Now())\n\n\t// Create a new key that can be used to sign and verify tokens.\n\tkey, err := New(&Config{\n\t\tClock: clock,\n\t\tPrivateKey: privateKey,\n\t\tAlgorithm: defaults.ApplicationTokenAlgorithm,\n\t\tClusterName: \"example.com\",\n\t})\n\trequire.NoError(t, err)\n\n\t// Sign a token with a 1 minute expiration.\n\ttoken, err := key.Sign(SignParams{\n\t\tUsername: \"[email protected]\",\n\t\tRoles: []string{\"foo\", \"bar\"},\n\t\tTraits: wrappers.Traits{\n\t\t\t\"trait1\": []string{\"value-1\", \"value-2\"},\n\t\t},\n\t\tExpires: clock.Now().Add(1 * time.Minute),\n\t\tURI: \"http://127.0.0.1:8080\",\n\t})\n\trequire.NoError(t, err)\n\n\t// Verify that the token is still valid.\n\tclaims, err := key.Verify(VerifyParams{\n\t\tUsername: \"[email protected]\",\n\t\tURI: \"http://127.0.0.1:8080\",\n\t\tRawToken: token,\n\t})\n\trequire.NoError(t, err)\n\trequire.Equal(t, claims.Username, \"[email protected]\")\n\trequire.Equal(t, claims.Roles, []string{\"foo\", \"bar\"})\n\trequire.Equal(t, claims.IssuedAt, josejwt.NewNumericDate(clock.Now()))\n\n\t// Advance time by two minutes and verify the token is no longer valid.\n\tclock.Advance(2 * time.Minute)\n\t_, err = key.Verify(VerifyParams{\n\t\tUsername: \"[email protected]\",\n\t\tURI: \"http://127.0.0.1:8080\",\n\t\tRawToken: token,\n\t})\n\trequire.Error(t, err)\n}", "func (r ReservedAddress) Expired(t time.Time) bool {\n\tif r.Expires == nil {\n\t\treturn false\n\t}\n\n\treturn r.Expires.Before(t)\n}", "func (px *Paxos) Status(seq int) (Fate, interface{}) {\n \n if seq < px.Min() {\n return Forgotten, nil\n }\n\n instance := px.getInstance(seq)\n return instance.Fate, instance.Va\n}", "func TestAssessRunStatusUpdateResult(t *testing.T) {\n\tf := newFixture(t)\n\tdefer f.Close()\n\tc, _, _ := 
f.newController(noResyncPeriodFunc)\n\trun := &v1alpha1.AnalysisRun{\n\t\tSpec: v1alpha1.AnalysisRunSpec{\n\t\t\tMetrics: []v1alpha1.Metric{\n\t\t\t\t{\n\t\t\t\t\tName: \"sleep-infinity\",\n\t\t\t\t\tProvider: v1alpha1.MetricProvider{\n\t\t\t\t\t\tJob: &v1alpha1.JobMetric{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"fail-after-30\",\n\t\t\t\t\tProvider: v1alpha1.MetricProvider{\n\t\t\t\t\t\tJob: &v1alpha1.JobMetric{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tStatus: v1alpha1.AnalysisRunStatus{\n\t\t\tPhase: v1alpha1.AnalysisPhaseRunning,\n\t\t\tMetricResults: []v1alpha1.MetricResult{\n\t\t\t\t{\n\t\t\t\t\tName: \"sleep-infinity\",\n\t\t\t\t\tPhase: v1alpha1.AnalysisPhaseRunning,\n\t\t\t\t\tMeasurements: []v1alpha1.Measurement{{\n\t\t\t\t\t\tPhase: v1alpha1.AnalysisPhaseRunning,\n\t\t\t\t\t\tStartedAt: timePtr(metav1.NewTime(time.Now().Add(-60 * time.Second))),\n\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"fail-after-30\",\n\t\t\t\t\tCount: 1,\n\t\t\t\t\tFailed: 1,\n\t\t\t\t\tPhase: v1alpha1.AnalysisPhaseRunning, // This should flip to Failed\n\t\t\t\t\tMeasurements: []v1alpha1.Measurement{{\n\t\t\t\t\t\tPhase: v1alpha1.AnalysisPhaseFailed,\n\t\t\t\t\t\tStartedAt: timePtr(metav1.NewTime(time.Now().Add(-60 * time.Second))),\n\t\t\t\t\t\tFinishedAt: timePtr(metav1.NewTime(time.Now().Add(-60 * time.Second))),\n\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tstatus, message := c.assessRunStatus(run, run.Spec.Metrics, map[string]bool{})\n\tassert.Equal(t, v1alpha1.AnalysisPhaseRunning, status)\n\tassert.Equal(t, \"\", message)\n\tassert.Equal(t, v1alpha1.AnalysisPhaseFailed, run.Status.MetricResults[1].Phase)\n}", "func (v value) expired(c *Cache) bool{\n return time.Since(v.time)>c.expire\n}", "func testExpired(ttl time.Duration) cache.DirtyFunc {\n\treturn func(file storage.FileEntry) bool {\n\t\treturn file.LastModified.Before(time.Now().Add(-ttl))\n\t}\n}", "func certEnd(end time.Time) string {\n\tnow := time.Now()\n\tmonth, _ := 
time.ParseDuration(\"720h\")\n\tthreshold := now.Add(month)\n\tif threshold.Before(end) {\n\t\treturn timeString(end, green)\n\t} else if now.Before(end) {\n\t\treturn timeString(end, yellow)\n\t} else {\n\t\treturn timeString(end, red)\n\t}\n}", "func (loc *LocInfo) Expired() bool {\n\tif time.Now().Before(loc.ValidUntil) {\n\t\treturn false\n\t}\n\treturn true\n}", "func (cs *CleaningSession) End(endedAt time.Time) {\n\tif cs.IsActive {\n\t\t// Mark session as inactive.\n\t\tcs.EndedAt = &endedAt\n\t\tcs.IsActive = false\n\t\tif cs.StartedAt != nil {\n\t\t\t// Calculate session duration.\n\t\t\tcs.DurationSec = int(cs.EndedAt.Sub(*cs.StartedAt).Seconds())\n\t\t}\n\t}\n}", "func (t Time) After(u Time) bool {}", "func (r *OperationReqReconciler) isExpired(request *userv1.Operationrequest) bool {\n\tif request.Status.Phase != userv1.RequestCompleted && request.CreationTimestamp.Add(r.expirationTime).Before(time.Now()) {\n\t\tr.Logger.Info(\"operation request is expired\", \"name\", request.Name)\n\t\treturn true\n\t}\n\treturn false\n}", "func TestRenewBeforeTTLExpires(t *testing.T) {\n\tttl := 10\n\tc, v, secret := loginHelper(t, fmt.Sprintf(\"%vs\", ttl))\n\tif secret.LeaseDuration < 2 {\n\t\tt.Fatalf(\"expected lease to be at least 2s, but was: %d\", secret.LeaseDuration)\n\t} else if secret.LeaseDuration > 10 {\n\t\tt.Fatalf(\"expected lease to be at most 10s, but was: %d\", secret.LeaseDuration)\n\t}\n\n\t_, err := c.AuthAPIClient.GetAdmins(c.Ctx(), &auth.GetAdminsRequest{})\n\tif err != nil {\n\t\tt.Fatalf(err.Error())\n\t}\n\n\trenewer, err := v.NewRenewer(&vault.RenewerInput{\n\t\tSecret: secret,\n\t\tIncrement: ttl,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(err.Error())\n\t}\n\n\t// Begin a renewer background process, and wait until it fires\n\ttime.Sleep(time.Duration(ttl/2) * time.Second)\n\tgo renewer.Renew()\n\tdefer renewer.Stop()\n\tselect {\n\tcase err := <-renewer.DoneCh():\n\t\tif err != nil {\n\t\t\tt.Fatalf(err.Error())\n\t\t}\n\tcase 
<-renewer.RenewCh():\n\t}\n\n\t// Make sure that the vault lease was only extended by 10s\n\tleaseInfo, err := v.Logical().Write(\"/sys/leases/lookup\", map[string]interface{}{\n\t\t\"lease_id\": secret.LeaseID,\n\t})\n\tif err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\tnewDurationStr := leaseInfo.Data[\"ttl\"].(json.Number)\n\tnewDuration, err := newDurationStr.Int64()\n\tif err != nil {\n\t\tt.Fatalf(err.Error())\n\t}\n\tif newDuration < 2 {\n\t\tt.Fatalf(\"expected lease to be at least 2s, but was: %d\", newDuration)\n\t} else if newDuration > 20 {\n\t\tt.Fatalf(\"expected lease to be at most 20s, but was: %d\", newDuration)\n\t}\n\n\t// Make sure that the Pachyderm token was also renewed\n\ttime.Sleep(time.Duration(ttl/2+1) * time.Second) // wait til old lease exires\n\t_, err = c.AuthAPIClient.GetAdmins(c.Ctx(), &auth.GetAdminsRequest{})\n\tif err != nil {\n\t\tt.Fatalf(err.Error())\n\t}\n}", "func TestAlarmStops(t *testing.T) {\n\tdelta := 10 * time.Millisecond\n\tnow := time.Now()\n\ta := NewAlarm(delta, TimeFromStandardTime(now), TimeFromStandardTime(now.Add(time.Second)))\n\ta.Stop()\n\ttime.Sleep(2 * delta)\n\tselect {\n\tcase <-a.C:\n\t\tt.Fatal(\"Alarm did not shut down\")\n\tdefault:\n\t\t// ok\n\t}\n}", "func TestVerifyExpired(t *testing.T) {\n\toptions := iniVerifyOptions(t)\n\toptions.CurrentTime = time.Date(2020, 01, 01, 01, 01, 01, 0, gmt)\n\n\tp := loadProxy(\"test-samples/RfcProxy.pem\", t)\n\tif e := p.Verify(options); e == nil {\n\t\tt.Error(\"Verification must fail\")\n\t} else {\n\t\tt.Log(e)\n\t}\n}", "func TestExpiringCert(t *testing.T) {\n\t// Create cert/key pair\n\tcert, key, err := genCerts(time.Now().Add(360 * time.Hour))\n\tif err != nil {\n\t\tt.Logf(\"Unable to generate test certificates - %s\", err)\n\t\tt.FailNow()\n\t}\n\n\t// Start Listener\n\tl, err := startListener(cert, key)\n\tif err != nil {\n\t\tt.Logf(\"%s\", err)\n\t\tt.FailNow()\n\t}\n\ttime.Sleep(30 * time.Millisecond)\n\tdefer l.Close()\n\n\t// Test if it expires 
within x days\n\tt.Run(\"ExpiresWithin30Days\", func(t *testing.T) {\n\t\tvar v bool\n\t\tv, err := ExpiresWithinDays(\"127.0.0.1:9000\", 30)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Unexpected failure when calling ExpiresWithinDays - %s\", err)\n\t\t}\n\t\tif v == false {\n\t\t\tt.Errorf(\"Unexpected result when testing ExpiresWithinDays with a cert that expires in 15 days, expected true and got %+v\", v)\n\t\t}\n\t})\n\n\t// Test if it expires by x date\n\tt.Run(\"ExpiresBeforeDate\", func(t *testing.T) {\n\t\tvar v bool\n\t\tv, err := ExpiresBeforeDate(\"127.0.0.1:9000\", time.Now().Add(720*time.Hour))\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Unexpected failure when calling ExpiresBeforeDate - %s\", err)\n\t\t}\n\t\tif v == false {\n\t\t\tt.Errorf(\"Unexpected result when testing ExpiredsBeforeDate with a cert that expires in 15 days, expected true got %+v\", v)\n\t\t}\n\t})\n}", "func pastActiveDeadline(job *appsv1alpha1.BroadcastJob) bool {\n\tif job.Spec.CompletionPolicy.ActiveDeadlineSeconds == nil || job.Status.StartTime == nil {\n\t\treturn false\n\t}\n\tnow := metav1.Now()\n\tstart := job.Status.StartTime.Time\n\tduration := now.Time.Sub(start)\n\tallowedDuration := time.Duration(*job.Spec.CompletionPolicy.ActiveDeadlineSeconds) * time.Second\n\treturn duration >= allowedDuration\n}", "func (px *Paxos) Status(seq int) (Fate, interface{}) {\n\n\tif seq < px.Min() {\n\t\treturn Forgotten, nil\n\t}\n\n\tpx.mu.Lock()\n\tdefer px.mu.Unlock()\n\n\tif val, ok := px.decidedVals[seq]; ok {\n\t\treturn Decided, val\n\t}\n\treturn Pending, nil\n}", "func TestProgressResumeByHeartbeatResp(t *testing.T) {\n\tr := newTestRaft(1, []uint64{1, 2}, 5, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(r)\n\tr.becomeCandidate()\n\tr.becomeLeader()\n\tr.prs[2].Paused = true\n\n\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgBeat})\n\tif !r.prs[2].Paused {\n\t\tt.Errorf(\"paused = %v, want true\", r.prs[2].Paused)\n\t}\n\n\tr.prs[2].becomeReplicate()\n\tr.Step(pb.Message{From: 2, 
To: 1, Type: pb.MsgHeartbeatResp})\n\tif r.prs[2].Paused {\n\t\tt.Errorf(\"paused = %v, want false\", r.prs[2].Paused)\n\t}\n}", "func (t *TimeTimeout) IsElapsed(context.Context) bool { return t.After(time.Now()) }", "func (m *IosUpdateConfiguration) SetActiveHoursEnd(value *i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.TimeOnly)() {\n err := m.GetBackingStore().Set(\"activeHoursEnd\", value)\n if err != nil {\n panic(err)\n }\n}", "func (px *Paxos) Status(seq int) (Fate, interface{}) {\n\t// Your code here\n\n\t//log.Printf(\"judge status of %d\\n\", seq)\n\tif seq < px.Min() {\n\t\t//\tlog.Printf(\"forgotten\\n\")\n\t\treturn Forgotten, nil\n\t}\n\t//log.Printf(\"no forgotten\\n\")\n\tpx.mu.Lock()\n\tdefer px.mu.Unlock()\n\n\t_, exist := px.acceptor[seq]\n\n\tif exist {\n\t\treturn px.acceptor[seq].state, px.acceptor[seq].decided.Value\n\t}\n\treturn Pending, nil\n}", "func JudgeContestStatus(cst *models.Contest, t time.Time) int {\n\tif t.Before(cst.StartTime) {\n\t\treturn -1\n\t}\n\tif t.After(cst.EndTime) {\n\t\treturn 1\n\t}\n\treturn 0\n}", "func isExpired(cli *clientv3.Client, ev *clientv3.Event) (bool, error) {\n\tif ev.PrevKv == nil {\n\t\treturn false, nil\n\t}\n\n\tleaseID := clientv3.LeaseID(ev.PrevKv.Lease)\n\tif leaseID == clientv3.NoLease {\n\t\treturn false, nil\n\t}\n\n\tttlResponse, err := cli.TimeToLive(context.Background(), leaseID)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn ttlResponse.TTL == -1, nil\n}", "func waitForTableToBeActiveWithRandomSleep(tableName string, client *dynamodb.DynamoDB, maxRetries int, sleepBetweenRetriesMin time.Duration, sleepBetweenRetriesMax time.Duration, terragruntOptions *options.TerragruntOptions) error {\n\tfor i := 0; i < maxRetries; i++ {\n\t\ttableReady, err := LockTableExistsAndIsActive(tableName, client)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif tableReady {\n\t\t\tterragruntOptions.Logger.Debugf(\"Success! 
Table %s is now in active state.\", tableName)\n\t\t\treturn nil\n\t\t}\n\n\t\tsleepBetweenRetries := util.GetRandomTime(sleepBetweenRetriesMin, sleepBetweenRetriesMax)\n\t\tterragruntOptions.Logger.Debugf(\"Table %s is not yet in active state. Will check again after %s.\", tableName, sleepBetweenRetries)\n\t\ttime.Sleep(sleepBetweenRetries)\n\t}\n\n\treturn errors.WithStackTrace(TableActiveRetriesExceeded{TableName: tableName, Retries: maxRetries})\n}", "func (s *S) TestGetStatusNotExtended(c *check.C) {\n\tserver, requests := s.startServer(nonExtendedStatus)\n\tdefer server.Close()\n\n\tclient := Client{Endpoint: server.URL, UserID: \"myuser\", UserKey: \"123\"}\n\tstatus, err := client.GetStatus([]string{\"abc123\"}, false)\n\tc.Assert(err, check.IsNil)\n\n\texpectedCreateDate, _ := time.Parse(dateTimeLayout, \"2015-12-31 20:45:30\")\n\texpectedStartDate, _ := time.Parse(dateTimeLayout, \"2015-12-31 20:45:34\")\n\texpectedFinishDate, _ := time.Parse(dateTimeLayout, \"2015-12-31 21:00:03\")\n\texpectedDownloadDate, _ := time.Parse(dateTimeLayout, \"2015-12-31 20:45:32\")\n\texpected := []StatusResponse{\n\t\t{\n\t\t\tMediaID: \"abc123\",\n\t\t\tUserID: \"myuser\",\n\t\t\tSourceFile: \"http://some.video/file.mp4\",\n\t\t\tMediaStatus: \"Finished\",\n\t\t\tCreateDate: expectedCreateDate,\n\t\t\tStartDate: expectedStartDate,\n\t\t\tFinishDate: expectedFinishDate,\n\t\t\tDownloadDate: expectedDownloadDate,\n\t\t\tTimeLeft: \"21\",\n\t\t\tProgress: 100.0,\n\t\t\tFormats: []FormatStatus{\n\t\t\t\t{\n\t\t\t\t\tID: \"f123\",\n\t\t\t\t\tStatus: \"Finished\",\n\t\t\t\t\tCreateDate: expectedCreateDate,\n\t\t\t\t\tStartDate: expectedStartDate,\n\t\t\t\t\tFinishDate: expectedFinishDate,\n\t\t\t\t\tDestinations: []DestinationStatus{{Name: \"s3://mynicebucket\", Status: \"Saved\"}},\n\t\t\t\t\tSize: \"0x1080\",\n\t\t\t\t\tBitrate: \"3500k\",\n\t\t\t\t\tOutput: \"mp4\",\n\t\t\t\t\tVideoCodec: \"libx264\",\n\t\t\t\t\tAudioCodec: \"dolby_aac\",\n\t\t\t\t\tFileSize: 
\"78544430\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tc.Assert(status, check.DeepEquals, expected)\n\n\treq := <-requests\n\tc.Assert(req.query[\"action\"], check.Equals, \"GetStatus\")\n\tc.Assert(req.query[\"mediaid\"], check.Equals, \"abc123\")\n\tc.Assert(req.query[\"extended\"], check.IsNil)\n}", "func (px *Paxos) Status(seq int) (bool, interface{}) {\n\t// Your code here.\n\tcurMin := px.Min()\n\tpx.mu.Lock()\n\tdefer px.mu.Unlock()\n\n\ttargetIns := px.instances[seq]\n\tif seq < curMin || targetIns == nil {\n\t\treturn false, nil\n\t} else {\n\t\t//debug\n\t\t// fmt.Printf(\"Status: isDecided=%t, va=%v, me=%d, seq %d\\n\", targetIns.isDecided, targetIns.vDecided, px.me, seq)\n\t\treturn targetIns.isDecided, targetIns.vDecided\n\t}\n}", "func waitForTableToBeActive(tableName string, client *dynamodb.DynamoDB, maxRetries int, sleepBetweenRetries time.Duration, terragruntOptions *options.TerragruntOptions) error {\n\treturn waitForTableToBeActiveWithRandomSleep(tableName, client, maxRetries, sleepBetweenRetries, sleepBetweenRetries, terragruntOptions)\n}", "func TestPacketDropIsExpired(t *testing.T) {\n\texpiredTime := util.GetExpiredTimeInString(util.DefaultPacketDropExpirationMinutes, PacketDropLogTimeLayout)\n\texpiredPacketDrop := PacketDrop{LogTime: expiredTime}\n\tif !expiredPacketDrop.IsExpired() {\n\t\tt.Fatal(\"Expected IsExpired() return true, got false\")\n\t}\n\n\tcurTime := time.Now().Format(PacketDropLogTimeLayout)\n\tcurPacketDrop := PacketDrop{LogTime: curTime}\n\tif curPacketDrop.IsExpired() {\n\t\tt.Fatal(\"Expected IsExpired() return false, got true\")\n\t}\n\n}", "func (p *peerAddr) isExpired(timeout time.Duration, curTime time.Time) bool {\n\treturn curTime.Sub(p.lastPing.Value.(time.Time)) >= timeout\n}", "func (item *Item) Expired() bool {\n\tif item.Expiration == 0 {\n\t\treturn false\n\t}\n\treturn time.Now().UnixNano() > item.Expiration\n}", "func Timeout(expire int, debug bool) {\n\ttime.Sleep(time.Duration(expire) * time.Second)\n\tif 
debug {\n\t\tlog.Println(colorInfo + \" Time is over\")\n\t\tlog.Println(colorInfo + \" Exiting...\")\n\t}\n\tos.Exit(0)\n}", "func (err *ValidationError) IsExpired() bool { return err.exp }", "func checkStatus(currentStatus, wantedStatus int8) (err error) {\n\tswitch currentStatus {\n\tcase constant.RUNNING_STATUS_PREPARING:\n\t\terr = ErrSchedulerBeingInitilated\n\tcase constant.RUNNING_STATUS_STARTING:\n\t\terr = ErrSchedulerBeingStarted\n\tcase constant.RUNNING_STATUS_STOPPING:\n\t\terr = ErrSchedulerBeingStopped\n\tcase constant.RUNNING_STATUS_PAUSING:\n\t\terr = ErrSchedulerBeingPaused\n\t}\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif currentStatus == constant.RUNNING_STATUS_UNPREPARED &&\n\t\twantedStatus != constant.RUNNING_STATUS_PREPARING {\n\t\terr = ErrSchedulerNotInitialized\n\t\treturn\n\t}\n\n\tif currentStatus == constant.RUNNING_STATUS_STOPPED {\n\t\terr = ErrSchedulerStopped\n\t\treturn\n\t}\n\n\tswitch wantedStatus {\n\tcase constant.RUNNING_STATUS_PREPARING:\n\t\tif currentStatus != constant.RUNNING_STATUS_UNPREPARED {\n\t\t\terr = ErrSchedulerInitialized\n\t\t}\n\tcase constant.RUNNING_STATUS_STARTING:\n\t\tif currentStatus != constant.RUNNING_STATUS_PREPARED {\n\t\t\terr = ErrSchedulerStarted\n\t\t}\n\tcase constant.RUNNING_STATUS_PAUSING:\n\t\tif currentStatus != constant.RUNNING_STATUS_STARTED {\n\t\t\terr = ErrSchedulerNotStarted\n\t\t}\n\tcase constant.RUNNING_STATUS_STOPPING:\n\t\tif currentStatus != constant.RUNNING_STATUS_STARTED &&\n\t\t\tcurrentStatus != constant.RUNNING_STATUS_PAUSED {\n\t\t\terr = ErrSchedulerNotStarted\n\t\t}\n\tdefault:\n\t\terr = ErrStatusUnsupported\n\t}\n\treturn\n}", "func TestGCRAuthLifespan(t *testing.T) {\n\terr := cleanUp()\n\tif err != nil {\n\t\tt.Fatal(\"Could not guarantee that no credential file existed.\")\n\t}\n\ttested := getCredStore(t)\n\tconst expctedRefresh = \"refreshing!\"\n\texpectedExpiry := time.Now()\n\tgcrTok := &oauth2.Token{\n\t\tAccessToken: testAccessToken,\n\t\tRefreshToken: 
expctedRefresh,\n\t\tExpiry: expectedExpiry,\n\t}\n\n\t// set the credentials\n\terr = tested.SetGCRAuth(gcrTok)\n\tif err != nil {\n\t\tt.Fatalf(\"SetGCRAuth returned an error: %v\", err)\n\t}\n\n\t// retrieve them again\n\tauth, err := tested.GetGCRAuth()\n\tif err != nil {\n\t\tt.Fatalf(\"GetGCRAuth returned an error: %v\", err)\n\t}\n\tactualAccessTok := auth.initialToken.AccessToken\n\tif actualAccessTok != testAccessToken {\n\t\tt.Errorf(\"access_token: Expected \\\"%s\\\", got \\\"%s\\\"\", testAccessToken, actualAccessTok)\n\t}\n\tactualRefresh := auth.initialToken.RefreshToken\n\tif actualRefresh != expctedRefresh {\n\t\tt.Errorf(\"refresh_token: Expected \\\"%s\\\", got \\\"%s\\\"\", expctedRefresh, actualRefresh)\n\t}\n\tactualExp := auth.initialToken.Expiry\n\tif !actualExp.Equal(expectedExpiry) {\n\t\tt.Errorf(\"Expiry: Expected %v, got %v\", expectedExpiry, actualExp)\n\t}\n\n\t// delete them\n\terr = tested.DeleteGCRAuth()\n\tif err != nil {\n\t\tt.Fatalf(\"DeleteGCRAuth returned an error: %v\", err)\n\t}\n\n\t// make sure they're gone\n\tauth, err = tested.GetGCRAuth()\n\tif err == nil {\n\t\tt.Fatalf(\"Expected no credentials, got %v\", *auth)\n\t}\n}", "func (c *Client) BecomeInactive() {\n\tc.mu.Lock()\n\t// We won't join any new rounds once this is set\n\tc.leaving = true\n\tc.mu.Unlock()\n\n\tfor !c.killed() {\n\t\tc.removeSelf()\n\t\tstartTime := time.Now()\n\t\ttimeout := time.Second * 2\n\t\tfor time.Since(startTime) < timeout {\n\t\t\tvar tempActive bool\n\t\t\tc.mu.Lock()\n\t\t\t// this is set when client receives configuration update\n\t\t\ttempActive = c.active\n\t\t\tc.mu.Unlock()\n\n\t\t\tif !tempActive {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\ttime.Sleep(50 * time.Millisecond)\n\t\t}\n\t}\n}", "func (item Item) Expired() bool {\n\tif item.Expiration == 0 {\n\t\treturn false\n\t}\n\treturn time.Now().UnixNano() > item.Expiration\n}", "func TestRequestUpFromStop(t *testing.T) {\n\t// setup\n\tdwController := setup(t, 2, Stopped, 
[]common.PiPin{common.OpenerUp})\n\n\t// test\n\tdwController.SetRequestedFloor(3)\n\twaitForStatus(t, 2, 3, Up, dwController, 3*time.Second)\n}", "func TestGetSLIStartTimeAfterEndTime(t *testing.T) {\n\tdh := NewDynatraceHandler(\"http://dynatrace\", \"sockshop\", \"dev\", \"carts\", nil, nil, \"\")\n\n\tstart := time.Now().Format(time.RFC3339)\n\t// artificially increase end time to be in the future\n\tend := time.Now().Add(-1 * time.Minute).Format(time.RFC3339)\n\tvalue, err := dh.GetSLIValue(Throughput, start, end, []*events.SLIFilter{})\n\n\tassert.EqualValues(t, 0.0, value)\n\tassert.NotNil(t, err, nil)\n\tassert.EqualValues(t, \"start time needs to be before end time\", err.Error())\n}", "func TestLoginExpires(t *testing.T) {\n\tc, _, secret := loginHelper(t, \"2s\")\n\n\t// Make sure token is valid\n\t_, err := c.AuthAPIClient.GetAdmins(c.Ctx(), &auth.GetAdminsRequest{})\n\tif err != nil {\n\t\tt.Fatalf(err.Error())\n\t}\n\n\t// Wait for TTL to expire and check that token is no longer valid\n\ttime.Sleep(time.Duration(secret.LeaseDuration+1) * time.Second)\n\t_, err = c.AuthAPIClient.GetAdmins(c.Ctx(), &auth.GetAdminsRequest{})\n\tif err == nil {\n\t\tt.Fatalf(\"API call should fail, but token did not expire\")\n\t}\n}", "func (timeout *Timeout) IsActive() bool {\n\treturn timeout.state == Active\n}", "func (r *restarter) apply(ctx context.Context, now time.Time, status string) bool {\n\thealthy := func() {\n\t\tif !r.unhealthyState.IsZero() {\n\t\t\tr.logger.Debug(\"canceling restart because check became healthy\")\n\t\t\tr.unhealthyState = time.Time{}\n\t\t}\n\t}\n\tswitch status {\n\tcase \"critical\": // consul\n\tcase string(structs.CheckFailure): // nomad\n\tcase string(structs.CheckPending): // nomad\n\tcase \"warning\": // consul\n\t\tif r.ignoreWarnings {\n\t\t\t// Warnings are ignored, reset state and exit\n\t\t\thealthy()\n\t\t\treturn false\n\t\t}\n\tdefault:\n\t\t// All other statuses are ok, reset state and exit\n\t\thealthy()\n\t\treturn 
false\n\t}\n\n\tif now.Before(r.graceUntil) {\n\t\t// In grace period, exit\n\t\treturn false\n\t}\n\n\tif r.unhealthyState.IsZero() {\n\t\t// First failure, set restart deadline\n\t\tif r.timeLimit != 0 {\n\t\t\tr.logger.Debug(\"check became unhealthy. Will restart if check doesn't become healthy\", \"time_limit\", r.timeLimit)\n\t\t}\n\t\tr.unhealthyState = now\n\t}\n\n\t// restart timeLimit after start of this check becoming unhealthy\n\trestartAt := r.unhealthyState.Add(r.timeLimit)\n\n\t// Must test >= because if limit=1, restartAt == first failure\n\tif now.Equal(restartAt) || now.After(restartAt) {\n\t\t// hasn't become healthy by deadline, restart!\n\t\tr.logger.Debug(\"restarting due to unhealthy check\")\n\n\t\t// Tell TaskRunner to restart due to failure\n\t\treason := fmt.Sprintf(\"healthcheck: check %q unhealthy\", r.checkName)\n\t\tevent := structs.NewTaskEvent(structs.TaskRestartSignal).SetRestartReason(reason)\n\t\tgo asyncRestart(ctx, r.logger, r.task, event)\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (px *Paxos) Status(seq int) (Fate, interface{}) {\n\t// Your code here.\n\tif seq < px.Min() {\n\t\treturn Forgotten, nil\n\t}\n\n\tnode, ok := px.prepareStatus.Find(seq)\n\tif ok && node.State.Done {\n\t\treturn Decided, node.State.VA\n\t}\n\treturn Pending, nil\n}", "func (v Value) IsExpiredForTime(time int64) bool {\n\treturn time >= v.EndSeconds\n}", "func (px *Paxos) Status(seq int) (Fate, interface{}) {\n\tfate := Pending\n\tvar retValue interface{}\n\tpx.mu.Lock()\n\tif seq < px.minSeq {\n\t\tfate = Forgotten\n\t} else if px.isDecided(seq) {\n\t\tfate = Decided\n\t\tretValue = px.getDecidedValue(seq)\n\t}\n\tpx.mu.Unlock()\n\treturn fate, retValue\n}", "func (t *Token) Expired() bool {\n\treturn time.Now().Unix() >= t.ExpiredAt\n}", "func (v Value) IsActiveForTime(time int64) bool {\n\treturn time >= v.StartSeconds && time < v.EndSeconds\n}", "func (tm* Timeout)Update(){\n\ttm.Elapsed = time.Now().Sub(tm.Start)\n tm.HasTimedOut = 
tm.Elapsed > tm.TaskTimeout \n}", "func (s *Session) Expired(dur time.Duration) bool {\n\treturn time.Now().After(s.lastAccess.Add(dur))\n}" ]
[ "0.58127785", "0.5769869", "0.5630366", "0.5617064", "0.56041414", "0.5535202", "0.5517026", "0.5507166", "0.5478939", "0.54784715", "0.54395", "0.54349065", "0.54201555", "0.5401868", "0.5392047", "0.53748745", "0.5346386", "0.5321631", "0.53121084", "0.53112787", "0.53027177", "0.52823097", "0.52798134", "0.5242435", "0.5234962", "0.5181048", "0.5143215", "0.51230544", "0.51187235", "0.5109071", "0.51064044", "0.5098107", "0.5083195", "0.5080631", "0.5080154", "0.5068439", "0.50616574", "0.5056146", "0.505467", "0.5046951", "0.5043758", "0.5043491", "0.50411105", "0.5022543", "0.50202733", "0.5014917", "0.5010061", "0.5009911", "0.50019765", "0.50006723", "0.499877", "0.49836686", "0.49823153", "0.49801674", "0.49765584", "0.4976134", "0.4975368", "0.49693385", "0.4969147", "0.4964394", "0.49643785", "0.49595988", "0.49539825", "0.49524286", "0.49512866", "0.4949498", "0.49447352", "0.49296233", "0.49284983", "0.49257284", "0.49140838", "0.49101865", "0.4908879", "0.49028784", "0.4899148", "0.4898136", "0.48929492", "0.48887157", "0.48867747", "0.48811015", "0.48763874", "0.48577088", "0.48541445", "0.4854116", "0.48539287", "0.4849031", "0.4844953", "0.48432904", "0.48419428", "0.48339728", "0.48310214", "0.48300347", "0.48241964", "0.48233247", "0.48230052", "0.482169", "0.4814955", "0.48118466", "0.48092213", "0.48001447" ]
0.5692739
2
Test that an initially expired promo may not be displayed
func TestPreExpired(t *testing.T) { var tm time.Time // initial value is in the past reset() p, _ := New("Promo1", tm, tm) if res := p.AllowDisplay(ip); res != false { t.Errorf("Bad Promo status, got: %v want %v", res, false) } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestExpiration(t *testing.T) {\n\tmockclock := clock.NewMock()\n\tsetClock(mockclock) // replace clock with mock for speedy testing\n\n\tnow := mockclock.Now()\n\treset()\n\tp, _ := New(\"Promo1\", now, now.Add(1*time.Hour))\n\n\truntime.Gosched()\n\n\tif res := p.AllowDisplay(ip); res != true {\n\t\tt.Errorf(\"Bad Promo status, got: %v want %v\", res, true)\n\t}\n\n\t// wind clock forward until after display period\n\tmockclock.Add(1*time.Hour + 1*time.Second)\n\n\tif res := p.AllowDisplay(ip); res != false {\n\t\tt.Errorf(\"Bad Promo status, got: %v want %v\", res, false)\n\t}\n}", "func (exp *ControlleeExpectations) isExpired() bool {\n\treturn clock.RealClock{}.Since(exp.timestamp) > ExpectationsTimeout\n}", "func (s *StaticProvider) IsExpired() bool {\n\treturn false\n}", "func (e *EnvProvider) IsExpired() bool {\n\treturn !e.retrieved\n}", "func TestExpiration(t *testing.T) {\n\tr := NewRegistrar()\n\tr.Add(session)\n\ttime.Sleep(expireDuration)\n\tif r.Validate(user) {\n\t\tt.Error(\"The token has expired, but the user was still validated!\")\n\t}\n\tt.Log(\"The token expired, and the user was succesfully reported Invalid.\")\n}", "func (c *mockCredentialsProvider) IsExpired() bool {\n\treturn false\n}", "func isUFAExpired(ufaDetails map[string]interface{}) bool {\n\tif ufaDetails != nil {\n\t\traisedTotal := getSafeNumber(ufaDetails[\"raisedInvTotal\"])\n\t\ttotalCharge := getSafeNumber(ufaDetails[\"netCharge\"])\n\t\ttolerance := getSafeNumber(ufaDetails[\"chargTolrence\"])\n\t\tmaxCharge := totalCharge + (totalCharge * tolerance / 100)\n\t\treturn !(raisedTotal < maxCharge)\n\t}\n\treturn true\n}", "func (err *ValidationError) IsExpired() bool { return err.exp }", "func TestPacketDropIsExpired(t *testing.T) {\n\texpiredTime := util.GetExpiredTimeInString(util.DefaultPacketDropExpirationMinutes, PacketDropLogTimeLayout)\n\texpiredPacketDrop := PacketDrop{LogTime: expiredTime}\n\tif !expiredPacketDrop.IsExpired() {\n\t\tt.Fatal(\"Expected 
IsExpired() return true, got false\")\n\t}\n\n\tcurTime := time.Now().Format(PacketDropLogTimeLayout)\n\tcurPacketDrop := PacketDrop{LogTime: curTime}\n\tif curPacketDrop.IsExpired() {\n\t\tt.Fatal(\"Expected IsExpired() return false, got true\")\n\t}\n\n}", "func (me TGetReviewableHITsSortProperty) IsExpiration() bool { return me.String() == \"Expiration\" }", "func isExpired(cli *clientv3.Client, ev *clientv3.Event) (bool, error) {\n\tif ev.PrevKv == nil {\n\t\treturn false, nil\n\t}\n\n\tleaseID := clientv3.LeaseID(ev.PrevKv.Lease)\n\tif leaseID == clientv3.NoLease {\n\t\treturn false, nil\n\t}\n\n\tttlResponse, err := cli.TimeToLive(context.Background(), leaseID)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn ttlResponse.TTL == -1, nil\n}", "func TestVerifyExpired(t *testing.T) {\n\toptions := iniVerifyOptions(t)\n\toptions.CurrentTime = time.Date(2020, 01, 01, 01, 01, 01, 0, gmt)\n\n\tp := loadProxy(\"test-samples/RfcProxy.pem\", t)\n\tif e := p.Verify(options); e == nil {\n\t\tt.Error(\"Verification must fail\")\n\t} else {\n\t\tt.Log(e)\n\t}\n}", "func (s *Static) IsExpired() bool {\n\treturn false\n}", "func (me TEventType) IsHITExpired() bool { return me.String() == \"HITExpired\" }", "func (me TSearchHITsSortProperty) IsExpiration() bool { return me.String() == \"Expiration\" }", "func (r *OperationReqReconciler) isExpired(request *userv1.Operationrequest) bool {\n\tif request.Status.Phase != userv1.RequestCompleted && request.CreationTimestamp.Add(r.expirationTime).Before(time.Now()) {\n\t\tr.Logger.Info(\"operation request is expired\", \"name\", request.Name)\n\t\treturn true\n\t}\n\treturn false\n}", "func (r IABResponse) IsExpired() bool {\n\tswitch {\n\tcase !r.IsValidSubscription():\n\t\treturn false\n\tdefault:\n\t\tnow := time.Now().UnixNano() / int64(time.Millisecond)\n\t\treturn r.SubscriptionPurchase.ExpiryTimeMillis < now\n\t}\n}", "func (m *VaultCredsProvider) IsExpired() bool {\n\treturn false\n}", "func (dc 
*DelegatedCredential) isExpired(start, now time.Time) bool {\n\tend := start.Add(dc.cred.validTime)\n\treturn !now.Before(end)\n}", "func (item Item) isExpired() bool {\n\tif item.Expiration == 0 {\n\t\treturn false\n\t}\n\treturn item.Expiration < time.Now().UnixNano()\n}", "func (task *Task) IsExpired() bool {\n\tswitch task.Schedule.Regularity {\n\tcase apiModels.OneTime, apiModels.Trigger:\n\t\treturn common.ValidTime(time.Now().UTC(), task.RunTimeUTC)\n\tcase apiModels.Recurrent:\n\t\treturn !common.ValidTime(task.Schedule.EndRunTime.UTC(), task.RunTimeUTC)\n\t}\n\treturn true\n}", "func (b *ProviderBasis) IsExpired() bool {\n\tif b.CurrentTime == nil {\n\t\tb.CurrentTime = time.Now\n\t}\n\treturn !b.AlwaysValid && !b.CurrentTime().Before(b.expiration)\n}", "func HasExpired(dev *schemas.Developer) bool {\n\t// null time or before now\n\treturn dev.Expiration.Equal(time.Time{}) || dev.Expiration.Before(time.Now())\n}", "func (t *timer) checkExpiration() bool {\n\t// Transition to fully disabled state if we're just consuming an\n\t// orphaned timer.\n\tif t.state == timerStateOrphaned {\n\t\tt.state = timerStateDisabled\n\t\treturn false\n\t}\n\n\t// The timer is enabled, but it may have expired early. 
Check if that's\n\t// the case, and if so, reset the runtime timer to the correct time.\n\tnow := t.clock.NowMonotonic()\n\tif now.Before(t.target) {\n\t\tt.clockTarget = t.target\n\t\tt.timer.Reset(t.target.Sub(now))\n\t\treturn false\n\t}\n\n\t// The timer has actually expired, disable it for now and inform the\n\t// caller.\n\tt.state = timerStateDisabled\n\treturn true\n}", "func expired(token *Token) bool {\n\tif token.Expires.IsZero() && len(token.Access) != 0 {\n\t\treturn false\n\t}\n\treturn token.Expires.Add(-expiryDelta).\n\t\tBefore(time.Now())\n}", "func TestActivation(t *testing.T) {\n\tmockclock := clock.NewMock()\n\tsetClock(mockclock) // replace clock with mock for speedy testing\n\n\tnow := mockclock.Now()\n\treset()\n\tp, _ := New(\"Promo1\", now.Add(1*time.Hour), now.Add(24*time.Hour))\n\n\truntime.Gosched()\n\n\tif res := p.AllowDisplay(ip); res != false {\n\t\tt.Errorf(\"Bad Promo status, got: %v want %v\", res, false)\n\t}\n\n\t// wind clock forward until after start time; enter display period\n\tmockclock.Add(2 * time.Hour)\n\n\tif res := p.AllowDisplay(ip); res != true {\n\t\tt.Errorf(\"Bad Promo status, got: %v want %v\", res, true)\n\t}\n}", "func (r *record) isExpired(now time.Time) bool {\n\tif r.Expires == 0 {\n\t\treturn false\n\t}\n\texpiryDateUTC := time.Unix(r.Expires, 0).UTC()\n\treturn now.UTC().After(expiryDateUTC)\n}", "func (r *resourceRecord) isCloseToExpiring() bool {\n\telapsed := (r.initialTimeToLive - r.remainingTimeToLive).Seconds()\n\n\t// RFC 6762 section 10 specifies that records should be refreshed when more than 80% of\n\t// their initial time-to-live has elapsed.\n\treturn (elapsed / r.initialTimeToLive.Seconds()) > 0.8\n}", "func TestLoginExpires(t *testing.T) {\n\tc, _, secret := loginHelper(t, \"2s\")\n\n\t// Make sure token is valid\n\t_, err := c.AuthAPIClient.GetAdmins(c.Ctx(), &auth.GetAdminsRequest{})\n\tif err != nil {\n\t\tt.Fatalf(err.Error())\n\t}\n\n\t// Wait for TTL to expire and check that token is 
no longer valid\n\ttime.Sleep(time.Duration(secret.LeaseDuration+1) * time.Second)\n\t_, err = c.AuthAPIClient.GetAdmins(c.Ctx(), &auth.GetAdminsRequest{})\n\tif err == nil {\n\t\tt.Fatalf(\"API call should fail, but token did not expire\")\n\t}\n}", "func (c Choco) Expired() bool {\n\treturn time.Since(c.TimeStamp) > time.Second\n}", "func (i *info) expired(now int64) bool {\n\treturn i.TTLStamp <= now\n}", "func (l *License) Expired() bool {\n\treturn l.Exp.IsZero() == false && time.Now().After(l.Exp)\n}", "func (t *TombstoneGC) PendingExpiration() bool {\n\tt.lock.Lock()\n\tdefer t.lock.Unlock()\n\treturn len(t.expires) > 0\n}", "func testNoNilTimeoutReplacement(ctx context.Context, t *testing.T, w *Wallet) {\n\terr := w.Unlock(ctx, testPrivPass, nil)\n\tif err != nil {\n\t\tt.Fatal(\"failed to unlock wallet\")\n\t}\n\ttimeChan := make(chan time.Time)\n\terr = w.Unlock(ctx, testPrivPass, timeChan)\n\tif err != nil {\n\t\tt.Fatal(\"failed to unlock wallet with time channel\")\n\t}\n\tselect {\n\tcase timeChan <- time.Time{}:\n\tcase <-time.After(100 * time.Millisecond):\n\t\tt.Fatal(\"time channel was not read in 100ms\")\n\t}\n\tif w.Locked() {\n\t\tt.Fatal(\"expected wallet to remain unlocked due to previous unlock without timeout\")\n\t}\n}", "func leaseExpired(grantedAt time.Time) bool {\n\treturn time.Since(grantedAt).Seconds() > (storagerpc.LeaseSeconds + storagerpc.LeaseGuardSeconds)\n}", "func (item Item) expired() bool {\n\tif item.Expiration == 0 {\n\t\treturn false\n\t}\n\treturn time.Now().UnixNano() > item.Expiration\n}", "func (item *item) expired() bool {\n\tif item.ttl <= 0 {\n\t\treturn false\n\t}\n\treturn item.expireAt.Before(time.Now())\n}", "func (p *SSOCredentialProvider) IsExpired() bool {\n\tt, err := time.Parse(\"2006-01-02T15:04:05UTC\", p.Cache.ExpiresAt)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn t.Before(time.Now())\n}", "func (i *Info) expired(now int64) bool {\n\treturn i.TTLStamp <= now\n}", "func 
TestCertificateNotExpired(t *testing.T) {\n\t// given\n\tvar expiredDate = time.Now().Add(time.Hour * 24 * (30 + 1)) // 31 days.\n\n\tvar fakeCerts = []*x509.Certificate{\n\t\t{\n\t\t\tNotAfter: expiredDate,\n\t\t\tSubject: pkix.Name{\n\t\t\t\tCommonName: \"Test cert\",\n\t\t\t},\n\t\t},\n\t}\n\n\t// when\n\tmsg := getCertificateChainMsg(fakeCerts)\n\n\t// then\n\tif msg != \"\" {\n\t\tt.Fatalf(\"Expected empty message was: %s\", msg)\n\t}\n}", "func (p *DiscoveryProtocol) requestExpired(req *api.DiscoveryRequest) bool {\n\tnow := uint32(time.Now().Unix())\n\tif req.DiscoveryMsgData.Expiry < now {\n\t\tlog.Printf(\"Now: %d, expiry: %d\", now, req.DiscoveryMsgData.Expiry)\n\t\tlog.Println(\"Message Expired. Dropping message... \")\n\t\treturn true\n\t}\n\n\treturn false\n}", "func isQueryExpired(expires int64) bool {\n\tif expires < time.Now().Unix() {\n\t\tlog.Info(\"Query expired\", \"expirationTime\", expires, \"now\", time.Now().Unix())\n\t\treturn true\n\t}\n\tlog.Info(\"Query is not expired\")\n\treturn false\n}", "func (tcertBlock *TCertBlock) isExpired() bool {\n\ttsNow := time.Now()\n\tnotAfter := tcertBlock.GetTCert().GetCertificate().NotAfter\n\tpoolLogger.Debugf(\"#isExpired: %s now: %s deadline: %s \\n \", tsNow.Add(fivemin).After(notAfter), tsNow, notAfter)\n\tif tsNow.Add(fivemin).After(notAfter) {\n\t\treturn true\n\t}\n\treturn false\n}", "func (a *AssumeRoleProvider) IsExpired() bool {\n\treturn a.expiration.Before(time.Now())\n}", "func IsExpired(targetDate time.Time, timeAdded time.Duration) bool {\n\treturn time.Since(targetDate.Add(timeAdded)) > 0\n}", "func (c CachedObject) IsExpired() bool {\r\n\r\n\telapsed := time.Now().Sub(c.CreatedAt.Add(time.Hour * getExpiryTimeInHrs()))\r\n\r\n\tif elapsed > 0.0 {\r\n\t\treturn true\r\n\t}\r\n\r\n\treturn false\r\n}", "func (c Certificate) Expired(now time.Time, skew time.Duration) bool {\n\treturn c.IssuedAt.After(now) || c.ExpiresAt.Before(now)\n}", "func (oc *OrdererCapabilities) ExpirationCheck() 
bool {\n\treturn oc.ExpirationVal\n}", "func (l Info) IsExpiredWithGracePeriod() bool {\n\treturn l.ExpiresAt.Add(3 * 24 * time.Hour).Before(time.Now())\n}", "func TestRenewBeforeTTLExpires(t *testing.T) {\n\tttl := 10\n\tc, v, secret := loginHelper(t, fmt.Sprintf(\"%vs\", ttl))\n\tif secret.LeaseDuration < 2 {\n\t\tt.Fatalf(\"expected lease to be at least 2s, but was: %d\", secret.LeaseDuration)\n\t} else if secret.LeaseDuration > 10 {\n\t\tt.Fatalf(\"expected lease to be at most 10s, but was: %d\", secret.LeaseDuration)\n\t}\n\n\t_, err := c.AuthAPIClient.GetAdmins(c.Ctx(), &auth.GetAdminsRequest{})\n\tif err != nil {\n\t\tt.Fatalf(err.Error())\n\t}\n\n\trenewer, err := v.NewRenewer(&vault.RenewerInput{\n\t\tSecret: secret,\n\t\tIncrement: ttl,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(err.Error())\n\t}\n\n\t// Begin a renewer background process, and wait until it fires\n\ttime.Sleep(time.Duration(ttl/2) * time.Second)\n\tgo renewer.Renew()\n\tdefer renewer.Stop()\n\tselect {\n\tcase err := <-renewer.DoneCh():\n\t\tif err != nil {\n\t\t\tt.Fatalf(err.Error())\n\t\t}\n\tcase <-renewer.RenewCh():\n\t}\n\n\t// Make sure that the vault lease was only extended by 10s\n\tleaseInfo, err := v.Logical().Write(\"/sys/leases/lookup\", map[string]interface{}{\n\t\t\"lease_id\": secret.LeaseID,\n\t})\n\tif err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\tnewDurationStr := leaseInfo.Data[\"ttl\"].(json.Number)\n\tnewDuration, err := newDurationStr.Int64()\n\tif err != nil {\n\t\tt.Fatalf(err.Error())\n\t}\n\tif newDuration < 2 {\n\t\tt.Fatalf(\"expected lease to be at least 2s, but was: %d\", newDuration)\n\t} else if newDuration > 20 {\n\t\tt.Fatalf(\"expected lease to be at most 20s, but was: %d\", newDuration)\n\t}\n\n\t// Make sure that the Pachyderm token was also renewed\n\ttime.Sleep(time.Duration(ttl/2+1) * time.Second) // wait til old lease exires\n\t_, err = c.AuthAPIClient.GetAdmins(c.Ctx(), &auth.GetAdminsRequest{})\n\tif err != nil 
{\n\t\tt.Fatalf(err.Error())\n\t}\n}", "func handleExpiredMitigation(resource *libcoap.Resource, customer *models.Customer) {\n _, cuid, mid, err := controllers.ParseURIPath(strings.Split(resource.UriPath(), \"/\"))\n if err != nil {\n log.Warnf(\"Failed to parse Uri-Path, error: %s\", err)\n }\n if mid == nil {\n log.Warn(\"Mid is not presented in uri-path\")\n return\n }\n\n mids, err := models.GetMitigationIds(customer.Id, cuid)\n if err != nil {\n log.Warnf(\"Get mitigation scopes error: %+v\", err)\n return\n }\n dup := isDuplicateMitigation(mids, *mid)\n\n controllers.DeleteMitigation(customer.Id, cuid, *mid, 0)\n if !dup {\n resource.ToRemovableResource()\n }\n}", "func freshOCSP(resp *ocsp.Response) bool {\n\tnextUpdate := resp.NextUpdate\n\t// If there is an OCSP responder certificate, and it expires before the\n\t// OCSP response, use its expiration date as the end of the OCSP\n\t// response's validity period.\n\tif resp.Certificate != nil && resp.Certificate.NotAfter.Before(nextUpdate) {\n\t\tnextUpdate = resp.Certificate.NotAfter\n\t}\n\t// start checking OCSP staple about halfway through validity period for good measure\n\trefreshTime := resp.ThisUpdate.Add(nextUpdate.Sub(resp.ThisUpdate) / 2)\n\treturn time.Now().Before(refreshTime)\n}", "func (v value) expired(c *Cache) bool{\n return time.Since(v.time)>c.expire\n}", "func isLeaseExpired(lease *db.Lease, context *leaseContext, actualPrincipalSpend float64, principalBudgetAmount float64) (bool, db.LeaseStatusReason) {\n\n\tif context.expireDate >= lease.ExpiresOn {\n\t\treturn true, db.LeaseExpired\n\t} else if context.actualSpend > lease.BudgetAmount {\n\t\treturn true, db.LeaseOverBudget\n\t} else if actualPrincipalSpend > principalBudgetAmount {\n\t\treturn true, db.LeaseOverPrincipalBudget\n\t}\n\n\treturn false, db.LeaseActive\n}", "func IsExpired(ctx Context, t UnixTime) bool {\n\tblockNow, err := BlockTime(ctx)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"%+v\", err))\n\t}\n\treturn t <= 
AsUnixTime(blockNow)\n}", "func testExpired(ttl time.Duration) cache.DirtyFunc {\n\treturn func(file storage.FileEntry) bool {\n\t\treturn file.LastModified.Before(time.Now().Add(-ttl))\n\t}\n}", "func (t TToken) checkExpired() error {\n\texp, err := t.getExpiry()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif secondsPassed := time.Since(exp).Seconds(); secondsPassed > 30 {\n\t\treturn errors.New(\"token expired more than 30 seconds ago (#) \")\n\t}\n\treturn nil\n}", "func (page RoleEligibilityScheduleListResultPage) NotDone() bool {\n\treturn !page.reslr.IsEmpty()\n}", "func (staticMgr *staticManager) TimeTilNextPayment(host *host.Host) time.Duration {\n\treturn time.Duration(0)\n}", "func TestExpiry(t *testing.T) {\n\t_, privateBytes, err := GenerateKeyPair()\n\trequire.NoError(t, err)\n\tprivateKey, err := utils.ParsePrivateKey(privateBytes)\n\trequire.NoError(t, err)\n\n\tclock := clockwork.NewFakeClockAt(time.Now())\n\n\t// Create a new key that can be used to sign and verify tokens.\n\tkey, err := New(&Config{\n\t\tClock: clock,\n\t\tPrivateKey: privateKey,\n\t\tAlgorithm: defaults.ApplicationTokenAlgorithm,\n\t\tClusterName: \"example.com\",\n\t})\n\trequire.NoError(t, err)\n\n\t// Sign a token with a 1 minute expiration.\n\ttoken, err := key.Sign(SignParams{\n\t\tUsername: \"[email protected]\",\n\t\tRoles: []string{\"foo\", \"bar\"},\n\t\tTraits: wrappers.Traits{\n\t\t\t\"trait1\": []string{\"value-1\", \"value-2\"},\n\t\t},\n\t\tExpires: clock.Now().Add(1 * time.Minute),\n\t\tURI: \"http://127.0.0.1:8080\",\n\t})\n\trequire.NoError(t, err)\n\n\t// Verify that the token is still valid.\n\tclaims, err := key.Verify(VerifyParams{\n\t\tUsername: \"[email protected]\",\n\t\tURI: \"http://127.0.0.1:8080\",\n\t\tRawToken: token,\n\t})\n\trequire.NoError(t, err)\n\trequire.Equal(t, claims.Username, \"[email protected]\")\n\trequire.Equal(t, claims.Roles, []string{\"foo\", \"bar\"})\n\trequire.Equal(t, claims.IssuedAt, josejwt.NewNumericDate(clock.Now()))\n\n\t// 
Advance time by two minutes and verify the token is no longer valid.\n\tclock.Advance(2 * time.Minute)\n\t_, err = key.Verify(VerifyParams{\n\t\tUsername: \"[email protected]\",\n\t\tURI: \"http://127.0.0.1:8080\",\n\t\tRawToken: token,\n\t})\n\trequire.Error(t, err)\n}", "func TestRetryNotRequired(t *testing.T) {\n\tcheck := assert.New(t)\n\tretryRequired := checkRetryRequired(http.StatusConflict)\n\tcheck.Equal(retryRequired, false)\n}", "func (page RoleEligibilityScheduleInstanceListResultPage) NotDone() bool {\n\treturn !page.resilr.IsEmpty()\n}", "func isTokenExpired(jwtData *JWTData) bool {\n\n\tnowTime := time.Now().Unix()\n\texpireTime := int64(jwtData.Exp)\n\n\tif expireTime < nowTime {\n\t\tlog.Warnf(\"Token is expired!\")\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (page SingleSignOnResourceListResponsePage) NotDone() bool {\n\treturn !page.ssorlr.IsEmpty()\n}", "func (c *DomainController) NotExpired() *DomainController {\n\tc.validators = append(c.validators, func(controller *DomainController) error {\n\t\treturn controller.notExpired()\n\t})\n\treturn c\n}", "func (page RoleEligibilityScheduleRequestListResultPage) NotDone() bool {\n\treturn !page.resrlr.IsEmpty()\n}", "func handleExpiredMitigation(requestPath []string, resource *libcoap.Resource, customer *models.Customer, context *libcoap.Context, status int) {\n _, cuid, mid, err := messages.ParseURIPath(requestPath)\n if err != nil {\n log.Warnf(\"Failed to parse Uri-Path, error: %s\", err)\n return\n }\n if mid == nil {\n log.Warn(\"Mid is not presented in uri-path\")\n return\n }\n\n mids, err := models.GetMitigationIds(customer.Id, cuid)\n if err != nil {\n log.Warnf(\"Get mitigation scopes error: %+v\", err)\n return\n }\n\n resource.SetCustomerId(&customer.Id)\n dup := isDuplicateMitigation(mids, *mid)\n\n if !dup {\n resource.ToRemovableResource()\n }\n\n // Enable removable for resource all if the last mitigation is expired\n if len(mids) == 1 && mids[0] == *mid && status == 
models.Terminated {\n uriPath := messages.MessageTypes[messages.MITIGATION_REQUEST].Path\n queryAll := uriPath + \"/cuid=\" + cuid\n resourceAll := context.GetResourceByQuery(&queryAll)\n if resourceAll != nil {\n resourceAll.ToRemovableResource()\n sizeBlock2 := resourceAll.GetSizeBlock2FromSubscribers()\n if sizeBlock2 >= 0 {\n resourceAll.SetIsBlockwiseInProgress(true)\n }\n }\n }\n}", "func (s *Suite) TestAttemptExpiration(c *check.C) {\n\tworkSpec, worker := s.makeWorkSpecAndWorker(c)\n\tworkUnit, err := workSpec.AddWorkUnit(\"a\", map[string]interface{}{}, 0.0)\n\tc.Assert(err, check.IsNil)\n\n\tattempts, err := worker.RequestAttempts(coordinate.AttemptRequest{})\n\tc.Assert(err, check.IsNil)\n\tc.Assert(attempts, check.HasLen, 1)\n\tattempt := attempts[0]\n\n\tstatus, err := attempt.Status()\n\tc.Assert(err, check.IsNil)\n\tc.Check(status, check.Equals, coordinate.Pending)\n\n\tattempts, err = worker.RequestAttempts(coordinate.AttemptRequest{})\n\tc.Assert(err, check.IsNil)\n\tc.Check(attempts, check.HasLen, 0)\n\n\t// There is a default expiration of 15 minutes (checked elsewhere)\n\t// So if we wait for, say, 20 minutes we should become expired\n\ts.Clock.Add(time.Duration(20) * time.Minute)\n\tstatus, err = attempt.Status()\n\tc.Assert(err, check.IsNil)\n\tc.Check(status, check.Equals, coordinate.Expired)\n\n\t// The work unit should be \"available\" for all purposes\n\tmeta, err := workSpec.Meta(true)\n\tc.Assert(err, check.IsNil)\n\tc.Check(meta.AvailableCount, check.Equals, 1)\n\tc.Check(meta.PendingCount, check.Equals, 0)\n\n\tuStatus, err := workUnit.Status()\n\tc.Assert(err, check.IsNil)\n\tc.Check(uStatus, check.Equals, coordinate.AvailableUnit)\n\n\t// If we request more attempts we should get back the expired\n\t// unit again\n\tattempts, err = worker.RequestAttempts(coordinate.AttemptRequest{})\n\tc.Assert(err, check.IsNil)\n\tc.Assert(attempts, check.HasLen, 1)\n\n\tstatus, err = attempts[0].Status()\n\tc.Assert(err, 
check.IsNil)\n\tc.Check(status, check.Equals, coordinate.Pending)\n}", "func (s *subscription) IsExpired() bool {\n\treturn s.ExpiresAt.Before(time.Now())\n}", "func (d *Result) HasExpiration() bool {\n\treturn d.withExpiration\n}", "func (p *Pictures) IsExpired() bool {\n\treturn time.Now().After(p.ExpiresAt)\n}", "func (m *ProviderTerms) Expired() bool {\n\treturn m.ExpiredAt < time.Now()+TermsExpiredDuration\n}", "func TestResume(t *testing.T) {\n\tdefer test.Guard(t)()\n\n\tt.Run(\"not expired\", func(t *testing.T) {\n\t\ttestResume(t, false, false, true)\n\t})\n\tt.Run(\"expired not revealed\", func(t *testing.T) {\n\t\ttestResume(t, true, false, false)\n\t})\n\tt.Run(\"expired revealed\", func(t *testing.T) {\n\t\ttestResume(t, true, true, true)\n\t})\n}", "func (b *Object) expired() bool {\n\tif b.expire <= 0 {\n\t\treturn false\n\t}\n\n\treturn time.Now().Unix() >= b.expire\n}", "func (s *Suite) TestAttemptExpiration() {\n\tsts := SimpleTestSetup{\n\t\tNamespaceName: \"TestAttemptExpiration\",\n\t\tWorkerName: \"worker\",\n\t\tWorkSpecName: \"spec\",\n\t\tWorkUnitName: \"a\",\n\t}\n\tsts.SetUp(s)\n\tdefer sts.TearDown(s)\n\n\tattempts, err := sts.Worker.RequestAttempts(coordinate.AttemptRequest{})\n\tif !(s.NoError(err) && s.Len(attempts, 1)) {\n\t\treturn\n\t}\n\tattempt := attempts[0]\n\n\tstatus, err := attempt.Status()\n\tif s.NoError(err) {\n\t\ts.Equal(coordinate.Pending, status)\n\t}\n\n\tsts.RequestNoAttempts(s)\n\n\t// There is a default expiration of 15 minutes (checked elsewhere)\n\t// So if we wait for, say, 20 minutes we should become expired\n\ts.Clock.Add(time.Duration(20) * time.Minute)\n\tstatus, err = attempt.Status()\n\tif s.NoError(err) {\n\t\ts.Equal(coordinate.Expired, status)\n\t}\n\n\t// The work unit should be \"available\" for all purposes\n\tmeta, err := sts.WorkSpec.Meta(true)\n\tif s.NoError(err) {\n\t\ts.Equal(1, meta.AvailableCount)\n\t\ts.Equal(0, meta.PendingCount)\n\t}\n\tsts.CheckUnitStatus(s, 
coordinate.AvailableUnit)\n\n\t// If we request more attempts we should get back the expired\n\t// unit again\n\tattempt = sts.RequestOneAttempt(s)\n\ts.AttemptStatus(coordinate.Pending, attempt)\n}", "func LiveVerifyExpired(obj *models.LiveVerify, appUIVersion string, appVersionCode int) (expired bool) {\n\tif obj == nil {\n\t\treturn true\n\t}\n\t//兼容老版本,老版本不需要做 活体认证的有效期限制\n\tif types.IndonesiaAppRipeVersionLiveVerify > appVersionCode {\n\t\treturn false\n\t}\n\n\tinterval, _ := config.ValidItemInt(types.LiveVerifyInterval)\n\tlogs.Debug(\"[service.account.LiveVerifyExpired] config interval:\", interval,\n\t\t\" obj.Ctime:\", obj.Ctime,\n\t\t\" tools.GetUnixMillis() - obj.Ctime:\", tools.GetUnixMillis()-obj.Ctime)\n\treturn (tools.GetUnixMillis() - obj.Ctime) > int64(interval*60*1000)\n}", "func HasExpired(cloudEvent map[string]interface{}) bool {\n\te, ok := cloudEvent[ExpirationField]\n\tif ok && e != \"\" {\n\t\texpiration, err := time.Parse(time.RFC3339, fmt.Sprintf(\"%s\", e))\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\n\t\treturn expiration.UTC().Before(time.Now().UTC())\n\t}\n\n\treturn false\n}", "func TestSendENIStateChangeExpired(t *testing.T) {\n\tmockCtrl := gomock.NewController(t)\n\tdefer mockCtrl.Finish()\n\n\tmockStateManager := mock_dockerstate.NewMockTaskEngineState(mockCtrl)\n\teventChannel := make(chan statechange.Event)\n\tctx := context.TODO()\n\n\tgomock.InOrder(\n\t\tmockStateManager.EXPECT().ENIByMac(randomMAC).Return(&ni.ENIAttachment{\n\t\t\tAttachmentInfo: attachmentinfo.AttachmentInfo{\n\t\t\t\tAttachStatusSent: false,\n\t\t\t\tExpiresAt: time.Now().Add(expirationTimeSubtraction),\n\t\t\t},\n\t\t\tMACAddress: randomMAC,\n\t\t}, true),\n\t\tmockStateManager.EXPECT().RemoveENIAttachment(randomMAC),\n\t)\n\n\twatcher := setupWatcher(ctx, nil, mockStateManager, eventChannel, primaryMAC)\n\n\tassert.Error(t, watcher.sendENIStateChange(randomMAC))\n}", "func (page RoleManagementPolicyListResultPage) NotDone() bool {\n\treturn 
!page.rmplr.IsEmpty()\n}", "func (me TQualificationStatus) IsRevoked() bool { return me.String() == \"Revoked\" }", "func shouldRefresh(now func() time.Time, notAfter, softNotAfter *tspb.Timestamp) (\n\tuseToken, useCreds bool, _ status.S) {\n\tif notAfter == nil {\n\t\treturn false, false, status.Internal(nil, \"missing not after\")\n\t}\n\tna, err := ptypes.Timestamp(notAfter)\n\tif err != nil {\n\t\treturn false, false, status.Internal(err, \"bad not after\")\n\t}\n\tts := now()\n\tif ts.After(na) {\n\t\treturn false, true, nil\n\t}\n\tif softNotAfter != nil {\n\t\tsna, err := ptypes.Timestamp(softNotAfter)\n\t\tif err != nil {\n\t\t\treturn false, false, status.Internal(err, \"bad soft not after\")\n\t\t}\n\t\tif ts.After(sna) {\n\t\t\treturn true, false, nil\n\t\t}\n\t}\n\treturn false, false, nil\n}", "func (me TrefreshModeEnumType) IsOnExpire() bool { return me == \"onExpire\" }", "func (o *MuteFindingResponseProperties) HasExpirationDate() bool {\n\treturn o != nil && o.ExpirationDate != nil\n}", "func (d *Driver) Expired() bool {\n\tif d.Expiration == 0 {\n\t\treturn false\n\t}\n\treturn time.Now().UnixNano() > d.Expiration\n}", "func isExpired(filename string) bool {\n\tvar t time.Time\n\tlast := timeFromFilename(filename)\n\n\t// exp time for prices dataset is 1 day\n\tif strings.Contains(filename, datasetPrices) {\n\t\tt = last.AddDate(0, 0, 1)\n\t}\n\n\t// exp time for stations dataset is 15 days\n\tif strings.Contains(filename, datasetStations) {\n\t\tt = last.AddDate(0, 0, 15)\n\t}\n\n\treturn t.Before(time.Now())\n}", "func (t *token) IsExpired() bool {\n\tif t == nil {\n\t\treturn true\n\t}\n\treturn t.Expired()\n}", "func IsTimeExpiredInTime(tt time.Time, offsetInSeconds float64) bool {\n\tremainder := tt.Sub(time.Now())\n\tlog.Info(\"remainder: %v calc : %v\", remainder, (remainder.Seconds() + offsetInSeconds))\n\n\treturn !((remainder.Seconds() + offsetInSeconds) > 0)\n}", "func (p *UserPendingPermissions) Expired(ttl time.Duration, now 
time.Time) bool {\n\treturn !now.Before(p.UpdatedAt.Add(ttl))\n}", "func (spit *Spit) RemainingExpiration() int {\r\n\treturn int(spit.DateCreatedTime().Add(time.Duration(spit.Exp)*time.Second).Unix()-\r\n\t\ttime.Now().UTC().Unix()) - 1\r\n}", "func (t *cachedToken) usable() bool {\n\treturn t.token != \"\" || time.Now().Add(30*time.Second).Before(t.expiry)\n}", "func TestInvalidCutOffPeriod(t *testing.T) {\n\n\tassert := assert.New(t)\n\n\ttrans := &transport{}\n\n\tobs, err := simpleSetup(trans, 0*time.Second, nil)\n\tassert.Nil(obs)\n\tassert.NotNil(err)\n}", "func (j *Service) IsExpired(claims Claims) bool {\n\treturn !claims.VerifyExpiresAt(time.Now().Unix(), true)\n}", "func (m *Attachment) HasExpired() bool {\n\tvar validTime = m.SigningTime.Add(time.Duration(m.SigningMinutes) * time.Minute)\n\treturn validTime.Unix() < time.Now().Unix()\n}", "func (loc *LocInfo) Expired() bool {\n\tif time.Now().Before(loc.ValidUntil) {\n\t\treturn false\n\t}\n\treturn true\n}", "func (o *MuteFindingRequestProperties) HasExpirationDate() bool {\n\treturn o != nil && o.ExpirationDate != nil\n}", "func (page OdataProductResultPage) NotDone() bool {\n\treturn !page.opr.IsEmpty()\n}", "func (page AvailableSkusResultPage) NotDone() bool {\n\treturn !page.asr.IsEmpty()\n}", "func NoAlarm(s string) {}", "func BadOscillateTime(peakAccel, period float64) TimeFn {\n\t// todo (bs): let's see if I can inline cacheV here. \"mistakes\" like this\n\t// which are too dependent on sub-behavior like that can accidentally\n\t// disappear.\n\treturn func(t float64) float64 {\n\t\tcacheV := CacheDirectLookup(sinWaveCache, period*t)\n\t\treturn (1+(peakAccel/2))*t - peakAccel/(4*math.Pi*period)*cacheV\n\t}\n}", "func (c *CSRFStore) expired() bool {\n\treturn c.token == nil || time.Now().After(c.token.ExpiresAt)\n\n}" ]
[ "0.7152463", "0.61216784", "0.5975197", "0.59364676", "0.59358203", "0.59336835", "0.5894708", "0.58882517", "0.57942945", "0.57901603", "0.57711214", "0.57444674", "0.5736263", "0.5717031", "0.5687377", "0.5684727", "0.56167704", "0.5595198", "0.55641276", "0.55085486", "0.5462985", "0.54606014", "0.5442721", "0.5416922", "0.5381961", "0.5375618", "0.53732544", "0.5358615", "0.53541857", "0.5352581", "0.534775", "0.5309673", "0.5295682", "0.529095", "0.52904725", "0.52821934", "0.52799326", "0.5275708", "0.526344", "0.5255895", "0.52539575", "0.52526563", "0.5246999", "0.52343136", "0.5226376", "0.5218475", "0.52160776", "0.5210295", "0.5202812", "0.51997435", "0.51579344", "0.5138727", "0.5135128", "0.5130804", "0.5127194", "0.51224923", "0.51189375", "0.5117907", "0.5113859", "0.5112558", "0.5109658", "0.50984156", "0.50860006", "0.5074075", "0.506488", "0.5062342", "0.50537604", "0.50434035", "0.5032897", "0.5030963", "0.5030525", "0.50241154", "0.50147736", "0.5011294", "0.50108707", "0.49892598", "0.49780756", "0.49734303", "0.49674803", "0.4966009", "0.4960303", "0.4960241", "0.49601522", "0.49576706", "0.49563724", "0.49557328", "0.49511218", "0.4947359", "0.49447605", "0.49358094", "0.49352372", "0.49350464", "0.49314016", "0.4928627", "0.49208158", "0.49190193", "0.49119928", "0.49071962", "0.4905109", "0.4900189" ]
0.7433115
0
Callback sends the CallbackRequest type to the configured StatusCallbackUrl. If it fails to deliver in n attempts or the request is invalid it will return an error.
func Callback(cbReq *CallbackRequest, opts *CallbackOptions) error { client := opts.Client if client == nil { client = http.DefaultClient } buf := bytes.NewBuffer(nil) err := json.NewEncoder(buf).Encode(cbReq) if err != nil { return err } signature, err := opts.Signer.Sign(buf.Bytes()) if err != nil { return err } req, err := http.NewRequest("POST", cbReq.StatusCallbackUrl, buf) if err != nil { return err } req.Header.Set("X-OpenGDPR-Processor-Domain", opts.ProcessorDomain) req.Header.Set("X-OpenGDPR-Signature", signature) // Attempt to make callback for i := 0; i < opts.MaxAttempts; i++ { resp, err := client.Do(req) if err != nil || resp.StatusCode != 200 { time.Sleep(opts.Backoff) continue } // Success return nil } return fmt.Errorf("callback timed out for %s", cbReq.StatusCallbackUrl) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (client *Client) ServiceStatusWithCallback(request *ServiceStatusRequest, callback func(response *ServiceStatusResponse, err error)) (<-chan int) {\nresult := make(chan int, 1)\nerr := client.AddAsyncTask(func() {\nvar response *ServiceStatusResponse\nvar err error\ndefer close(result)\nresponse, err = client.ServiceStatus(request)\ncallback(response, err)\nresult <- 1\n})\nif err != nil {\ndefer close(result)\ncallback(nil, err)\nresult <- 0\n}\nreturn result\n}", "func (handler *HTTPCallBackHanlder) Callback(c echo.Context) error {\n\n\tctx := c.Request().Context()\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\tevents, err := handler.Bot.ParseRequest(c.Request())\n\tif err != nil {\n\t\tif err == linebot.ErrInvalidSignature {\n\t\t\tc.String(400, linebot.ErrInvalidSignature.Error())\n\t\t} else {\n\t\t\tc.String(500, \"internal\")\n\t\t}\n\t}\n\n\tfor _, event := range events {\n\t\tif event.Type == linebot.EventTypeMessage {\n\t\t\tswitch message := event.Message.(type) {\n\t\t\tcase *linebot.TextMessage:\n\t\t\t\tmessageFromPing := servicemanagement.PingService(message.Text, handler.ServicesInfo, time.Second*5)\n\t\t\t\tif _, err = handler.Bot.ReplyMessage(event.ReplyToken, linebot.NewTextMessage(messageFromPing)).Do(); err != nil {\n\t\t\t\t\tlog.Print(err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn c.JSON(200, \"\")\n}", "func (c *controller) Callback(ctx context.Context, request *web.Request) web.Result {\n\tif resp := c.service.callback(ctx, request); resp != nil {\n\t\treturn resp\n\t}\n\treturn c.responder.NotFound(errors.New(\"broker for callback not found\"))\n}", "func callback(\n\tservice models.DeviceService,\n\tid string,\n\taction string,\n\tactionType models.ActionType,\n\tlc logger.LoggingClient) error {\n\n\tclient := &http.Client{}\n\turl := service.Addressable.GetCallbackURL()\n\tif len(url) > 0 {\n\t\tbody, err := getBody(id, actionType)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treq, err := 
http.NewRequest(string(action), url, bytes.NewReader(body))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treq.Header.Add(clients.ContentType, clients.ContentTypeJSON)\n\n\t\tgo makeRequest(client, req, lc)\n\t} else {\n\t\tlc.Info(\"callback::no addressable for \" + service.Name)\n\t}\n\treturn nil\n}", "func CallbackHandler(callbackChan chan Callback) http.HandlerFunc {\n\treturn http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {\n\t\tvar cb Callback\n\t\terr := cb.Parse(req)\n\t\tif err != nil {\n\t\t\tresp.WriteHeader(http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tresp.WriteHeader(http.StatusOK)\n\t\t// start seperate goroutine to allow http request to return.\n\t\tgo func() {\n\t\t\tcallbackChan <- cb\n\t\t}()\n\t})\n}", "func (f *oauthFlow) Callback(r *http.Request) (string, error) {\n\n\tif states[r.FormValue(\"state\")] != true {\n\t\treturn \"\", ErrStateNotFound\n\t}\n\n\tdelete(states, r.FormValue(\"state\"))\n\n\ttype access_tokenResponse struct {\n\t\tAccessToken string `json:\"access_token\"`\n\t\tScope string `json:\"scope\"`\n\t\tTokenType string `json:\"token_type\"`\n\t\tGithubError\n\t}\n\n\tu, _ := url.Parse(f.BaseUrl)\n\tif u.Path == \"\" {\n\t\tu.Path = \"/\"\n\t}\n\n\tu.Path = path.Join(u.Path, \"login/oauth/access_token\")\n\n\tq := u.Query()\n\tq.Set(\"client_id\", f.ClientId)\n\tq.Set(\"client_secret\", f.ClientSecret)\n\tq.Set(\"code\", r.FormValue(\"code\"))\n\tu.RawQuery = q.Encode()\n\n\treq := &http.Request{\n\t\tMethod: \"POST\",\n\t\tURL: u,\n\t\tHeader: http.Header{\n\t\t\t\"Accept\": {\"application/json\"},\n\t\t},\n\t}\n\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\tdefer resp.Body.Close()\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tjs := access_tokenResponse{}\n\terr = json.NewDecoder(resp.Body).Decode(&js)\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif js.AccessToken == \"\" {\n\t\treturn \"\", &GithubError{js.RawError, js.ErrorDescription, js.ErrorUri}\n\t}\n\n\treturn 
js.AccessToken, nil\n}", "func Callback(state string, codeVerifier string, redirectURL string, oauth2config oauth2.Config, targetLabel string, finish chan bool) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tdefer func() {\n\t\t\tfinish <- true\n\t\t}()\n\t\tvar msg string\n\t\tvar page string\n\n\t\t// checking state\n\t\tif state != r.URL.Query().Get(\"state\") {\n\t\t\tmsg = fmt.Sprintf(errorMarkup, \"Invalid state\")\n\t\t\tpage = fmt.Sprintf(callbackPage, msg)\n\t\t} else {\n\t\t\t// State OK, continue OpenID Connect Flow\n\t\t\tcode := r.URL.Query().Get(\"code\")\n\t\t\tctx := context.Background()\n\t\t\toauth2Token, err := oauth2config.Exchange(ctx, code, oauth2.SetAuthURLParam(\"code_verifier\", codeVerifier))\n\t\t\tif err != nil {\n\t\t\t\t// Exchange error\n\t\t\t\tmsg = fmt.Sprintf(errorMarkup, err.Error())\n\t\t\t\tpage = fmt.Sprintf(callbackPage, msg)\n\t\t\t} else {\n\t\t\t\t// Exchange success\n\t\t\t\tpage = fmt.Sprintf(callbackPage, successMarkup)\n\n\t\t\t\t// Storing tokens on current target\n\t\t\t\toauth2Token.AccessToken = oauth2Token.Extra(\"id_token\").(string)\n\t\t\t\terr = StorageTokens(targetLabel, *oauth2Token)\n\t\t\t\tif err != nil {\n\t\t\t\t\t// Exchange error\n\t\t\t\t\tmsg = fmt.Sprintf(errorMarkup, err.Error())\n\t\t\t\t\tpage = fmt.Sprintf(callbackPage, msg)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tw.Header().Add(\"Content-Type\", \"text/html\")\n\t\t_, err := w.Write([]byte(page))\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Client error writing callback page: (%s)\\n\", err.Error())\n\t\t}\n\t}\n}", "func CallbackHandler(c *gin.Context) {\n\n\t// Retrieve query params for state and code\n\tstate := c.Query(\"state\")\n\tcode := c.Query(\"code\")\n\t//2次進攻redirectURL\n\tresp, err := 
http.Post(\"https://app.asana.com/-/oauth_token\",\n\t\t\"application/x-www-form-urlencoded\",\n\t\tstrings.NewReader(\"grant_type=authorization_code&client_id=\"+clientID+\"&client_secret=\"+clientSecret+\"&redirect_uri=\"+redirectURL+\"&state=\"+state+\"&code=\"+code))\n\tif err != nil {\n\t\tutil.Error(err.Error())\n\t}\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tutil.Error(\"resp error\")\n\t}\n\n\tServerslice1 := UserType{}\n\te := json.Unmarshal([]byte(body), &Serverslice1)\n\tif e != nil {\n\t\tutil.Error(e.Error())\n\t}\n\n\t// Save the username in the session\n\t//session.Set(userkey, Serverslice1.Data.Name)\n\n\t//fmt.Println(body)\n\n\t//rsp回來的資料\n\tutil.Info(string(\" > User \"+Serverslice1.Data.Name) + \" login ! \")\n\tc.Writer.Write([]byte(\"Hi, \" + string(Serverslice1.Data.Name)))\n}", "func (client *ClientRPCMethods) Status(in *string, response *ServerStatus) (err error) {\n\t*response = *client.client.callback.Status()\n\treturn nil\n}", "func (p *OAuthProxy) OAuthCallback(rw http.ResponseWriter, req *http.Request) {\n\t// We receive the callback from the SSO Authenticator. 
This request will either contain an\n\t// error, or it will contain a `code`; the code can be used to fetch an access token, and\n\t// other metadata, from the authenticator.\n\tlogger := log.NewLogEntry()\n\n\tremoteAddr := getRemoteAddr(req)\n\ttags := []string{\"action:callback\"}\n\n\t// finish the oauth cycle\n\terr := req.ParseForm()\n\tif err != nil {\n\t\tp.StatsdClient.Incr(\"application_error\", tags, 1.0)\n\t\tp.ErrorPage(rw, req, http.StatusInternalServerError, \"Internal Error\", err.Error())\n\t\treturn\n\t}\n\terrorString := req.Form.Get(\"error\")\n\tif errorString != \"\" {\n\t\ttags = append(tags, \"error:callback_error_exists\")\n\t\tp.StatsdClient.Incr(\"application_error\", tags, 1.0)\n\t\tp.ErrorPage(rw, req, http.StatusForbidden, \"Permission Denied\", errorString)\n\t\treturn\n\t}\n\n\t// We begin the process of redeeming the code for an access token.\n\tsession, err := p.redeemCode(req.Host, req.Form.Get(\"code\"))\n\tif err != nil {\n\t\ttags = append(tags, \"error:redeem_code_error\")\n\t\tp.StatsdClient.Incr(\"provider_error\", tags, 1.0)\n\t\tlogger.WithRemoteAddress(remoteAddr).Error(\n\t\t\terr, \"error redeeming authorization code\")\n\t\tp.ErrorPage(rw, req, http.StatusInternalServerError, \"Internal Error\", \"Internal Error\")\n\t\treturn\n\t}\n\n\tencryptedState := req.Form.Get(\"state\")\n\tstateParameter := &StateParameter{}\n\terr = p.cookieCipher.Unmarshal(encryptedState, stateParameter)\n\tif err != nil {\n\t\ttags = append(tags, \"error:state_parameter_error\")\n\t\tp.StatsdClient.Incr(\"application_error\", tags, 1.0)\n\t\tlogger.WithRemoteAddress(remoteAddr).Error(\n\t\t\terr, \"could not unmarshal state parameter value\")\n\t\tp.ErrorPage(rw, req, http.StatusInternalServerError, \"Internal Error\", \"Internal Error\")\n\t\treturn\n\t}\n\n\tc, err := p.csrfStore.GetCSRF(req)\n\tif err != nil {\n\t\ttags = append(tags, \"error:csrf_cookie_error\")\n\t\tp.StatsdClient.Incr(\"application_error\", tags, 
1.0)\n\t\tp.ErrorPage(rw, req, http.StatusBadRequest, \"Bad Request\", err.Error())\n\t\treturn\n\t}\n\n\tencryptedCSRF := c.Value\n\tcsrfParameter := &StateParameter{}\n\terr = p.cookieCipher.Unmarshal(encryptedCSRF, csrfParameter)\n\tif err != nil {\n\t\ttags = append(tags, \"error:csrf_parameter_error\")\n\t\tp.StatsdClient.Incr(\"application_error\", tags, 1.0)\n\t\tlogger.WithRemoteAddress(remoteAddr).Error(\n\t\t\terr, \"couldn't unmarshal CSRF parameter value\")\n\t\tp.ErrorPage(rw, req, http.StatusInternalServerError, \"Internal Error\", \"Internal Error\")\n\t\treturn\n\t}\n\n\tif encryptedState == encryptedCSRF {\n\t\ttags = append(tags, \"error:equal_encrypted_state_and_csrf\")\n\t\tp.StatsdClient.Incr(\"application_error\", tags, 1.0)\n\t\tlogger.WithRemoteAddress(remoteAddr).Info(\n\t\t\t\"encrypted state value and encrypted CSRF value are unexpectedly equal\")\n\t\tp.ErrorPage(rw, req, http.StatusBadRequest, \"Bad Request\", \"Bad Request\")\n\t\treturn\n\t}\n\n\tif !reflect.DeepEqual(stateParameter, csrfParameter) {\n\t\ttags = append(tags, \"error:state_csrf_mismatch\")\n\t\tp.StatsdClient.Incr(\"application_error\", tags, 1.0)\n\t\tlogger.WithRemoteAddress(remoteAddr).Info(\n\t\t\t\"state parameter and CSRF parameters are unexpectedly not equal\")\n\t\tp.ErrorPage(rw, req, http.StatusBadRequest, \"Bad Request\", \"Bad Request\")\n\t\treturn\n\t}\n\n\t// We validate the user information, and check that this user has proper authorization\n\t// for the resources requested.\n\t//\n\t// set cookie, or deny\n\n\terrors := validators.RunValidators(p.Validators, session)\n\tif len(errors) == len(p.Validators) {\n\t\ttags = append(tags, \"error:validation_failed\")\n\t\tp.StatsdClient.Incr(\"application_error\", tags, 1.0)\n\t\tlogger.WithRemoteAddress(remoteAddr).WithUser(session.Email).Info(\n\t\t\tfmt.Sprintf(\"permission denied: unauthorized: %q\", errors))\n\n\t\tformattedErrors := make([]string, 0, len(errors))\n\t\tfor _, err := range errors 
{\n\t\t\tformattedErrors = append(formattedErrors, err.Error())\n\t\t}\n\t\terrorMsg := fmt.Sprintf(\"We ran into some issues while validating your account: \\\"%s\\\"\",\n\t\t\tstrings.Join(formattedErrors, \", \"))\n\t\tp.ErrorPage(rw, req, http.StatusForbidden, \"Permission Denied\", errorMsg)\n\t\treturn\n\t}\n\n\tlogger.WithRemoteAddress(remoteAddr).WithUser(session.Email).WithInGroups(session.Groups).Info(\n\t\tfmt.Sprintf(\"oauth callback: user validated \"))\n\n\t// We add the request host into the session to allow us to validate that each request has\n\t// been authorized for the upstream it's requesting.\n\t// e.g. if a request is authenticated while trying to reach 'foo' upstream, it should not\n\t// automatically be seen as authorized with 'bar' upstream. Each upstream may set different\n\t// validators, so the request should be reauthenticated.\n\tsession.AuthorizedUpstream = req.Host\n\n\t// We store the session in a cookie and redirect the user back to the application\n\terr = p.sessionStore.SaveSession(rw, req, session)\n\tif err != nil {\n\t\ttags = append(tags, \"error:save_session_error\")\n\t\tp.StatsdClient.Incr(\"application_error\", tags, 1.0)\n\t\tlogger.WithRemoteAddress(remoteAddr).Error(err, \"error saving session\")\n\t\tp.ErrorPage(rw, req, http.StatusInternalServerError, \"Internal Error\", \"Internal Error\")\n\t\treturn\n\t}\n\n\t// Now that we know the request and user is valid, clear the CSRF token\n\tp.csrfStore.ClearCSRF(rw, req)\n\n\t// This is the redirect back to the original requested application\n\thttp.Redirect(rw, req, stateParameter.RedirectURI, http.StatusFound)\n}", "func (app *testbot) Callback(w http.ResponseWriter, r *http.Request) {\n\tevents, err := app.bot.ParseRequest(r)\n\tif err != nil {\n\t\tif err == linebot.ErrInvalidSignature {\n\t\t\tw.WriteHeader(400)\n\t\t} else {\n\t\t\tw.WriteHeader(500)\n\t\t}\n\t\treturn\n\t}\n\tfor _, event := range events {\n\t\tlog.Printf(\"Got event %v\", event)\n\t\tswitch 
event.Type {\n\t\tcase linebot.EventTypeMessage:\n\t\t\tswitch message := event.Message.(type) {\n\t\t\tcase *linebot.TextMessage:\n\t\t\t\tif err := app.handleText(message, event.ReplyToken, event.Source); err != nil {\n\t\t\t\t\tlog.Print(err)\n\t\t\t\t}\n\t\t\tcase *linebot.ImageMessage:\n\t\t\t\tif err := app.handleImage(message, event.ReplyToken); err != nil {\n\t\t\t\t\tlog.Print(err)\n\t\t\t\t}\n\t\t\tcase *linebot.VideoMessage:\n\t\t\t\tif err := app.handleVideo(message, event.ReplyToken); err != nil {\n\t\t\t\t\tlog.Print(err)\n\t\t\t\t}\n\t\t\tcase *linebot.AudioMessage:\n\t\t\t\tif err := app.handleAudio(message, event.ReplyToken); err != nil {\n\t\t\t\t\tlog.Print(err)\n\t\t\t\t}\n\t\t\tcase *linebot.LocationMessage:\n\t\t\t\tif err := app.handleLocation(message, event.ReplyToken); err != nil {\n\t\t\t\t\tlog.Print(err)\n\t\t\t\t}\n\t\t\tcase *linebot.StickerMessage:\n\t\t\t\tif err := app.handleSticker(message, event.ReplyToken); err != nil {\n\t\t\t\t\tlog.Print(err)\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tlog.Printf(\"Unknown message: %v\", message)\n\t\t\t}\n\t\tcase linebot.EventTypeFollow:\n\t\t\tif err := app.replyText(event.ReplyToken, \"Got followed event\"); err != nil {\n\t\t\t\tlog.Print(err)\n\t\t\t}\n\t\tcase linebot.EventTypeUnfollow:\n\t\t\tlog.Printf(\"Unfollowed this bot: %v\", event)\n\t\tcase linebot.EventTypeJoin:\n\t\t\tif err := app.replyText(event.ReplyToken, \"Joined \"+string(event.Source.Type)); err != nil {\n\t\t\t\tlog.Print(err)\n\t\t\t}\n\t\tcase linebot.EventTypeLeave:\n\t\t\tlog.Printf(\"Left: %v\", event)\n\n\t\tcase linebot.EventTypeBeacon:\n\t\t\tif err := app.replyText(event.ReplyToken, \"Got beacon: \"+event.Beacon.Hwid); err != nil {\n\t\t\t\tlog.Print(err)\n\t\t\t}\n\t\tdefault:\n\t\t\tlog.Printf(\"Unknown event: %v\", event)\n\t\t}\n\t}\n}", "func CallbackHandler(callbackChan chan Callback) http.HandlerFunc {\n\treturn http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {\n\t\tvar cb 
Callback\n\t\tcb.Parse(req)\n\t\tresp.WriteHeader(http.StatusOK)\n\t\tgo func() {\n\t\t\tcallbackChan <- cb\n\t\t}()\n\t})\n}", "func Callback(w http.ResponseWriter, r *http.Request) {\n\tcode := ParseResponse(w, r)\n\taccess := AccessToken(code, w, r)\n\tfmt.Fprintf(w, access.Token)\n\tGetData(access.Token, w, r)\n}", "func (client *Client) QueryCustomerAddressListWithCallback(request *QueryCustomerAddressListRequest, callback func(response *QueryCustomerAddressListResponse, err error)) (<-chan int) {\nresult := make(chan int, 1)\nerr := client.AddAsyncTask(func() {\nvar response *QueryCustomerAddressListResponse\nvar err error\ndefer close(result)\nresponse, err = client.QueryCustomerAddressList(request)\ncallback(response, err)\nresult <- 1\n})\nif err != nil {\ndefer close(result)\ncallback(nil, err)\nresult <- 0\n}\nreturn result\n}", "func (c *SearchCall) Callback(callback string) *SearchCall {\n\tc.urlParams_.Set(\"callback\", callback)\n\treturn c\n}", "func (client *Client) OperateLorneTaskStatusWithCallback(request *OperateLorneTaskStatusRequest, callback func(response *OperateLorneTaskStatusResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *OperateLorneTaskStatusResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.OperateLorneTaskStatus(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (s *Service) GetStatus(ctx context.Context, req *request.Status) (*response.Status, error) {\n\tres := &response.Status{Callback: req.Callback}\n\tif req.Callback == \"I don't like launch pad\" {\n\t\treturn res, errors.New(\"launch pad is the best and you know it\")\n\t}\n\treturn res, nil\n}", "func Callback(c *gin.Context) {\n\tprovider := c.Param(\"provider\")\n\n\tvar logincode vo.LoginReq\n\tif err := c.ShouldBindQuery(&logincode); err != 
nil {\n\t\tfmt.Println(\"xxxx\", err)\n\t}\n\n\tfmt.Println(\"provider\", provider, logincode)\n\n\tuserInfo := vo.GetUserInfoFromOauth(provider, logincode.Code, logincode.State)\n\tfmt.Println(\"get user info\", userInfo)\n\n\tif userInfo == nil {\n\t\tc.JSON(http.StatusOK, sailor.HTTPAirdbResponse{\n\t\t\tCode: enum.AirdbSuccess,\n\t\t\tSuccess: true,\n\t\t\tData: vo.LoginResp{\n\t\t\t\tNickname: \"xxx\",\n\t\t\t\tHeadimgurl: \"xxx.png\",\n\t\t\t},\n\t\t})\n\n\t\treturn\n\t}\n\n\tc.JSON(http.StatusOK, sailor.HTTPAirdbResponse{\n\t\tCode: enum.AirdbSuccess,\n\t\tSuccess: true,\n\t\tData: vo.LoginResp{\n\t\t\tNickname: userInfo.Login,\n\t\t\tHeadimgurl: userInfo.AvatarURL,\n\t\t},\n\t})\n}", "func parseCallback(r *http.Request, cb *callback) (int, error) {\n\tdefer func() {\n\t\terr := r.Body.Close()\n\t\tif err != nil {\n\t\t\tlog.Printf(\"body close: %v\", err)\n\t\t}\n\t}()\n\n\tcontentType := r.Header.Get(\"Content-Type\")\n\n\tswitch r.Method {\n\tcase \"POST\":\n\t\tswitch contentType {\n\t\tcase \"application/json\":\n\t\t\t// unmarshall JSON to callback struct\n\t\t\tdecoder := json.NewDecoder(r.Body)\n\t\t\tif err := decoder.Decode(cb); err != nil {\n\t\t\t\treturn http.StatusBadRequest, err\n\t\t\t}\n\n\t\tdefault:\n\t\t\treturn http.StatusBadRequest, fmt.Errorf(\"Unsupported content type: %s\", contentType)\n\t\t}\n\n\tdefault:\n\t\treturn http.StatusMethodNotAllowed, fmt.Errorf(\"Unsupported request method: %s\", r.Method)\n\t}\n\n\treturn 0, nil\n}", "func (h *GitHubOAuth) Callback(c *router.Control) {\n\tstate := c.Get(\"state\")\n\tcode := c.Get(\"code\")\n\n\tif state != h.state {\n\t\th.log.Errorf(\"Wrong state %s with code %s\", state, code)\n\t\thttp.Redirect(c.Writer, c.Request, \"/\", http.StatusMovedPermanently)\n\t\treturn\n\t}\n\n\tctx := context.Background()\n\ttoken, err := h.oAuthConf.Exchange(ctx, code)\n\n\tif err != nil {\n\t\th.log.Errorf(\"Exchange failed for code %s: %+v\", code, err)\n\t\thttp.Redirect(c.Writer, c.Request, \"/\", 
http.StatusMovedPermanently)\n\t\treturn\n\t}\n\n\toauthClient := h.oAuthConf.Client(ctx, token)\n\tgithubClient := ghClient.NewClient(oauthClient)\n\tuser, _, err := githubClient.Users.Get(ctx, \"\")\n\tif err != nil || user.Login == nil {\n\t\th.log.Errorf(\"Couldn't get user for code %s: %+v\", code, err)\n\t\thttp.Redirect(c.Writer, c.Request, \"/\", http.StatusMovedPermanently)\n\t\treturn\n\t}\n\n\th.log.WithField(\"user\", *user.Login).Info(\"GitHub user was authorized in oauth-proxy\")\n\n\tsessionData := session.NewSessionOptions(&session.SessOptions{\n\t\tCAttrs: map[string]interface{}{\"Login\": *user.Login, \"Source\": models.SourceGitHub},\n\t\tAttrs: map[string]interface{}{\"Activated\": false, \"HasError\": false},\n\t})\n\tsession.Add(sessionData, c.Writer)\n\n\tgo h.syncUser(*user.Login, sessionData, c.Writer)\n\n\thttp.Redirect(c.Writer, c.Request, \"/\", http.StatusMovedPermanently)\n}", "func (s *Service) SendCallbackRetry(c context.Context, cb *model.Callback, payload *model.Payload) (err error) {\n\tattempts := 3\n\tfor i := 0; i < attempts; i++ {\n\t\terr = s.dao.SendCallback(c, cb, payload)\n\t\tif err == nil {\n\t\t\treturn\n\t\t}\n\n\t\tif i >= (attempts - 1) {\n\t\t\tbreak\n\t\t}\n\n\t\ttime.Sleep(time.Second * 1)\n\t}\n\treturn errors.Wrapf(err, \"after %d attempts\", attempts)\n}", "func CallbackUrl(val string) Argument {\n\treturn func(request *requests.Request) error {\n\t\tif len(val) > 256 {\n\t\t\treturn errors.New(\"callback url too long, max 256 caracteres\")\n\t\t}\n\t\trequest.AddArgument(\"callback_url\", val)\n\t\treturn nil\n\t}\n}", "func CallbackHandler(c *gin.Context) {\n\t// Retrieve query params for state and code\n\tstate := c.Query(\"state\")\n\tcode := c.Query(\"code\")\n\tprovider := c.Param(\"provider\")\n\n\t// Handle callback and check for errors\n\tuser, _, err := config.Gocial.Handle(state, code)\n\tif err != nil {\n\t\tc.Writer.Write([]byte(\"Error: \" + err.Error()))\n\t\treturn\n\t}\n\n\tvar newUser = 
getOrRegisterUser(provider, user)\n\tvar jtwToken = createToken(&newUser)\n\n\tc.JSON(200, gin.H{\n\t\t\"data\": newUser,\n\t\t\"token\": jtwToken,\n\t\t\"message\": \"berhasil login\",\n\t})\n}", "func CallbackHandler(w http.ResponseWriter, r *http.Request) {\n\tfmt.Println(\"In CallbackHandler!\")\n\tfmt.Println(r.URL.Query().Get(\"hub.verify_token\"))\n\tif r.URL.Query().Get(\"hub.verify_token\") == cfg.Instagram.Verify {\n\t\tfmt.Println(\"verify hihi OK!!\")\n\t\tw.Write([]byte(r.URL.Query().Get(\"hub.challenge\")))\n\t} else {\n\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t}\n}", "func Status(expected int) echo.Checker {\n\texpectedStr := \"\"\n\tif expected > 0 {\n\t\texpectedStr = strconv.Itoa(expected)\n\t}\n\treturn Each(func(r echoClient.Response) error {\n\t\tif r.Code != expectedStr {\n\t\t\treturn fmt.Errorf(\"expected response code `%s`, got %q. Response: %s\", expectedStr, r.Code, r)\n\t\t}\n\t\treturn nil\n\t})\n}", "func oauthCallbackHandler(w http.ResponseWriter, r *http.Request) {\n\ttransport := &oauth.Transport{Config: &oauthProviderConfig.oauthConfig}\n\ttransport.Exchange(r.FormValue(\"code\"))\n\tclient := transport.Client()\n\tresponse, err := client.Get(oauthProviderConfig.UserInfoAPI)\n\tif err != nil {\n\t\tlog.Printf(\"Error while contacting '%s': %s\\n\", oauthProviderConfig.UserInfoAPI, err)\n\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\treturn\n\t}\n\tbody, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\tlog.Printf(\"Error while parsing response from '%s': %s\\n\", oauthProviderConfig.UserInfoAPI, err)\n\t\thttp.Error(w, http.StatusText(http.StatusBadGateway), http.StatusBadGateway)\n\t\treturn\n\t}\n\tresponse.Body.Close()\n\tauthorized, email := isAuthorized(body)\n\tif authorized {\n\t\tauthorizeEmail(email, w, r)\n\t\tlog.Println(\"User\", email, \"logged in\")\n\t\tsession, _ := store.Get(r, 
serverConfig.CookieName)\n\t\tif next, ok := session.Values[\"next\"]; ok {\n\t\t\thttp.Redirect(w, r, next.(string), http.StatusFound)\n\t\t}\n\t} else {\n\t\tlog.Println(\"Access Denied: Couldn't match an email address in the server response.\")\n\t\thttp.Error(w, http.StatusText(http.StatusForbidden), http.StatusForbidden)\n\t}\n}", "func determineStatus(out map[string]Status) int {\n\tstatus := http.StatusOK\n\n\tfor _, evt := range out {\n\t\tif len(evt.Error) > 0 {\n\t\t\tstatus = http.StatusServiceUnavailable\n\t\t\tbreak\n\t\t}\n\t\tif !evt.Ready {\n\t\t\tstatus = http.StatusAccepted\n\t\t}\n\t}\n\n\treturn status\n}", "func (c *Channel) ValidateCallback(r *http.Request) bool {\n\tif c.token != \"\" {\n\t\treqToken := r.Header.Get(\"Authorization\")\n\t\treqToken = strings.TrimPrefix(reqToken, \"Bearer \")\n\n\t\tif c.token != reqToken {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (f *Friend) RequestStatus() int {\n\treturn f.status\n}", "func startCallbackServer(port string, redirectURI string, callback func(w http.ResponseWriter, r *http.Request) (interface{}, error)) (interface{}, error) {\n\t// Set a channel we will block on and wait for the result.\n\tresultCh := make(chan CallbackResult)\n\n\t// Setup the server.\n\tm := http.NewServeMux()\n\ts := &http.Server{Addr: \":\" + port, Handler: m}\n\n\t// This is the handler for the path we specified, it calls the provided\n\t// callback as soon as a request arrives and moves the result of the callback\n\t// on to the resultCh.\n\tm.HandleFunc(redirectURI, func(w http.ResponseWriter, r *http.Request) {\n\t\t// Got a response, call the callback function.\n\t\ti, err := callback(w, r)\n\t\tresultCh <- CallbackResult{i, err}\n\t})\n\n\t// Start the server\n\tgo startServer(s)\n\n\t// Block till the callback gives us a result.\n\tr := <-resultCh\n\n\t// Shutdown the server.\n\ts.Shutdown(context.Background())\n\n\t// Return the result.\n\treturn r.Interface, r.Error\n}", "func (h *Handler) 
CallbackHandler(w http.ResponseWriter, r *http.Request, p webapi.CallbackParams) error {\n\tswitch p.Type {\n\tcase gravityLoginAction: // login via tele login\n\t\turl, err := h.constructConsoleResponse(p.ClientRedirectURL, p.Username)\n\t\tif err != nil {\n\t\t\treturn trace.Wrap(err)\n\t\t}\n\t\thttp.Redirect(w, r, url.String(), http.StatusFound)\n\t\treturn nil\n\tdefault: // call the base (open-source) handler for web sign in\n\t\treturn h.Handler.CallbackHandler(w, r, p)\n\t}\n}", "func (o *KnoxCallbackDefault) Code() int {\n\treturn o._statusCode\n}", "func Status(r *http.Request, status int) { helpers.Status(r, status) }", "func (p *Proxy) nginxCallback(w http.ResponseWriter, r *http.Request) error {\n\tencryptedSession := r.FormValue(urlutil.QuerySessionEncrypted)\n\tif _, err := p.saveCallbackSession(w, r, encryptedSession); err != nil {\n\t\treturn httputil.NewError(http.StatusBadRequest, err)\n\t}\n\treturn httputil.NewError(http.StatusUnauthorized, errors.New(\"mock error to restart redirect flow\"))\n}", "func (s *Server) HandlerCallback(w http.ResponseWriter, r *http.Request) {\n\tsession, err := s.SessionStore.Get(r, s.Config.SessionName)\n\tif err != nil {\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"err\": err.Error(),\n\t\t}).Error(\"failed to get session\")\n\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tvar val interface{}\n\tvar ok bool\n\n\tvar provider string\n\tval = session.Values[\"provider\"]\n\tif provider, ok = val.(string); !ok {\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"err\": \"provider is not found\",\n\t\t}).Info(\"session is broken.\")\n\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tvar callback string\n\tval = session.Values[\"callback\"]\n\tif callback, ok = val.(string); !ok {\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"err\": \"callback is not found\",\n\t\t}).Info(\"session is 
broken.\")\n\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tvar next string\n\tval = session.Values[\"next\"]\n\tif next, ok = val.(string); !ok {\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"err\": \"next is not found\",\n\t\t}).Info(\"session is broken.\")\n\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tvar state string\n\tval = session.Values[\"state\"]\n\tif state, ok = val.(string); !ok {\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"err\": \"state is not found\",\n\t\t}).Info(\"session is broken.\")\n\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tproviderConfig, ok := s.ProviderConfigs[provider]\n\tif !ok {\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"err\": \"provider is not found\",\n\t\t}).Info(\"session is broken.\")\n\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\treturn\n\t}\n\tconf := providerConfig.Config()\n\tconf.RedirectURL = callback\n\n\tquery := r.URL.Query()\n\n\tif state != query.Get(\"state\") {\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"err\": \"state is not correct\",\n\t\t}).Info(\"session is broken.\")\n\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tcode := query.Get(\"code\")\n\tt, err := conf.Exchange(r.Context(), code)\n\tif err != nil {\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"err\": err.Error(),\n\t\t}).Info(\"oauth verification failed\")\n\t\thttp.Error(w, http.StatusText(http.StatusForbidden), http.StatusForbidden)\n\t\treturn\n\t}\n\n\tvar uid string\n\tvar info map[string]interface{}\n\tif infoctx, ok := providerConfig.(ProviderInfoContext); ok {\n\t\tuid, info, err = infoctx.InfoContext(r.Context(), &conf, t)\n\t} else {\n\t\tuid, info, err = providerConfig.Info(&conf, t)\n\t}\n\tif err != nil {\n\t\tif err == ErrForbidden 
{\n\t\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\t\"err\": err.Error(),\n\t\t\t}).Warn(\"access forbidden\")\n\t\t\thttp.Error(w, http.StatusText(http.StatusForbidden), http.StatusForbidden)\n\t\t} else {\n\t\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\t\"err\": err.Error(),\n\t\t\t}).Warn(\"user info cannot get\")\n\t\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\t}\n\t\treturn\n\t}\n\tjsonInfo, encodedInfo, err := encodeInfo(info)\n\tif err != nil {\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"err\": err.Error(),\n\t\t}).Info(\"failed encoding info\")\n\t}\n\n\tsession.Values[\"uid\"] = uid\n\tsession.Values[\"info\"] = encodedInfo\n\tsession.Values[\"logged_in_at\"] = time.Now()\n\n\tif err := session.Save(r, w); err != nil {\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"err\": err.Error(),\n\t\t}).Error(\"failed to save session\")\n\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t}\n\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"user\": uid,\n\t\t\"info\": jsonInfo,\n\t}).Info(\"user login\")\n\n\thttp.Redirect(w, r, next, http.StatusFound)\n}", "func handleCallback(w http.ResponseWriter, r *http.Request) {\n\t// in the real world you should check the state query parameter, but this is omitted for brevity reasons.\n\n\t// Exchange the access code for an access (and optionally) a refresh token\n\ttoken, err := client.GetOAuth2Config().Exchange(context.Background(), r.URL.Query().Get(\"code\"))\n\tif err != nil {\n\t\thttp.Error(w, errors.Wrap(err, \"Could not exhange token\").Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// Render the output\n\trenderTemplate(w, \"callback.html\", struct {\n\t\t*oauth2.Token\n\t\tIDToken interface{}\n\t}{\n\t\tToken: token,\n\t\tIDToken: token.Extra(\"id_token\"),\n\t})\n}", "func StatusHandler(srv *httpServ) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tif !validMethod(w, r, 
srv.Gateway, []string{http.MethodGet}) {\n\t\t\treturn\n\t\t}\n\n\t\tskyAddr := r.URL.Query().Get(\"skyaddr\")\n\t\tif skyAddr == \"\" {\n\t\t\terrorResponse(w, srv.Gateway, http.StatusBadRequest, \"Missing skyaddr\")\n\t\t\treturn\n\t\t}\n\n\t\tif !verifySkycoinAddress(w, srv.Gateway, skyAddr) {\n\t\t\treturn\n\t\t}\n\n\t\tif !readyToStart(w, srv.Gateway, srv.StartAt) {\n\t\t\treturn\n\t\t}\n\n\t\tcxt, cancel := context.WithTimeout(r.Context(), proxyRequestTimeout)\n\t\tdefer cancel()\n\n\t\tstReq := daemon.StatusRequest{SkyAddress: skyAddr}\n\n\t\tsrv.Println(\"Sending StatusRequest to teller, skyaddr\", skyAddr)\n\n\t\trsp, err := srv.Gateway.GetDepositStatuses(cxt, &stReq)\n\t\tif err != nil {\n\t\t\thandleGatewayResponseError(w, srv.Gateway, err)\n\t\t\treturn\n\t\t}\n\n\t\tsrv.Printf(\"Received response to StatusRequest: %+v\\n\", *rsp)\n\n\t\tif rsp.Error != \"\" {\n\t\t\thttputil.ErrResponse(w, http.StatusBadRequest, rsp.Error)\n\t\t\tsrv.Println(rsp.Error)\n\t\t\treturn\n\t\t}\n\n\t\tif err := httputil.JSONResponse(w, makeStatusHTTPResponse(*rsp)); err != nil {\n\t\t\tsrv.Println(err)\n\t\t}\n\t}\n}", "func (p *Proxy) OAuthCallback(w http.ResponseWriter, r *http.Request) {\n\terr := r.ParseForm()\n\tif err != nil {\n\t\tlog.FromRequest(r).Error().Err(err).Msg(\"proxy: failed parsing request form\")\n\t\thttputil.ErrorResponse(w, r, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\terrorString := r.Form.Get(\"error\")\n\tif errorString != \"\" {\n\t\thttputil.ErrorResponse(w, r, errorString, http.StatusForbidden)\n\t\treturn\n\t}\n\t// We begin the process of redeeming the code for an access token.\n\tsession, err := p.AuthenticateClient.Redeem(r.Context(), r.Form.Get(\"code\"))\n\tif err != nil {\n\t\tlog.FromRequest(r).Error().Err(err).Msg(\"proxy: error redeeming authorization code\")\n\t\thttputil.ErrorResponse(w, r, \"Internal error\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tencryptedState := 
r.Form.Get(\"state\")\n\tstateParameter := &StateParameter{}\n\terr = p.cipher.Unmarshal(encryptedState, stateParameter)\n\tif err != nil {\n\t\tlog.FromRequest(r).Error().Err(err).Msg(\"proxy: could not unmarshal state\")\n\t\thttputil.ErrorResponse(w, r, \"Internal error\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tc, err := p.csrfStore.GetCSRF(r)\n\tif err != nil {\n\t\tlog.FromRequest(r).Error().Err(err).Msg(\"proxy: failed parsing csrf cookie\")\n\t\thttputil.ErrorResponse(w, r, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\tp.csrfStore.ClearCSRF(w, r)\n\n\tencryptedCSRF := c.Value\n\tcsrfParameter := &StateParameter{}\n\terr = p.cipher.Unmarshal(encryptedCSRF, csrfParameter)\n\tif err != nil {\n\t\tlog.FromRequest(r).Error().Err(err).Msg(\"proxy: couldn't unmarshal CSRF\")\n\t\thttputil.ErrorResponse(w, r, \"Internal error\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\tif encryptedState == encryptedCSRF {\n\t\tlog.FromRequest(r).Error().Msg(\"encrypted state and CSRF should not be equal\")\n\t\thttputil.ErrorResponse(w, r, \"Bad request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\tif !reflect.DeepEqual(stateParameter, csrfParameter) {\n\t\tlog.FromRequest(r).Error().Msg(\"state and CSRF should be equal\")\n\t\thttputil.ErrorResponse(w, r, \"Bad request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// We store the session in a cookie and redirect the user back to the application\n\terr = p.sessionStore.SaveSession(w, r, session)\n\tif err != nil {\n\t\tlog.FromRequest(r).Error().Msg(\"error saving session\")\n\t\thttputil.ErrorResponse(w, r, \"Error saving session\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tlog.FromRequest(r).Debug().\n\t\tStr(\"code\", r.Form.Get(\"code\")).\n\t\tStr(\"state\", r.Form.Get(\"state\")).\n\t\tStr(\"RefreshToken\", session.RefreshToken).\n\t\tStr(\"session\", session.AccessToken).\n\t\tStr(\"RedirectURI\", stateParameter.RedirectURI).\n\t\tMsg(\"session\")\n\n\t// This is the redirect 
back to the original requested application\n\thttp.Redirect(w, r, stateParameter.RedirectURI, http.StatusFound)\n}", "func (ctr *RegistRequestController) UpdateRequestStatus(c echo.Context) error {\n\tuserProfile := c.Get(\"user_profile\").(m.User)\n\n\tacceptOrDenyParam := new(param.UpdateRequestStatusParams)\n\n\tif err := c.Bind(acceptOrDenyParam); err != nil {\n\t\tmsgErrBind := err.Error()\n\t\tfieldErr := utils.GetFieldBindForm(msgErrBind)\n\n\t\tif fieldErr != \"\" {\n\t\t\treturn c.JSON(http.StatusOK, cf.JsonResponse{\n\t\t\t\tStatus: cf.FailResponseCode,\n\t\t\t\tMessage: \"Invalid field \" + fieldErr,\n\t\t\t})\n\t\t}\n\t}\n\n\trequestObj, err := ctr.RegistRequestRepo.GetRegRequestsByID(acceptOrDenyParam.RequestID)\n\tif err != nil {\n\t\tif err.Error() == pg.ErrNoRows.Error() {\n\t\t\treturn c.JSON(http.StatusOK, cf.JsonResponse{\n\t\t\t\tStatus: cf.FailResponseCode,\n\t\t\t\tMessage: \"Request is not exists\",\n\t\t\t})\n\t\t}\n\n\t\treturn c.JSON(http.StatusInternalServerError, cf.JsonResponse{\n\t\t\tStatus: cf.FailResponseCode,\n\t\t\tMessage: \"System error\",\n\t\t})\n\t}\n\n\torg, err := ctr.OrgRepo.SelectEmailAndPassword(userProfile.OrganizationID)\n\tif err != nil {\n\t\treturn c.JSON(http.StatusInternalServerError, cf.JsonResponse{\n\t\t\tStatus: cf.FailResponseCode,\n\t\t\tMessage: \"System error\",\n\t\t})\n\t}\n\tif org.Email == \"\" || org.EmailPassword == \"\" {\n\t\treturn c.JSON(http.StatusUnprocessableEntity, cf.JsonResponse{\n\t\t\tStatus: cf.FailResponseCode,\n\t\t\tMessage: \"Your organization must have an email\",\n\t\t})\n\t}\n\tctr.InitSmtp(org.Email, org.EmailPassword)\n\n\tacceptOrDenyParam.Email = requestObj.Email\n\n\tif acceptOrDenyParam.Status == cf.AcceptRequestStatus {\n\t\tregCode, err := ctr.RegistRequestRepo.AcceptRequest(ctr.RegCodeRepo, acceptOrDenyParam)\n\n\t\tif err != nil {\n\t\t\treturn c.JSON(http.StatusInternalServerError, cf.JsonResponse{\n\t\t\t\tStatus: cf.FailResponseCode,\n\t\t\t\tMessage: \"System 
error\",\n\t\t\t})\n\t\t}\n\n\t\tsampleData := new(param.SampleData)\n\t\tsampleData.SendTo = []string{acceptOrDenyParam.Email}\n\t\tsampleData.URL = os.Getenv(\"BASE_SPA_URL\") + \"/organization/create-organization/\" + regCode.Code\n\t\tif err := ctr.SendMail(\"Micro Erp Registration Email\", sampleData, cf.CreateOrganizationTemplate); err != nil {\n\t\t\tctr.Logger.Error(err)\n\t\t\treturn c.JSON(http.StatusInternalServerError, cf.JsonResponse{\n\t\t\t\tStatus: cf.FailResponseCode,\n\t\t\t\tMessage: \"System error\",\n\t\t\t})\n\t\t}\n\t} else if acceptOrDenyParam.Status == cf.DenyRequestStatus {\n\t\terr = ctr.RegistRequestRepo.DenyRequest(acceptOrDenyParam)\n\n\t\tif err != nil {\n\t\t\treturn c.JSON(http.StatusInternalServerError, cf.JsonResponse{\n\t\t\t\tStatus: cf.FailResponseCode,\n\t\t\t\tMessage: \"System error\",\n\t\t\t})\n\t\t}\n\n\t\tsampleData := new(param.SampleData)\n\t\tsampleData.SendTo = []string{acceptOrDenyParam.Email}\n\t\tsampleData.Content = \"Your request join to \" + userProfile.Organization.Name + \" organization is deny. 
We sorry about that.\"\n\n\t\terr = ctr.SendMail(\"Announce from Micro Erp\", sampleData, cf.TemplateSendMailAnnounce)\n\t}\n\n\treturn c.JSON(http.StatusOK, cf.JsonResponse{\n\t\tStatus: cf.SuccessResponseCode,\n\t\tMessage: \"Success\",\n\t})\n}", "func (client *Client) ListProgramTypeCountWithCallback(request *ListProgramTypeCountRequest, callback func(response *ListProgramTypeCountResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *ListProgramTypeCountResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.ListProgramTypeCount(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (c *Conn) callNetInfoCallback(ni *tailcfg.NetInfo) {\n\tc.netInfoMu.Lock()\n\tdefer c.netInfoMu.Unlock()\n\tif ni.BasicallyEqual(c.netInfoLast) {\n\t\treturn\n\t}\n\tc.netInfoLast = ni\n\tif c.netInfoFunc != nil {\n\t\tc.logf(\"netInfo update: %+v\", ni)\n\t\tgo c.netInfoFunc(ni)\n\t}\n}", "func (server *Server) SendCallback(clientID uint64, msg []byte) {\n\n\t// TODO - what if client no longer in list of current clients?\n\tvar (\n\t\tlocalIOR ioReply\n\t)\n\tserver.Lock()\n\tlci, ok := server.perClientInfo[clientID]\n\tif !ok {\n\t\tserver.logger.Printf(\"SERVER: SendCallback() - unable to find client UniqueID: %v\\n\", clientID)\n\t\tserver.Unlock()\n\t\treturn\n\t}\n\tserver.Unlock()\n\n\tlci.Lock()\n\tcurrentCtx := lci.cCtx\n\tlci.Unlock()\n\n\tlocalIOR.JResult = msg\n\tsetupHdrReply(&localIOR, Upcall)\n\n\tserver.returnResults(&localIOR, currentCtx)\n}", "func SpotifyCallback(w http.ResponseWriter, r *http.Request) {\n\tfmt.Println(\"Received Request: /SpotifyCallback\")\n\turlParams := r.URL.Query()\n\tAuthCode = urlParams.Get(\"code\")\n\n\tgetSpotToken()\n\tgetCurrentUser()\n\tcreatePlaylist()\n}", "func AuthCallback(w http.ResponseWriter, r *http.Request) 
{\n\tcode := r.FormValue(\"code\")\n\tcallbackState := r.FormValue(\"state\")\n\n\tclientID, err := state.DecryptState(callbackState, os.Getenv(\"SECRET\"))\n\tif err != nil {\n\t\thttp.Error(w, \"State could not be verified\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tconfigValue, err := config.ReadConfigFromEnv(clientID)\n\tif err != nil {\n\t\tlog.Printf(\"Error while verifying state: %v\", err)\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\ttoken, err := github.Exchange(configValue.ClientID, configValue.ClientSecretID, code)\n\tif err != nil {\n\t\tlog.Printf(\"Error while exchange code %s for client %s with Github: %v\", code, configValue.ClientID, err)\n\t\thttp.Error(w, fmt.Sprintf(\"Code %s for client %s was not accepted by the Oauth provider\", code, configValue.ClientID), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tredirectURLWithToken := fmt.Sprintf(\"%s?token=%s\", configValue.RedirectURL, token)\n\n\tw.Header().Set(\"Location\", redirectURLWithToken)\n\tw.WriteHeader(http.StatusTemporaryRedirect)\n}", "func CallbackURL(\n\tauthenticatePrivateKey *hpke.PrivateKey,\n\tproxyPublicKey *hpke.PublicKey,\n\trequestParams url.Values,\n\tprofile *identity.Profile,\n\tencryptURLValues hpke.EncryptURLValuesFunc,\n) (string, error) {\n\tredirectURL, err := ParseAndValidateURL(requestParams.Get(QueryRedirectURI))\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"invalid %s: %w\", QueryRedirectURI, err)\n\t}\n\n\tvar callbackURL *url.URL\n\tif requestParams.Has(QueryCallbackURI) {\n\t\tcallbackURL, err = ParseAndValidateURL(requestParams.Get(QueryCallbackURI))\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"invalid %s: %w\", QueryCallbackURI, err)\n\t\t}\n\t} else {\n\t\tcallbackURL, err = DeepCopy(redirectURL)\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"error copying %s: %w\", QueryRedirectURI, err)\n\t\t}\n\t\tcallbackURL.Path = \"/.pomerium/callback/\"\n\t\tcallbackURL.RawQuery = \"\"\n\t}\n\n\tcallbackParams := 
callbackURL.Query()\n\tif requestParams.Has(QueryIsProgrammatic) {\n\t\tcallbackParams.Set(QueryIsProgrammatic, \"true\")\n\t}\n\tcallbackParams.Set(QueryRedirectURI, redirectURL.String())\n\n\trawProfile, err := protojson.Marshal(profile)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error marshaling identity profile: %w\", err)\n\t}\n\tcallbackParams.Set(QueryIdentityProfile, string(rawProfile))\n\tcallbackParams.Set(QueryVersion, versionStr())\n\n\tBuildTimeParameters(callbackParams, signInExpiry)\n\n\tcallbackParams, err = encryptURLValues(authenticatePrivateKey, proxyPublicKey, callbackParams)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error encrypting callback params: %w\", err)\n\t}\n\tcallbackURL.RawQuery = callbackParams.Encode()\n\n\treturn callbackURL.String(), nil\n}", "func (client *Client) ChangeMediaStatusWithCallback(request *ChangeMediaStatusRequest, callback func(response *ChangeMediaStatusResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *ChangeMediaStatusResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.ChangeMediaStatus(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (a *GoogleAuth) GoogleCallbackHandler(w http.ResponseWriter, r *http.Request) {\n\tstate := r.FormValue(\"state\")\n\n\tif !a.GetAndDelete(state) {\n\t\tlog.Printf(\"Invalid oauth state, possibly timeout. 
remoteAddr='%s'\", r.RemoteAddr)\n\t\thttp.Redirect(w, r, a.errorURL, http.StatusTemporaryRedirect)\n\t\treturn\n\t}\n\n\tcode := r.FormValue(\"code\")\n\ttoken, err := a.config.Exchange(oauth2.NoContext, code)\n\tif err != nil {\n\t\tlog.Printf(\"Code exchange failed: %v\", err)\n\t\thttp.Redirect(w, r, a.errorURL, http.StatusTemporaryRedirect)\n\t\treturn\n\t}\n\n\tresponse, err := http.Get(\"https://www.googleapis.com/oauth2/v2/userinfo?access_token=\" + token.AccessToken)\n\tif err != nil {\n\t\tlog.Printf(\"Getting userinfo failed: %v\", err)\n\t\thttp.Redirect(w, r, a.errorURL, http.StatusTemporaryRedirect)\n\t\treturn\n\t}\n\tdefer response.Body.Close()\n\tcontents, err := ioutil.ReadAll(response.Body)\n\n\tu := &Userinfo{}\n\terr = json.Unmarshal(contents, u)\n\tif err != nil {\n\t\tlog.Printf(\"Unable to unmarshal userinfo: %v\", err)\n\t\thttp.Redirect(w, r, a.errorURL, http.StatusTemporaryRedirect)\n\t\treturn\n\t}\n\n\ta.loginCallback(w, r, u)\n}", "func RetryStatuses(statuses ...int) RetryFn {\n\treturn func(attempt Attempt) bool {\n\t\tif attempt.Response == nil {\n\t\t\treturn false\n\t\t}\n\t\tfor _, st := range statuses {\n\t\t\tif st == attempt.Response.StatusCode {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n}", "func (_AnchorChain *AnchorChainTransactor) Callback(opts *bind.TransactOpts, state bool, _result []string) (*types.Transaction, error) {\n\treturn _AnchorChain.contract.Transact(opts, \"callback\", state, _result)\n}", "func (h *UserAuthHandler) OAuthCallback() echo.HandlerFunc {\n\treturn func(c echo.Context) error {\n\t\tctx := c.Request().Context()\n\t\tlog := h.Logger.With().Logger()\n\n\t\tstate := c.QueryParam(\"state\")\n\t\tif state == \"\" {\n\t\t\tlog.Warn().Msg(\"error verifying state\")\n\t\t\treturn c.String(http.StatusBadRequest, \"error invalid state\")\n\t\t}\n\n\t\tcode := c.QueryParam(\"code\")\n\t\tif code == \"\" {\n\t\t\tlog.Warn().Msg(\"error verifying state\")\n\t\t\treturn 
c.String(http.StatusBadRequest, \"error invalid code\")\n\t\t}\n\n\t\tredirectURL, err := h.OAuth.VerifyState(state)\n\t\tif err != nil {\n\t\t\tlog.Error().Err(err).Msg(\"error verifying state\")\n\t\t\treturn c.String(http.StatusUnauthorized, \"error verifying state\")\n\t\t}\n\n\t\ttoken, err := h.OAuth.Exchange(ctx, code)\n\t\tif err != nil {\n\t\t\tlog.Error().Err(err).Msg(\"error during token exchange\")\n\t\t\treturn c.String(http.StatusUnauthorized, \"error during token exchange\")\n\t\t}\n\t\tsetCookies(c, token)\n\n\t\treturn c.Redirect(http.StatusPermanentRedirect, redirectURL)\n\t}\n}", "func (h BaseHandler) Status(ctx *fasthttp.RequestCtx, status int) error {\n\treturn Status(ctx, status)\n}", "func SuccessUrl(val string) Argument {\n\treturn func(request *requests.Request) error {\n\t\tif len(val) > 256 {\n\t\t\treturn errors.New(\"callback url too long, max 256 caracteres\")\n\t\t}\n\t\trequest.AddArgument(\"success_url\", val)\n\t\treturn nil\n\t}\n}", "func callback(nlm *C.struct_nl_msg, nla unsafe.Pointer) C.int {\n\tcbID := uintptr(nla)\n\tcallbacksLock.RLock()\n\tcbArg := callbacks[cbID]\n\tcallbacksLock.RUnlock()\n\n\tif cbArg == nil {\n\t\tpanic(fmt.Sprintf(\"No netlink callback with ID %d\", cbID))\n\t}\n\n\tcbMsg := &Message{nlm: nlm}\n\tif err := cbArg.fn(cbMsg, cbArg.arg); err != nil {\n\t\tcbArg.err = err\n\t\treturn C.NL_STOP\n\t}\n\treturn C.NL_OK\n}", "func (client *Client) ModifyOcspStatusWithCallback(request *ModifyOcspStatusRequest, callback func(response *ModifyOcspStatusResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *ModifyOcspStatusResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.ModifyOcspStatus(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (p *MyThriftClient) CallBack(callTime int64, name 
string, paramMap map[string]string) (r []string, err error) {\n if err = p.sendCallBack(callTime, name, paramMap); err != nil { return }\n return p.recvCallBack()\n}", "func (handler *AuthHandler) Callback(c *gin.Context) {\n\tstate := c.Query(\"state\")\n\tcode := c.Query(\"code\")\n\n\ttoken, err := handler.GetAccessToken(state, code)\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\tc.AbortWithError(400, err)\n\t}\n\n\tmarshalledToken, err := json.Marshal(token)\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\tc.AbortWithError(400, err)\n\t}\n\n\ttokenString := base64.StdEncoding.EncodeToString(marshalledToken)\n\n\tc.SetCookie(\"token\", tokenString, 15*60*60, \"/\", \"\", true, true)\n\n\tc.Redirect(http.StatusMovedPermanently, \"/index\")\n\tc.Abort()\n}", "func (client *Client) QueryDataTrackResultDownloadStatusWithCallback(request *QueryDataTrackResultDownloadStatusRequest, callback func(response *QueryDataTrackResultDownloadStatusResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *QueryDataTrackResultDownloadStatusResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.QueryDataTrackResultDownloadStatus(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (s *Strava) SetCallbackHandler(\n\tsuccess func(auth *strava.AuthorizationResponse, w http.ResponseWriter, r *http.Request),\n\tfailure func(err error, w http.ResponseWriter, r *http.Request)) {\n\tpath, _ := s.authenticator.CallbackPath()\n\thttp.HandleFunc(path, s.authenticator.HandlerFunc(success, failure))\n}", "func checkrequestStatus(d *schema.ResourceData, config Config, requestID string, timeOut int) error {\n\ttimeout := time.After(time.Duration(timeOut) * time.Second)\n\tfor {\n\t\tselect {\n\t\tcase <-time.After(1 * time.Second):\n\t\t\tstatus, state, err := 
checkServiceRequestStatus(config, requestID)\n\t\t\tif err == nil {\n\t\t\t\tif state == \"finished\" && status == \"Ok\" {\n\t\t\t\t\tlog.Println(\"[DEBUG] Service order added SUCCESSFULLY\")\n\t\t\t\t\td.SetId(requestID)\n\t\t\t\t\treturn nil\n\t\t\t\t} else if status == \"Error\" {\n\t\t\t\t\tlog.Println(\"[ERROR] Failed\")\n\t\t\t\t\treturn fmt.Errorf(\"[Error] Failed execution\")\n\t\t\t\t} else {\n\t\t\t\t\tlog.Println(\"[DEBUG] Request state is :\", state)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase <-timeout:\n\t\t\tlog.Println(\"[DEBUG] Timeout occured\")\n\t\t\treturn fmt.Errorf(\"[ERROR] Timeout\")\n\t\t}\n\t}\n}", "func (g *HTTPGateway) Call(req *retryablehttp.Request, statusCode int) ([]byte, error) {\n\tresBytes, err := g.Execute(req)\n\tif err == nil {\n\t\treturn resBytes, nil\n\t}\n\tr, ok := err.(*platform.RequestError)\n\tif !ok {\n\t\treturn nil, err\n\t}\n\tif r.StatusCode() != statusCode {\n\t\treturn nil, fmt.Errorf(r.GetResponse())\n\t}\n\treturn nil, err\n\n}", "func (r *ClusterPollRequest) Status(value int) *ClusterPollRequest {\n\tr.statuses = append(r.statuses, value)\n\treturn r\n}", "func (s *scanCoordinator) serverCallback(protoReq interface{}, ctx interface{}, conn net.Conn,\n\tcancelCh <-chan bool) {\n\n\tif protoReq == queryport.Ping {\n\t\tif ctx != nil {\n\t\t\tif conCtx := ctx.(*ConnectionContext); conCtx != nil {\n\t\t\t\tconCtx.ResetCache()\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\n\tttime := time.Now()\n\n\treq, err := NewScanRequest(protoReq, ctx, cancelCh, s)\n\tatime := time.Now()\n\tw := NewProtoWriter(req.ScanType, conn)\n\tdefer func() {\n\t\ts.handleError(req.LogPrefix, w.Done())\n\t\treq.Done()\n\t}()\n\n\tif req.ScanType == HeloReq {\n\t\ts.handleHeloRequest(req, w)\n\t\treturn\n\t}\n\n\tlogging.LazyVerbose(func() string {\n\t\treturn fmt.Sprintf(\"%s REQUEST %s\", req.LogPrefix, logging.TagStrUD(req))\n\t})\n\n\tif req.Consistency != nil {\n\t\tlogging.LazyVerbose(func() string {\n\t\t\treturn 
fmt.Sprintf(\"%s requested timestamp: %s => %s Crc64 => %v\", req.LogPrefix,\n\t\t\t\tstrings.ToLower(req.Consistency.String()), ScanTStoString(req.Ts), req.Ts.GetCrc64())\n\t\t})\n\t}\n\n\tif req.hasRollback != nil && req.hasRollback.Load() == true {\n\t\ts.handleError(req.LogPrefix, w.Error(ErrIndexRollback))\n\t\treturn\n\t}\n\n\tif s.tryRespondWithError(w, req, err) {\n\t\treturn\n\t}\n\n\tif req.Stats != nil {\n\t\treq.Stats.scanReqAllocDuration.Add(time.Now().Sub(atime).Nanoseconds())\n\t}\n\n\tif err := s.isScanAllowed(*req.Consistency, req); err != nil {\n\t\ts.tryRespondWithError(w, req, err)\n\t\treturn\n\t}\n\n\tif req.Stats != nil {\n\t\telapsed := time.Now().Sub(ttime).Nanoseconds()\n\t\treq.Stats.scanReqInitDuration.Add(elapsed)\n\t\treq.Stats.scanReqInitLatDist.Add(elapsed)\n\n\t\tnow := time.Now().UnixNano()\n\t\treq.Stats.numRequests.Add(1)\n\t\treq.Stats.lastScanTime.Set(now)\n\t\tif req.GroupAggr != nil {\n\t\t\treq.Stats.numRequestsAggr.Add(1)\n\t\t} else {\n\t\t\treq.Stats.numRequestsRange.Add(1)\n\t\t}\n\n\t\tfor _, partitionId := range req.PartitionIds {\n\t\t\treq.Stats.updatePartitionStats(partitionId,\n\t\t\t\tfunc(stats *IndexStats) {\n\t\t\t\t\tstats.numRequests.Add(1)\n\t\t\t\t\tstats.lastScanTime.Set(now)\n\t\t\t\t})\n\t\t}\n\t}\n\n\tt0 := time.Now()\n\tis, err := s.getRequestedIndexSnapshot(req)\n\tif err != nil {\n\t\tlogging.Infof(\"%s Error in getRequestedIndexSnapshot %v\", req.LogPrefix, err)\n\t\tif s.tryRespondWithError(w, req, err) {\n\t\t\treturn\n\t\t}\n\t}\n\n\tdefer DestroyIndexSnapshot(is)\n\n\tlogging.LazyVerbose(func() string {\n\t\treturn fmt.Sprintf(\"%s snapshot timestamp: %s\",\n\t\t\treq.LogPrefix, ScanTStoString(is.Timestamp()))\n\t})\n\n\tdefer func() {\n\t\tif req.Stats != nil {\n\t\t\telapsed := time.Now().Sub(ttime).Nanoseconds()\n\t\t\treq.Stats.scanReqDuration.Add(elapsed)\n\t\t\treq.Stats.scanReqLatDist.Add(elapsed)\n\t\t}\n\t}()\n\n\tif len(req.Ctxs) != 0 {\n\t\tvar err error\n\t\tdonech := make(chan 
bool)\n\t\tvar mutex sync.Mutex\n\n\t\tgo func() {\n\t\t\tselect {\n\t\t\tcase <-req.getTimeoutCh():\n\t\t\t\tmutex.Lock()\n\t\t\t\tdefer mutex.Unlock()\n\n\t\t\t\tselect {\n\t\t\t\tcase <-donech:\n\t\t\t\tdefault:\n\t\t\t\t\terr = common.ErrScanTimedOut\n\t\t\t\t\tclose(donech)\n\t\t\t\t}\n\t\t\tcase <-req.CancelCh:\n\t\t\t\tmutex.Lock()\n\t\t\t\tdefer mutex.Unlock()\n\n\t\t\t\tselect {\n\t\t\t\tcase <-donech:\n\t\t\t\tdefault:\n\t\t\t\t\terr = common.ErrClientCancel\n\t\t\t\t\tclose(donech)\n\t\t\t\t}\n\t\t\tcase <-donech:\n\t\t\t}\n\t\t}()\n\n\t\tnumCtxs := 0\n\t\tfor _, ctx := range req.Ctxs {\n\t\t\tif !ctx.Init(donech) {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tnumCtxs++\n\t\t}\n\n\t\tcont := func() bool {\n\t\t\tmutex.Lock()\n\t\t\tdefer mutex.Unlock()\n\n\t\t\tif s.tryRespondWithError(w, req, err) {\n\t\t\t\tfor i := 0; i < numCtxs; i++ {\n\t\t\t\t\treq.Ctxs[i].Done()\n\t\t\t\t}\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\tclose(donech)\n\t\t\treturn true\n\t\t}()\n\n\t\tif !cont {\n\t\t\treturn\n\t\t}\n\t}\n\n\ts.processRequest(req, w, is, t0)\n\n\tif len(req.Ctxs) != 0 {\n\t\tfor _, ctx := range req.Ctxs {\n\t\t\tctx.Done()\n\t\t}\n\t}\n}", "func (_m *GetBalanceCallback) OnBalanceAvailable(status int, value int64, info string) {\n\t_m.Called(status, value, info)\n}", "func (p *ProviderReceiver) OnRequest(ctx hb2.Context) error {\n\tid, req := ctx.ID, ctx.Request\n\tdata := req.FormValue(\"data\")\n\tlog.Println(\"Receive data:\", data)\n\tvar v struct {\n\t\tStatus string `json:\"status\"`\n\t\tUdid string `json:\"udid\"`\n\t\tProviderForwardedPort int `json:\"providerForwardedPort\"`\n\t}\n\tif err := json.Unmarshal([]byte(data), &v); err != nil {\n\t\treturn err\n\t}\n\tstatus, udid := v.Status, v.Udid\n\tif status == \"\" || udid == \"\" {\n\t\treturn errors.New(\"status or udid is empty\")\n\t}\n\tprovider, err := db.ProviderGet(id)\n\tif err != nil {\n\t\tlog.Println(\"Unexpect err:\", err)\n\t\treturn err\n\t}\n\tvar providerId = &provider.Id\n\tif status == 
\"online\" {\n\t\tlog.Printf(\"Device: %s is plugged-in\", udid)\n\t} else if status == \"offline\" {\n\t\tlog.Printf(\"Device: %s is plugged-off\", udid)\n\t\tproviderId = nil\n\t} else {\n\t\tlog.Printf(\"Invalid status: %s, only <offline|online> is allowed.\", status)\n\t\treturn errors.New(\"status is required\")\n\t}\n\n\treturn db.DeviceUpdate(udid, map[string]interface{}{\n\t\t\"provider_id\": providerId,\n\t\t\"providerForwardedPort\": v.ProviderForwardedPort,\n\t})\n}", "func (_AnchorChain *AnchorChainTransactorSession) Callback(state bool, _result []string) (*types.Transaction, error) {\n\treturn _AnchorChain.Contract.Callback(&_AnchorChain.TransactOpts, state, _result)\n}", "func (client *Client) StartNotaryWithCallback(request *StartNotaryRequest, callback func(response *StartNotaryResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *StartNotaryResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.StartNotary(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (_AnchorChain *AnchorChainSession) Callback(state bool, _result []string) (*types.Transaction, error) {\n\treturn _AnchorChain.Contract.Callback(&_AnchorChain.TransactOpts, state, _result)\n}", "func (g *gateway) ProcessRequest(ctx context.Context, rawRequest []byte) (rawResponse []byte, httpStatusCode int) {\n\t// decode\n\tmsg, err := g.codec.DecodeRequest(rawRequest)\n\tif err != nil {\n\t\treturn newError(g.codec, \"\", api.UserMessageParseError, err.Error())\n\t}\n\tif err = msg.Validate(); err != nil {\n\t\treturn newError(g.codec, msg.Body.MessageId, api.UserMessageParseError, err.Error())\n\t}\n\t// find correct handler\n\thandler, ok := g.handlers[msg.Body.DonId]\n\tif !ok {\n\t\treturn newError(g.codec, msg.Body.MessageId, api.UnsupportedDONIdError, \"unsupported DON 
ID\")\n\t}\n\t// send to the handler\n\tresponseCh := make(chan handlers.UserCallbackPayload, 1)\n\terr = handler.HandleUserMessage(ctx, msg, responseCh)\n\tif err != nil {\n\t\treturn newError(g.codec, msg.Body.MessageId, api.InternalHandlerError, err.Error())\n\t}\n\t// await response\n\tvar response handlers.UserCallbackPayload\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn newError(g.codec, msg.Body.MessageId, api.RequestTimeoutError, \"handler timeout\")\n\tcase response = <-responseCh:\n\t\tbreak\n\t}\n\tif response.ErrCode != api.NoError {\n\t\treturn newError(g.codec, msg.Body.MessageId, response.ErrCode, response.ErrMsg)\n\t}\n\t// encode\n\trawResponse, err = g.codec.EncodeResponse(response.Msg)\n\tif err != nil {\n\t\treturn newError(g.codec, msg.Body.MessageId, api.NodeReponseEncodingError, \"\")\n\t}\n\treturn rawResponse, api.ToHttpErrorCode(api.NoError)\n}", "func (p *DemoThriftClient) CallBack(callTime int64, name string, paramMap map[int64]string) (r []string, err error) {\n\tif err = p.sendCallBack(callTime, name, paramMap); err != nil {\n\t\treturn\n\t}\n\treturn p.recvCallBack()\n}", "func (api *CoreHandler) AuthorizeCallback(w http.ResponseWriter, r *http.Request) {\n\tlog.Println(\"Reaceived callback from Instagram oauth\")\n\n\t// Get the query string\n\tvals := r.URL.Query()\n\n\t// If \"error\" is not an empty string we have not received our access code\n\t// This is error param is specified by the Reddit API\n\tif val, ok := vals[\"error\"]; ok {\n\t\tif len(val) != 0 {\n\t\t\tlog.Printf(\"Did not receive authorization. 
Error: %v\\n\", vals[\"error\"][0])\n\t\t\t// This is the case where the user likely denied us access\n\t\t\t// TODO: should redirect back to appropriate page in front-end\n\t\t\treturn\n\t\t}\n\t}\n\n\tvar instaAuth *InstagramAuthResponse\n\tvar err error\n\t// Make sure the code exists\n\tif len(vals[\"code\"]) > 0 {\n\t\t// Now request bearer token using the code we received\n\t\tinstaAuth, err = api.requestToken(vals[\"code\"][0])\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Unable to receive bearer token: %v\\n\", err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tlog.Printf(\"Received the following auth from instagram: %+v\", *instaAuth)\n\n\t// Post code back to core async as the rest is not dependant on this -- vals[\"state\"] should be userID\n\tgo api.postInstaAuth(instaAuth, vals[\"state\"][0])\n\n\t// Redirect to frontend\n\thttp.Redirect(w, r, api.conf.FrontendURL, http.StatusMovedPermanently)\n}", "func StatusValidator(s Status) error {\n\tswitch s {\n\tcase StatusPENDING, StatusPOSTED:\n\t\treturn nil\n\tdefault:\n\t\treturn fmt.Errorf(\"transaction: invalid enum value for status field: %q\", s)\n\t}\n}", "func (c *Operation) callback(w http.ResponseWriter, r *http.Request) { //nolint: funlen,gocyclo\n\tif len(r.URL.Query()[\"error\"]) != 0 {\n\t\tif r.URL.Query()[\"error\"][0] == \"access_denied\" {\n\t\t\thttp.Redirect(w, r, c.homePage, http.StatusTemporaryRedirect)\n\t\t}\n\t}\n\n\ttk, err := c.tokenIssuer.Exchange(r)\n\tif err != nil {\n\t\tlogger.Errorf(\"failed to exchange code for token: %s\", err.Error())\n\t\tc.writeErrorResponse(w, http.StatusBadRequest,\n\t\t\tfmt.Sprintf(\"failed to exchange code for token: %s\", err.Error()))\n\n\t\treturn\n\t}\n\n\t// user info from token will be used for to retrieve data from cms\n\tinfo, err := c.tokenResolver.Resolve(tk.AccessToken)\n\tif err != nil {\n\t\tlogger.Errorf(\"failed to get token info: %s\", err.Error())\n\t\tc.writeErrorResponse(w, http.StatusBadRequest,\n\t\t\tfmt.Sprintf(\"failed to get token info: %s\", 
err.Error()))\n\n\t\treturn\n\t}\n\n\tuserID, subject, err := c.getCMSData(tk, \"email=\"+info.Subject, info.Scope)\n\tif err != nil {\n\t\tlogger.Errorf(\"failed to get cms data: %s\", err.Error())\n\t\tc.writeErrorResponse(w, http.StatusBadRequest,\n\t\t\tfmt.Sprintf(\"failed to get cms data: %s\", err.Error()))\n\n\t\treturn\n\t}\n\n\tcallbackURLCookie, err := r.Cookie(callbackURLCookie)\n\tif err != nil && !errors.Is(err, http.ErrNoCookie) {\n\t\tc.writeErrorResponse(w, http.StatusBadRequest,\n\t\t\tfmt.Sprintf(\"failed to get authMode cookie: %s\", err.Error()))\n\n\t\treturn\n\t}\n\n\tif callbackURLCookie != nil && callbackURLCookie.Value != \"\" {\n\t\ttxnID := uuid.NewString()\n\t\tdata := txnData{\n\t\t\tUserID: userID,\n\t\t\tScope: info.Scope,\n\t\t\tToken: tk.AccessToken,\n\t\t}\n\n\t\tdataBytes, mErr := json.Marshal(data)\n\t\tif mErr != nil {\n\t\t\tc.writeErrorResponse(w, http.StatusInternalServerError,\n\t\t\t\tfmt.Sprintf(\"failed to marshal txn data: %s\", mErr.Error()))\n\t\t\treturn\n\t\t}\n\n\t\terr = c.store.Put(txnID, dataBytes)\n\t\tif err != nil {\n\t\t\tc.writeErrorResponse(w, http.StatusInternalServerError,\n\t\t\t\tfmt.Sprintf(\"failed to save txn data: %s\", err.Error()))\n\n\t\t\treturn\n\t\t}\n\n\t\thttp.Redirect(w, r, callbackURLCookie.Value+\"?txnID=\"+txnID, http.StatusTemporaryRedirect)\n\n\t\treturn\n\t}\n\n\tvcsProfileCookie, err := r.Cookie(vcsProfileCookie)\n\tif err != nil {\n\t\tlogger.Errorf(\"failed to get cookie: %s\", err.Error())\n\t\tc.writeErrorResponse(w, http.StatusBadRequest,\n\t\t\tfmt.Sprintf(\"failed to get cookie: %s\", err.Error()))\n\n\t\treturn\n\t}\n\n\tcred, err := c.prepareCredential(subject, info.Scope, vcsProfileCookie.Value)\n\tif err != nil {\n\t\tlogger.Errorf(\"failed to create credential: %s\", err.Error())\n\t\tc.writeErrorResponse(w, http.StatusInternalServerError,\n\t\t\tfmt.Sprintf(\"failed to create credential: %s\", err.Error()))\n\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", 
\"text/html; charset=utf-8\")\n\n\tt, err := template.ParseFiles(c.didAuthHTML)\n\tif err != nil {\n\t\tlogger.Errorf(err.Error())\n\t\tc.writeErrorResponse(w, http.StatusInternalServerError,\n\t\t\tfmt.Sprintf(\"unable to load html: %s\", err.Error()))\n\n\t\treturn\n\t}\n\n\tif err := t.Execute(w, map[string]interface{}{\n\t\t\"Path\": generate + \"?\" + \"profile=\" + vcsProfileCookie.Value,\n\t\t\"Cred\": string(cred),\n\t}); err != nil {\n\t\tlogger.Errorf(fmt.Sprintf(\"failed execute qr html template: %s\", err.Error()))\n\t}\n}", "func (client *Client) QuerySingleDomainApiStatusCodeGroupTrendWithCallback(request *QuerySingleDomainApiStatusCodeGroupTrendRequest, callback func(response *QuerySingleDomainApiStatusCodeGroupTrendResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *QuerySingleDomainApiStatusCodeGroupTrendResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.QuerySingleDomainApiStatusCodeGroupTrend(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (s *Controller) HandleGoogleCallback(c *gin.Context) {\n\tcustomerInfo, err := getCustomerInfo(c.Request.FormValue(\"state\"), c.Request.FormValue(\"code\"))\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t\tc.JSON(http.StatusInternalServerError, gin.H{\n\t\t\t\"message\": \"failed to get customer info from google oauth\",\n\t\t\t\"error\": err.Error(),\n\t\t})\n\t\treturn\n\t}\n\n\tcustomer, err := s.FindOrCreateCustomer(context.TODO(), *customerInfo)\n\tif err != nil {\n\t\tc.JSON(http.StatusInternalServerError, gin.H{\n\t\t\t\"message\": \"failed to find or create customer\",\n\t\t\t\"error\": err.Error(),\n\t\t})\n\t\treturn\n\t}\n\n\tcreateTokenRequest := token.CreateTokenRequest{\n\t\tUserID: customer.ID,\n\t\tRole: \"CUSTOMER\",\n\t}\n\ttoken, err := 
s.token.CreateToken(context.TODO(), &createTokenRequest)\n\n\tc.JSON(http.StatusOK, token)\n}", "func oauth2callbackHandler(w http.ResponseWriter, r *http.Request) {\n\t// Create an oauth transport with a urlfetch.Transport embedded inside.\n\tt := &oauth.Transport{Config: config(r.Host)}\n\n\t// Exchange the code for access and refresh tokens.\n\ttok, err := t.Exchange(r.FormValue(\"code\"))\n\tif err != nil {\n\t\tw.WriteHeader(500)\n\t\tLogPrintf(\"oauth: exchange\")\n\t\treturn\n\t}\n\to, err := oauth2.New(t.Client())\n\tif err != nil {\n\t\tw.WriteHeader(500)\n\t\tLogPrintf(\"oauth: oauth get\")\n\t\treturn\n\t}\n\tu, err := o.Userinfo.Get().Do()\n\tif err != nil {\n\t\tw.WriteHeader(500)\n\t\tLogPrintf(\"oauth: userinfo get\")\n\t\treturn\n\t}\n\tuserId := fmt.Sprintf(\"%s_%s\", strings.Split(clientId, \".\")[0], u.Id)\n\tif err = storeUserID(w, r, userId); err != nil {\n\t\tw.WriteHeader(500)\n\t\tLogPrintf(\"oauth: store userid\")\n\t\treturn\n\t}\n\tuserSer, err := json.Marshal(u)\n\tif err != nil {\n\t\tw.WriteHeader(500)\n\t\tLogPrintf(\"oauth: json marshal\")\n\t\treturn\n\t}\n\tstoreCredential(userId, tok, string(userSer))\n\thttp.Redirect(w, r, fullUrl, http.StatusFound)\n}", "func (bcR *BlockchainReactor) sendStatusRequest() {\n\tbcR.Switch.BroadcastEnvelope(p2p.Envelope{\n\t\tChannelID: BlockchainChannel,\n\t\tMessage: &bcproto.StatusRequest{},\n\t})\n}", "func (m *RedirectPostRequestBody) SetCallbackUri(value *string)() {\n m.callbackUri = value\n}", "func (c *app) requestListenType(outgoing message, expecting string) (message, error) {\n\tc.Queue(outgoing)\n\n\twait := make(chan message, 1)\n\tid, _ := requestID(outgoing)\n\n\tc.listenersLock.Lock()\n\tc.listeners[id] = wait\n\tc.listenersLock.Unlock()\n\n\tdefer func() {\n\t\tc.listenersLock.Lock()\n\t\tdelete(c.listeners, id)\n\t\tc.listenersLock.Unlock()\n\t}()\n\n\tselect {\n\tcase msg := <-wait:\n\t\tif e, ok := msg.(*errorMessage); ok {\n\t\t\t// If only one argument is passed through, 
format it nicely for\n\t\t\t// transmission to the crust\n\t\t\t//\n\t\t\t// TODO: Pass along multiple pieces of information (the error type\n\t\t\t// and the error message, at least). However, the whole chain of\n\t\t\t// functions relying on requestListType expect a simple 'error'\n\t\t\t// object.\n\t\t\tif len(e.Arguments) >= 1 {\n\t\t\t\treturn nil, fmt.Errorf(\"%v: %v\", e.Error, e.Arguments[0])\n\t\t\t} else {\n\t\t\t\treturn nil, fmt.Errorf(\"%v\", e.Error)\n\t\t\t}\n\t\t} else if reflect.TypeOf(msg).String() != expecting {\n\t\t\treturn nil, fmt.Errorf(formatUnexpectedMessage(msg, expecting))\n\t\t} else {\n\t\t\treturn msg, nil\n\t\t}\n\tcase <-time.After(MessageTimeout):\n\t\treturn nil, fmt.Errorf(\"Timeout while waiting for message\")\n\t}\n}", "func notifyStatus(curStatus WorkerStatus, tid int) {\n\targs := CallArgs{}\n\targs.CurrentStatus = curStatus\n\targs.TaskID = tid\n\treply := CallReply{}\n\tcall(\"Coordinator.Response\", &args, &reply)\n}", "func (provider WechatWorkProvider) Callback(context *auth.Context) {\n\tcontext.Auth.LoginHandler(context, provider.AuthorizeHandler)\n}", "func Status(mode string) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\toutput, err := blockchain.GetBlockchainInfo(mode)\n\n\t\tvar data blockchain.CheckResponse\n\t\tif output != nil {\n\t\t\tdata = blockchain.CheckResponse{*output, \"\"}\n\t\t} else {\n\t\t\tdata = blockchain.CheckResponse{blockchain.BlockchainInfo{}, err.Error()}\n\t\t}\n\n\t\tc.JSON(http.StatusOK, data)\n\t}\n}", "func (ac *ApiConfig) CheckStatus(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != http.MethodGet {\n\t\thttp.Error(w, r.Method+\" is not available\", http.StatusInternalServerError)\n\t\tzerolog.Error().Msg(r.Method + \" is not available\")\n\t\treturn\n\t}\n\n\tstat := &models.StatusIdentifier{\n\t\tOk: true,\n\t\tMessage: \"Everything is alright\",\n\t}\n\n\terr := dResponseWriter(w, stat, http.StatusOK)\n\tif err != nil 
{\n\t\tzerolog.Error().Msg(err.Error())\n\t\treturn\n\t}\n\n\treturn\n}", "func NewKnoxCallbackDefault(code int) *KnoxCallbackDefault {\n\treturn &KnoxCallbackDefault{\n\t\t_statusCode: code,\n\t}\n}", "func CallbackServer(addr string) *http.Server {\n\th := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprintln(w, r.FormValue(\"code\"))\n\t})\n\n\treturn &http.Server{\n\t\tAddr: addr,\n\t\tHandler: h,\n\t}\n}", "func (p *OIDCProvider) CallbackHandler() http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tctx := oidc.ClientContext(r.Context(), p.client)\n\n\t\tif errMsg := r.URL.Query().Get(\"error\"); errMsg != \"\" {\n\t\t\tdesc := r.URL.Query().Get(\"error_description\")\n\t\t\tmsg := fmt.Sprintf(\"%s: %s\", errMsg, desc)\n\t\t\tlevel.Debug(p.logger).Log(\"msg\", msg)\n\t\t\thttp.Error(w, msg, http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\tqueryCode := r.URL.Query().Get(\"code\")\n\t\tif queryCode == \"\" {\n\t\t\tconst msg = \"no code in request\"\n\t\t\tlevel.Debug(p.logger).Log(\"msg\", msg)\n\t\t\thttp.Error(w, msg, http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tqueryState := r.URL.Query().Get(\"state\")\n\t\tif queryState != state {\n\t\t\tconst msg = \"incorrect state in request\"\n\t\t\tlevel.Debug(p.logger).Log(\"msg\", msg)\n\t\t\thttp.Error(w, msg, http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\ttoken, err := p.oauth2Config.Exchange(ctx, queryCode)\n\t\tif err != nil {\n\t\t\tmsg := fmt.Sprintf(\"failed to get token: %v\", err)\n\t\t\tlevel.Warn(p.logger).Log(\"msg\", msg, \"err\", err)\n\t\t\thttp.Error(w, msg, http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\trawIDToken, ok := token.Extra(\"id_token\").(string)\n\t\tif !ok {\n\t\t\tconst msg = \"no id_token in token response\"\n\t\t\tlevel.Warn(p.logger).Log(\"msg\", msg)\n\t\t\thttp.Error(w, msg, http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\t_, err = p.verifier.Verify(ctx, rawIDToken)\n\t\tif err != 
nil {\n\t\t\tmsg := fmt.Sprintf(\"failed to verify ID token: %v\", err)\n\t\t\tlevel.Warn(p.logger).Log(\"msg\", msg)\n\t\t\thttp.Error(w, msg, http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\thttp.SetCookie(w, &http.Cookie{\n\t\t\tName: p.cookieName,\n\t\t\tValue: rawIDToken,\n\t\t\tPath: \"/\",\n\t\t\tExpires: token.Expiry,\n\t\t})\n\n\t\thttp.Redirect(w, r, p.redirectURL, http.StatusFound)\n\t})\n}", "func StatusValidator(s Status) error {\n\tswitch s {\n\tcase StatusWAITING, StatusIN_PROGRESS, StatusDONE, StatusERROR:\n\t\treturn nil\n\tdefault:\n\t\treturn fmt.Errorf(\"operation: invalid enum value for status field: %q\", s)\n\t}\n}", "func ChangeStatus(r *request.R) error {\n\tif e := r.Valid(); e != nil {\n\t\treturn e\n\t}\n\ts, e := StatusChanger(amqpURI)\n\tif e != nil {\n\t\treturn e\n\t}\n\tm, e := json.Marshal(r)\n\tif e != nil {\n\t\treturn e\n\t}\n\treturn s.Produce(m)\n}", "func bungieCallback(c *gin.Context) {\n code := c.Query(\"code\")\n state := c.Query(\"state\")\n\n // Now use the code to receive an access token\n client := &http.Client{}\n data := url.Values{}\n data.Set(\"grant_type\", \"authorization_code\")\n data.Set(\"code\", code)\n req, _ := http.NewRequest(\"POST\", \"https://www.bungie.net/platform/app/oauth/token/\", strings.NewReader(data.Encode()))\n req.Header.Add(\"Authorization\", \"Basic \" + base64.StdEncoding.EncodeToString([]byte(os.Getenv(\"CLIENT_ID\") + \":\" + os.Getenv(\"CLIENT_SECRET\"))))\n req.Header.Add(\"Content-Type\", \"application/x-www-form-urlencoded\")\n resp, _ := client.Do(req)\n\n // Assess GetToken Response Code\n if resp.StatusCode == http.StatusOK {\n var tokenResponse TokenResponse\n // This could potentialy be changed to use unmarshalling to save memory\n err := json.NewDecoder(resp.Body).Decode(&tokenResponse)\n // err := json.Unmarshal(resp.Body, &tokenResponse)\n resp.Body.Close()\n if err != nil {\n fmt.Println(err)\n }\n\n deleteUser(state)\n\n // Collect the available destiny 
membership id(s) as an array\n req, _ = http.NewRequest(\"GET\", \"https://www.bungie.net/platform/User/GetBungieAccount/\" + tokenResponse.Membership_id + \"/254/\", nil)\n req.Header.Add(\"X-API-Key\", os.Getenv(\"API_KEY\"))\n resp, _ = client.Do(req)\n\n // Assess GetBungieAccount Response Code\n if resp.StatusCode == http.StatusOK {\n destinyMemberships := make([]Membership, 0)\n\n // Determine which Destiny membership IDs are associated with the Bungie account\n var accountResponse interface{}\n err = json.NewDecoder(resp.Body).Decode(&accountResponse)\n resp.Body.Close()\n\n accountMap := accountResponse.(map[string]interface{})\n responseMap := accountMap[\"Response\"].(map[string]interface{})\n destinyMembershipsArray := responseMap[\"destinyMemberships\"].([]interface{})\n\n activeMembership := \"-1\"\n for _, u := range destinyMembershipsArray {\n valuesMap := u.(map[string]interface{})\n\n\n //////\n ////\n //// For now, we assume PC is the active membership\n activeMembershipType := valuesMap[\"membershipType\"].(float64)\n if ( activeMembershipType == 3 ) {\n activeMembership = valuesMap[\"membershipId\"].(string)\n fmt.Println( \"Active Membership: \" + valuesMap[\"displayName\"].(string) )\n }\n //// Replace with getActiveMembership() implementation\n ////\n //////\n\n\n tmpMembership := Membership{activeMembershipType, valuesMap[\"membershipId\"].(string)}\n destinyMemberships = append(destinyMemberships, tmpMembership)\n }\n\n // Empty User Values\n loadouts := make([]Loadout, 0)\n\n // Insert new user entry\n newUser := User{loadouts, destinyMemberships, state, activeMembership, \"-1\", tokenResponse.Access_token, tokenResponse.Refresh_token}\n createUser(newUser)\n } else {\n // Error in GetBungieAccount\n fmt.Println(resp.StatusCode)\n }\n\n } else {\n // Error in GetTokenResponse\n fmt.Println(resp.StatusCode)\n }\n}", "func (a *Api) Status(w http.ResponseWriter, r *http.Request) error {\n\treturn nil\n}", "func (r *ManagedServicePollRequest) 
Status(value int) *ManagedServicePollRequest {\n\tr.statuses = append(r.statuses, value)\n\treturn r\n}", "func PersonalyCallback(\n\tgetUserByID dependencyGetUserByID,\n\tgetNumberOfPersonalyOffers dependencyGetNumberOfPersonalyOffers,\n\tgetSystemConfig dependencyGetSystemConfig,\n\tcreatePersonalyIncome dependencyCreatePersonalyIncome,\n\tbroadcast dependencyBroadcast,\n) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tpayload := personalyPayload{}\n\t\tif err := c.BindWith(&payload, binding.Form); err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"event\": models.EventPersonalyCallback,\n\t\t\t\"query\": c.Request.URL.Query().Encode(),\n\t\t\t\"user_id\": payload.UserID,\n\t\t\t\"amount\": payload.Amount,\n\t\t\t\"offer_id\": payload.OfferID,\n\t\t}).Debug(\"get superrewards callback\")\n\n\t\tuser, err := getUserByID(payload.UserID)\n\t\tif err != nil {\n\t\t\tc.AbortWithError(http.StatusInternalServerError, err)\n\t\t\treturn\n\t\t}\n\n\t\tcount, err := getNumberOfPersonalyOffers(payload.OfferID, payload.UserID)\n\t\tif err != nil {\n\t\t\tc.AbortWithError(http.StatusInternalServerError, err)\n\t\t\treturn\n\t\t}\n\n\t\tif count > 0 {\n\t\t\tc.String(http.StatusOK, \"1\")\n\t\t\treturn\n\t\t}\n\n\t\t// create income personaly\n\t\tamount := payload.Amount / 1e8\n\t\tincome := models.Income{\n\t\t\tUserID: user.ID,\n\t\t\tRefererID: user.RefererID,\n\t\t\tType: models.IncomeTypePersonaly,\n\t\t\tIncome: amount,\n\t\t\tRefererIncome: amount * getSystemConfig().RefererRewardRate,\n\t\t}\n\t\tif err := createPersonalyIncome(income, payload.OfferID); err != nil {\n\t\t\tc.AbortWithError(http.StatusInternalServerError, err)\n\t\t\treturn\n\t\t}\n\n\t\t// broadcast delta income to all clients\n\t\tdeltaIncome := struct {\n\t\t\tAddress string `json:\"address\"`\n\t\t\tAmount float64 `json:\"amount\"`\n\t\t\tType string `json:\"type\"`\n\t\t\tTime time.Time `json:\"time\"`\n\t\t}{user.Address, amount, \"personaly\", 
time.Now()}\n\t\tmsg, _ := json.Marshal(models.WebsocketMessage{DeltaIncome: deltaIncome})\n\t\tbroadcast(msg)\n\n\t\tc.String(http.StatusOK, \"1\")\n\t}\n}", "func (mt *RankdbCallback) Validate() (err error) {\n\tif mt.ID == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"id\"))\n\t}\n\tif mt.CallbackURL == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"callback_url\"))\n\t}\n\treturn\n}", "func Status(ctx *fasthttp.RequestCtx, status int) error {\n\tctx.SetStatusCode(status)\n\treturn nil\n}", "func (client *Client) ListCasesWithCallback(request *ListCasesRequest, callback func(response *ListCasesResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *ListCasesResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.ListCases(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func TestProcessStatusCode(t *testing.T) {\n\tassert := assert.New(t)\n\treq := NewRequest(\"GET\", \"url.com\", *auth)\n\treq.Response = new(http.Response)\n\treq.Response.StatusCode = 404\n\terr := req.ProcessStatusCode()\n\tassert.NotNil(err)\n\treq.Response.StatusCode = 450\n\terr = req.ProcessStatusCode()\n\tassert.NotNil(err)\n\treq.Response.StatusCode = 550\n\terr = req.ProcessStatusCode()\n\tassert.NotNil(err)\n\treq.Response.StatusCode = 200\n\terr = req.ProcessStatusCode()\n\tassert.Nil(err)\n}", "func (o LookupAppResultOutput) CallbackUrl() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupAppResult) string { return v.CallbackUrl }).(pulumi.StringOutput)\n}", "func (_m *Notifier) Status(message string) {\n\t_m.Called(message)\n}", "func (ew *Eyewitness) RegisterCallbackURL(callbackURL string) {\n\tresult, _, err := ew.s2t.RegisterCallback(\n\t\t&stv1.RegisterCallbackOptions{\n\t\t\tCallbackURL: 
core.StringPtr(callbackURL),\n\t\t},\n\t)\n\n\tif result != nil {\n\t\tlog.WithFields(logrus.Fields{\n\t\t\t\"url\": *result.URL,\n\t\t\t\"status\": *result.Status,\n\t\t}).Infoln(\"Callback registration complete\")\n\t}\n\n\tif err != nil {\n\t\tlog.WithError(err).Fatalln(\"Error registering callback URL\")\n\t}\n\n\tlog.WithField(\"url\", callbackURL).Infoln(\"Callback URL registered\")\n}", "func (uh *UserHandler) HandleGoogleCallback(w http.ResponseWriter, r *http.Request) {\n\tstatCheck := r.FormValue(\"state\")\n\tif statCheck != OauthStateString {\n\t\thttp.Error(w, fmt.Sprintf(\"Wrong state string: Expected %s, got %s. Please, try again\", OauthStateString, statCheck), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tcode := r.FormValue(\"code\")\n\ttoken, err := googleOauthConfig.Exchange(oauth2.NoContext, code)\n\tif err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"Code exchange failed with %s\", err.Error()), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tresponse, err := http.Get(\"https://www.googleapis.com/oauth2/v2/userinfo?access_token=\" + token.AccessToken)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\tdefer response.Body.Close()\n\n\tcontents, err := ioutil.ReadAll(response.Body)\n\tvar tempUser struct {\n\t\tSub string `json:\"sub\"`\n\t\tName string `json:\"name\"`\n\t\tGivenName string `json:\"given_name\"`\n\t\tFamilyName string `json:\"family_name\"`\n\t\tProfile string `json:\"profile\"`\n\t\tPicture string `json:\"picture\"`\n\t\tEmail string `json:\"email\"`\n\t\tEmailVerified string `json:\"email_verified\"`\n\t\tGender string `json:\"gender\"`\n\t}\n\terr = json.Unmarshal(contents, &tempUser)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tServerAUT = stringTools.RandomStringGN(20)\n\tpostQuery := \"/Register?thirdParty=true\" +\n\t\t\"&firstname=\" + tempUser.GivenName +\n\t\t\"&lastname=\" + tempUser.FamilyName +\n\t\t\"&email=\" + tempUser.Email +\n\t\t\"&from=google\" +\n\t\t\"&serverAUT=\" + 
ServerAUT\n\thttp.Redirect(w, r, postQuery, http.StatusSeeOther)\n\n}", "func (_BaseAccessWallet *BaseAccessWalletCaller) ExecStatusSendFail(opts *bind.CallOpts) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _BaseAccessWallet.contract.Call(opts, &out, \"execStatusSendFail\")\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}" ]
[ "0.5872548", "0.57046133", "0.55930907", "0.5435281", "0.5425485", "0.5421743", "0.53905284", "0.5348863", "0.5294707", "0.5120781", "0.50986767", "0.50361735", "0.5031683", "0.5016335", "0.50109285", "0.50042754", "0.49984872", "0.49728155", "0.48938978", "0.4870915", "0.48503366", "0.48293453", "0.48192516", "0.48020625", "0.47820804", "0.4779019", "0.47648787", "0.4723495", "0.4714914", "0.47118783", "0.4664978", "0.46449247", "0.46354493", "0.46188587", "0.46032536", "0.45736858", "0.45729664", "0.4569571", "0.4566886", "0.454791", "0.4537633", "0.4536705", "0.45318156", "0.45315266", "0.4526953", "0.4510278", "0.45035893", "0.45032278", "0.44949558", "0.44909647", "0.44846508", "0.44816357", "0.44777024", "0.44748056", "0.4463223", "0.44593558", "0.44479778", "0.44468", "0.44450915", "0.44349873", "0.44345522", "0.44211823", "0.44201154", "0.44134027", "0.44075552", "0.439416", "0.43861", "0.43816698", "0.4378683", "0.43594292", "0.4358839", "0.43583253", "0.43460286", "0.43440345", "0.43410996", "0.43375418", "0.43365204", "0.43305132", "0.4328412", "0.43189576", "0.43113798", "0.43076715", "0.43013686", "0.4294647", "0.42929104", "0.42908615", "0.42761394", "0.42750913", "0.42722523", "0.42646208", "0.42612806", "0.42545295", "0.4244191", "0.42392927", "0.4239093", "0.42382514", "0.42350093", "0.42344344", "0.4233511", "0.42321387" ]
0.6648428
0
Add indexes into MongoDB
func addIndexes() { var err error ufIndex1 := mgo.Index{ Key: []string{"codigo"}, Unique: true, Background: true, Sparse: true, } municipioIndex1 := mgo.Index{ Key: []string{"codigo"}, Unique: true, Background: true, Sparse: true, } // Add indexes into MongoDB session := Session.Copy() defer session.Close() ufCol := session.DB(commons.AppConfig.Database).C("ufs") municipioCol := session.DB(commons.AppConfig.Database).C("municipios") // cria indice codigo para UF err = ufCol.EnsureIndex(ufIndex1) if err != nil { log.Fatalf("[addIndexes]: %s\n", err) } log.Println("Indice para UF criado com sucesso") // cria indice codigo para Municipio err = municipioCol.EnsureIndex(municipioIndex1) if err != nil { log.Fatalf("[addIndexes]: %s\n", err) } log.Println("Indice para Municipio criado com sucesso") }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func addIndexes() {\n\tvar err error\n\tuserIndex := mgo.Index{\n\t\tKey: []string{\"email\"},\n\t\tUnique: true,\n\t\tBackground: true,\n\t\tSparse: true,\n\t}\n\n\tauthIndex := mgo.Index{\n\t\tKey: []string{\"sender_id\"},\n\t\tUnique: true,\n\t\tBackground: true,\n\t\tSparse: true,\n\t}\n\n\t// Add indexes into MongoDB\n\tsession := GetSession().Copy()\n\tdefer session.Close()\n\tuserCol := session.DB(AppConfig.MongoDBName).C(\"users\")\n\tauthCol := session.DB(AppConfig.MongoDBName).C(\"auth\")\n\n\terr = userCol.EnsureIndex(userIndex)\n\tif err != nil {\n\t\tlog.Fatalf(\"[addIndexes]: %s\\n\", err)\n\t}\n\n\terr = authCol.EnsureIndex(authIndex)\n\tif err != nil {\n\t\tlog.Fatalf(\"[addIndexes]: %s\\n\", err)\n\t}\n\n}", "func addIndexes() {\n\tvar err error\n\tuserIndex := mgo.Index{\n\t\tKey: []string{\"email\"},\n\t\tUnique: true,\n\t\tBackground: true,\n\t\tSparse: true,\n\t}\n\t// Add indexes into MongoDB\n\tsession := GetSession().Copy()\n\tdefer session.Close()\n\tuserCol := session.DB(AppConfig.Database).C(\"users\")\n\n\terr = userCol.EnsureIndex(userIndex)\n\tif err != nil {\n\t\tlog.Fatalf(\"[addIndexes]: %s\\n\", err)\n\t}\n}", "func (api *Api) createIndexes() {\n\t// username and email will be unique.\n\tkeys := bsonx.Doc{\n\t\t{Key: \"username\", Value: bsonx.Int32(1)},\n\t\t{Key: \"email\", Value: bsonx.Int32(1)},\n\t}\n\tpeople := api.DB.Collection(\"people\")\n\tdb.SetIndexes(people, keys)\n}", "func (b *Backend) createMongoIndexes() error {\n\top, err := b.connect()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn op.Do(func() error {\n\t\tindexes := []mgo.Index{\n\t\t\t{\n\t\t\t\tKey: []string{\"state\"},\n\t\t\t\tBackground: true, // can be used while index is being built\n\t\t\t\tExpireAfter: time.Duration(b.GetConfig().ResultsExpireIn) * time.Second,\n\t\t\t},\n\t\t\t{\n\t\t\t\tKey: []string{\"lock\"},\n\t\t\t\tBackground: true, // can be used while index is being built\n\t\t\t\tExpireAfter: time.Duration(b.GetConfig().ResultsExpireIn) 
* time.Second,\n\t\t\t},\n\t\t}\n\n\t\tfor _, index := range indexes {\n\t\t\t// Check if index already exists, if it does, skip\n\t\t\tif err := op.tasksCollection.EnsureIndex(index); err == nil {\n\t\t\t\tlog.INFO.Printf(\"%s index already exist, skipping create step\", index.Key[0])\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Create index (keep in mind EnsureIndex is blocking operation)\n\t\t\tlog.INFO.Printf(\"Creating %s index\", index.Key[0])\n\t\t\tif err := op.tasksCollection.DropIndex(index.Key[0]); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := op.tasksCollection.EnsureIndex(index); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n}", "func (m *Mongo) Index(gid string, background bool) error {\n\tm.Session.ResetIndexCache()\n\n\tsessionCopy := m.Session.Copy()\n\tdefer sessionCopy.Close()\n\tcol := sessionCopy.DB(m.DBName).C(gid)\n\n\tcInfo := &mgo.CollectionInfo{DisableIdIndex: true}\n\terr := col.Create(cInfo)\n\tif err != nil {\n\t\tlog.Error(err)\n\t}\n\n\t/*\n\t\t// TODO figure out the magic of mongo indexes\n\t\tindex := mgo.Index{\n\t\t\tKey: []string{\"g\", \"s\", \"p\", \"o\"},\n\t\t\tBackground: false,\n\t\t\tSparse: true,\n\t\t\tUnique: true,\n\t\t\tDropDups: true,\n\t\t}\n\t\terr := col.EnsureIndex(index)\n\t\treturn err\n\t*/\n\n\tindex := mgo.Index{\n\t\tKey: []string{\"g\", \"s\"},\n\t\tBackground: background,\n\t\tSparse: true,\n\t}\n\terr = col.EnsureIndex(index)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\t//return err\n\t}\n\tlog.V(2).Infof(\"%+v\", index)\n\n\tindex.Key = []string{\"g\", \"o\"}\n\terr = col.EnsureIndex(index)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\t//return err\n\t}\n\tlog.V(2).Infof(\"%+v\", index)\n\n\tindex.Key = []string{\"g\", \"p\"}\n\terr = col.EnsureIndex(index)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\t//return err\n\t}\n\tlog.V(2).Infof(\"%+v\", index)\n\n\tindex.Key = []string{\"g\", \"s\", \"p\"}\n\terr = col.EnsureIndex(index)\n\tif err != nil 
{\n\t\tlog.Error(err)\n\t\t//return err\n\t}\n\tlog.V(2).Infof(\"%+v\", index)\n\n\tindex.Key = []string{\"g\", \"s\", \"o\"}\n\terr = col.EnsureIndex(index)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\t//return err\n\t}\n\tlog.V(2).Infof(\"%+v\", index)\n\n\tindex.Key = []string{\"g\", \"p\", \"o\"}\n\terr = col.EnsureIndex(index)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\t//return err\n\t}\n\tlog.V(2).Infof(\"%+v\", index)\n\n\tindex.Key = []string{\"g\", \"s\", \"p\", \"o\"}\n\tindex.Unique = true\n\tindex.DropDups = true\n\terr = col.EnsureIndex(index)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\t//return err\n\t}\n\tlog.V(2).Infof(\"%+v\", index)\n\n\treturn nil\n}", "func ConfigureIndexes(mClient *mongo.Client) {\n\tcollection := getMarkdownCollection(mClient)\n\n\tindex := []mongo.IndexModel{\n\t\t{\n\t\t\tKeys: bsonx.Doc{\n\t\t\t\t{Key: \"title\", Value: bsonx.String(\"text\")},\n\t\t\t\t{Key: \"body\", Value: bsonx.String(\"text\")},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tKeys: bsonx.Doc{{Key: \"createDate\", Value: bsonx.Int32(1)}},\n\t\t},\n\t}\n\tname, err := collection.Indexes().CreateMany(context.TODO(), index)\n\tif err != nil {\n\t\tfmt.Printf(\"Error Creating Text Index: %s\", err)\n\t\treturn\n\t}\n\tfmt.Printf(\"Index Created: %s\\n\", name)\n}", "func SetIndexes(collection *mongo.Collection, keys bsonx.Doc) {\n\tindex := mongo.IndexModel{}\n\tindex.Keys = keys\n\tunique := true\n\tindex.Options = &options.IndexOptions{\n\t\tUnique: &unique,\n\t}\n\topts := options.CreateIndexes().SetMaxTime(10 * time.Second)\n\t_, err := collection.Indexes().CreateOne(context.Background(), index, opts)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error while creating indexs: %v\", err)\n\t}\n}", "func (db *MongoDb) Setup(indexes []mgo.Index) error {\n\t// Copy mongo session (thread safe) and close after function\n\tconn := db.Conn.Copy()\n\tdefer conn.Close()\n\n\t// Ensure indexes\n\tcol := conn.DB(db.Name).C(db.Collection)\n\n\tfor _, i := range indexes {\n\t\tif err := 
col.EnsureIndex(i); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func AddIndex(db MongoDB, m metrics.Metrics, col string, indexes ...mgo.Index) error {\n\tdefer m.CollectMetrics(\"DB.AddIndex\")\n\n\tif len(indexes) == 0 {\n\t\treturn nil\n\t}\n\n\tdatabase, session, err := db.New(false)\n\tif err != nil {\n\t\tm.Emit(metrics.Errorf(\"Failed to create session for index\"), metrics.With(\"collection\", col), metrics.With(\"error\", err.Error()))\n\t\treturn err\n\t}\n\n\tdefer session.Close()\n\n\tcollection := database.C(col)\n\n\tfor _, index := range indexes {\n\t\tif err := collection.EnsureIndex(index); err != nil {\n\t\t\tm.Emit(metrics.Errorf(\"Failed to ensure session index\"), metrics.With(\"collection\", col), metrics.With(\"index\", index), metrics.With(\"error\", err.Error()))\n\t\t\treturn err\n\t\t}\n\n\t\tm.Emit(metrics.Info(\"Succeeded in ensuring collection index\"), metrics.With(\"collection\", col), metrics.With(\"index\", index))\n\t}\n\n\tm.Emit(metrics.Info(\"Finished adding index\"), metrics.With(\"collection\", col))\n\treturn nil\n}", "func (b *Bucket) createIndexes(ctx context.Context) error {\n\t// must use primary read pref mode to check if files coll empty\n\tcloned, err := b.filesColl.Clone(options.Collection().SetReadPreference(readpref.Primary()))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdocRes := cloned.FindOne(ctx, bson.D{}, options.FindOne().SetProjection(bson.D{{\"_id\", 1}}))\n\n\t_, err = docRes.DecodeBytes()\n\tif err != mongo.ErrNoDocuments {\n\t\t// nil, or error that occurred during the FindOne operation\n\t\treturn err\n\t}\n\n\tfilesIv := b.filesColl.Indexes()\n\tchunksIv := b.chunksColl.Indexes()\n\n\tfilesModel := mongo.IndexModel{\n\t\tKeys: bson.D{\n\t\t\t{\"filename\", int32(1)},\n\t\t\t{\"uploadDate\", int32(1)},\n\t\t},\n\t}\n\n\tchunksModel := mongo.IndexModel{\n\t\tKeys: bson.D{\n\t\t\t{\"files_id\", int32(1)},\n\t\t\t{\"n\", int32(1)},\n\t\t},\n\t\tOptions: 
options.Index().SetUnique(true),\n\t}\n\n\tif err = createNumericalIndexIfNotExists(ctx, filesIv, filesModel); err != nil {\n\t\treturn err\n\t}\n\treturn createNumericalIndexIfNotExists(ctx, chunksIv, chunksModel)\n}", "func CreateIndexes(collections []models.Collection) {\n\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\tdefer cancel()\n\n\tfor _, collection := range collections {\n\t\tmodels := []mongo.IndexModel{}\n\n\t\tfor _, field := range collection.Fields {\n\t\t\tmodels = append(models, mongo.IndexModel{\n\t\t\t\tKeys: bson.M{field.FieldName: 1},\n\t\t\t\tOptions: options.Index().SetUnique(field.Unique),\n\t\t\t})\n\t\t}\n\n\t\tdatabase := ConnectDB().Database(utils.Config.DatabaseName).Collection(collection.CollectionName)\n\t\t_, err := database.Indexes().CreateMany(ctx, models)\n\t\tif err != nil {\n\t\t\tfmt.Println(err.Error())\n\t\t\treturn\n\t\t}\n\t}\n}", "func addUserIndexes(db *mgo.Session) error {\n\tsession := db.Copy()\n\tdefer session.Close()\n\tc := session.DB(\"\").C(userCollection)\n\ti := mgo.Index{\n\t\tKey: []string{\"phone\"},\n\t\tUnique: true,\n\t}\n\terr := c.EnsureIndex(i)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to add index: %v\", err)\n\t}\n\n\treturn nil\n}", "func TestBaseModel_EnsureIndexes(t *testing.T) {\n\tconfig := mongo.Configuration{\n\t\tURL: \"mongodb://localhost:27017/some-test-db\",\n\t\tUseSSL: false,\n\t\tSSLCert: []byte{},\n\t\tPingFrequency: 100,\n\t}\n\n\tdb, teardown := mongo.InitMongoFromConfig(config)\n\tdefer teardown()\n\n\t// initialize the collection..\n\tdb.C(\"some-collection\").Insert(&testDocument{})\n\n\tmodel := &BaseModel{\n\t\tDB: db,\n\t\tCollectionName: \"some-collection\",\n\t\tIndexes: []*mgo.Index{\n\t\t\t{\n\t\t\t\tUnique: true,\n\t\t\t\tName: \"test_1\",\n\t\t\t\tKey: []string{\"first_key\"},\n\t\t\t},\n\t\t},\n\t}\n\n\tEnsureIndexes([]Model{model}, false)\n\n\tindexes, err := db.C(\"some-collection\").Indexes()\n\tassert.Nil(t, 
err)\n\tassert.Equal(t, []mgo.Index{\n\t\t{Key: []string{\"_id\"}, Name: \"_id_\"},\n\t\t{Key: []string{\"first_key\"}, Name: \"test_1\", Unique: true},\n\t}, indexes)\n}", "func (c *Collection) addIndex(schema *jsonschema.Schema, index Index, opts ...Option) error {\n\targs := &Options{}\n\tfor _, opt := range opts {\n\t\topt(args)\n\t}\n\n\t// Don't allow the default index to be overwritten\n\tif index.Path == idFieldName {\n\t\tif _, ok := c.indexes[idFieldName]; ok {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t// Validate path and type.\n\tjt, err := getSchemaTypeAtPath(schema, index.Path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar valid bool\n\tfor _, t := range indexTypes {\n\t\tif jt.Type == t {\n\t\t\tvalid = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !valid {\n\t\treturn ErrNotIndexable\n\t}\n\n\t// Skip if nothing to do\n\tif x, ok := c.indexes[index.Path]; ok && index.Unique == x.Unique {\n\t\treturn nil\n\t}\n\n\t// Ensure collection does not contain multiple instances with the same value at path\n\tif index.Unique && index.Path != idFieldName {\n\t\tvals := make(map[interface{}]struct{})\n\t\tall, err := c.Find(&Query{}, WithTxnToken(args.Token))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, i := range all {\n\t\t\tres := gjson.GetBytes(i, index.Path)\n\t\t\tif !res.Exists() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif _, ok := vals[res.Value()]; ok {\n\t\t\t\treturn ErrCantCreateUniqueIndex\n\t\t\t} else {\n\t\t\t\tvals[res.Value()] = struct{}{}\n\t\t\t}\n\t\t}\n\t}\n\n\tc.indexes[index.Path] = index\n\treturn c.saveIndexes()\n}", "func init() {\n\tindexFields := []string{\"name\"}\n\tconfig.CreateHashIndexedCollection(CollectionName, indexFields)\n}", "func (c *MongoStoreClient) EnsureIndexes() error {\n\tmedtronicIndexDateTime, _ := time.Parse(medtronicDateFormat, medtronicIndexDate)\n\tdataIndexes := []mongo.IndexModel{\n\t\t{\n\t\t\tKeys: bson.D{{Key: \"_userId\", Value: 1}, {Key: \"deviceModel\", Value: 1}, {Key: \"fakefield\", Value: 
1}},\n\t\t\tOptions: options.Index().\n\t\t\t\tSetName(\"GetLoopableMedtronicDirectUploadIdsAfter_v2_DateTime\").\n\t\t\t\tSetBackground(true).\n\t\t\t\tSetPartialFilterExpression(\n\t\t\t\t\tbson.D{\n\t\t\t\t\t\t{Key: \"_active\", Value: true},\n\t\t\t\t\t\t{Key: \"type\", Value: \"upload\"},\n\t\t\t\t\t\t{Key: \"deviceModel\", Value: bson.M{\n\t\t\t\t\t\t\t\"$exists\": true,\n\t\t\t\t\t\t}},\n\t\t\t\t\t\t{Key: \"time\", Value: bson.M{\n\t\t\t\t\t\t\t\"$gte\": medtronicIndexDateTime,\n\t\t\t\t\t\t}},\n\t\t\t\t\t},\n\t\t\t\t),\n\t\t},\n\t\t{\n\t\t\tKeys: bson.D{{Key: \"_userId\", Value: 1}, {Key: \"origin.payload.device.manufacturer\", Value: 1}, {Key: \"fakefield\", Value: 1}},\n\t\t\tOptions: options.Index().\n\t\t\t\tSetName(\"HasMedtronicLoopDataAfter_v2_DateTime\").\n\t\t\t\tSetBackground(true).\n\t\t\t\tSetPartialFilterExpression(\n\t\t\t\t\tbson.D{\n\t\t\t\t\t\t{Key: \"_active\", Value: true},\n\t\t\t\t\t\t{Key: \"origin.payload.device.manufacturer\", Value: \"Medtronic\"},\n\t\t\t\t\t\t{Key: \"time\", Value: bson.M{\n\t\t\t\t\t\t\t\"$gte\": medtronicIndexDateTime,\n\t\t\t\t\t\t}},\n\t\t\t\t\t},\n\t\t\t\t),\n\t\t},\n\t\t{\n\t\t\tKeys: bson.D{{Key: \"_userId\", Value: 1}, {Key: \"time\", Value: -1}, {Key: \"type\", Value: 1}},\n\t\t\tOptions: options.Index().\n\t\t\t\tSetName(\"UserIdTimeWeighted_v2\").\n\t\t\t\tSetBackground(true).\n\t\t\t\tSetPartialFilterExpression(\n\t\t\t\t\tbson.D{\n\t\t\t\t\t\t{Key: \"_active\", Value: true},\n\t\t\t\t\t},\n\t\t\t\t),\n\t\t},\n\t}\n\n\tif _, err := dataCollection(c).Indexes().CreateMany(context.Background(), dataIndexes); err != nil {\n\t\tlog.Fatal(dataStoreAPIPrefix, fmt.Sprintf(\"Unable to create indexes: %s\", err))\n\t}\n\n\t// Not sure if all these indexes need to also be on the deviceDataSets collection.\n\tdataSetsIndexes := []mongo.IndexModel{\n\t\t{\n\t\t\tKeys: bson.D{{Key: \"_userId\", Value: 1}, {Key: \"deviceModel\", Value: 1}, {Key: \"fakefield\", Value: 1}},\n\t\t\tOptions: 
options.Index().\n\t\t\t\tSetName(\"GetLoopableMedtronicDirectUploadIdsAfter_v2_DateTime\").\n\t\t\t\tSetBackground(true).\n\t\t\t\tSetPartialFilterExpression(\n\t\t\t\t\tbson.D{\n\t\t\t\t\t\t{Key: \"_active\", Value: true},\n\t\t\t\t\t\t{Key: \"type\", Value: \"upload\"},\n\t\t\t\t\t\t{Key: \"deviceModel\", Value: bson.M{\n\t\t\t\t\t\t\t\"$exists\": true,\n\t\t\t\t\t\t}},\n\t\t\t\t\t\t{Key: \"time\", Value: bson.M{\n\t\t\t\t\t\t\t\"$gte\": medtronicIndexDateTime,\n\t\t\t\t\t\t}},\n\t\t\t\t\t},\n\t\t\t\t),\n\t\t},\n\t\t{\n\t\t\tKeys: bson.D{{Key: \"_userId\", Value: 1}, {Key: \"origin.payload.device.manufacturer\", Value: 1}, {Key: \"fakefield\", Value: 1}},\n\t\t\tOptions: options.Index().\n\t\t\t\tSetName(\"HasMedtronicLoopDataAfter_v2_DateTime\").\n\t\t\t\tSetBackground(true).\n\t\t\t\tSetPartialFilterExpression(\n\t\t\t\t\tbson.D{\n\t\t\t\t\t\t{Key: \"_active\", Value: true},\n\t\t\t\t\t\t{Key: \"origin.payload.device.manufacturer\", Value: \"Medtronic\"},\n\t\t\t\t\t\t{Key: \"time\", Value: bson.M{\n\t\t\t\t\t\t\t\"$gte\": medtronicIndexDateTime,\n\t\t\t\t\t\t}},\n\t\t\t\t\t},\n\t\t\t\t),\n\t\t},\n\t\t{\n\t\t\tKeys: bson.D{{Key: \"_userId\", Value: 1}, {Key: \"time\", Value: -1}, {Key: \"type\", Value: 1}},\n\t\t\tOptions: options.Index().\n\t\t\t\tSetName(\"UserIdTimeWeighted_v2\").\n\t\t\t\tSetBackground(true).\n\t\t\t\tSetPartialFilterExpression(\n\t\t\t\t\tbson.D{\n\t\t\t\t\t\t{Key: \"_active\", Value: true},\n\t\t\t\t\t},\n\t\t\t\t),\n\t\t},\n\t}\n\n\tif _, err := dataSetsCollection(c).Indexes().CreateMany(context.Background(), dataSetsIndexes); err != nil {\n\t\tlog.Fatal(dataStoreAPIPrefix, fmt.Sprintf(\"Unable to create indexes: %s\", err))\n\t}\n\n\treturn nil\n}", "func (m *DataRepositoryMongo) CreateIndex(collectionName string, indexes map[string]interface{}) <-chan error {\n\tresult := make(chan error)\n\tgo func() {\n\n\t\tvar (\n\t\t\terr error\n\t\t\tcollection *mongo.Collection\n\t\t\tctx context.Context\n\t\t)\n\n\t\tcollection, err = 
m.Client.GetCollection(collectionName)\n\t\tif err != nil {\n\t\t\tlog.Error(\"Get collection %s err (%s)! \\n\", collectionName, err.Error())\n\t\t\tresult <- err\n\t\t}\n\n\t\tctx, err = m.Client.GetContext()\n\t\tif err != nil {\n\t\t\tlog.Error(\"Get context err (%s)! \\n\", err.Error())\n\t\t\tresult <- err\n\t\t}\n\n\t\tvar indexList []mongo.IndexModel\n\n\t\tfor key, value := range indexes {\n\t\t\tindexOption := &options.IndexOptions{}\n\t\t\tindexOption = indexOption.SetBackground(true)\n\t\t\tindex := mongo.IndexModel{Keys: bson.M{key: value}, Options: indexOption}\n\t\t\tindexList = append(indexList, index)\n\t\t}\n\n\t\t_, err = collection.Indexes().CreateMany(ctx, indexList)\n\t\tresult <- err\n\t\tclose(result)\n\t}()\n\n\treturn result\n}", "func EnsureIndex(db *mongo.Database, collectionName string, keys bson.M, opt *options.IndexOptions) {\n\tvar keyIndex []string\n\tfor k := range keys {\n\t\tkeyIndex = append(keyIndex, k)\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\tdefer cancel()\n\n\tcollection := db.Collection(collectionName)\n\n\tindexes := collection.Indexes()\n\tcursor, err := indexes.List(ctx)\n\tif err != nil {\n\t\tlog.Panicf(\"index list error %v\", err)\n\t}\n\n\tif cursor != nil {\n\t\tfor cursor.Next(ctx) {\n\t\t\tvar index []primitive.E\n\t\t\terrCursor := cursor.Decode(&index)\n\t\t\tif errCursor != nil {\n\t\t\t\tlog.Panicf(\"index list error %v\", errCursor)\n\t\t\t}\n\n\t\t\t// skip creating index if key field already exist\n\t\t\tkeyIsExist := keyFieldIndexIsExist(index, keyIndex)\n\t\t\tif keyIsExist {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tmod := mongo.IndexModel{\n\t\t\tKeys: keys,\n\t\t\tOptions: opt,\n\t\t}\n\n\t\topts := options.CreateIndexes().SetMaxTime(5 * time.Second)\n\t\t_, err = collection.Indexes().CreateOne(ctx, mod, opts)\n\t\tif err != nil {\n\t\t\tlog.Panicf(\"ensure index error %v\", err)\n\t\t}\n\t}\n}", "func updateDBIndexes(mi *modelInfo) {\n\tadapter := 
adapters[db.DriverName()]\n\t// update column indexes\n\tfor colName, fi := range mi.fields.registryByJSON {\n\t\tif !fi.index {\n\t\t\tcontinue\n\t\t}\n\t\tif !adapter.indexExists(mi.tableName, fmt.Sprintf(\"%s_%s_index\", mi.tableName, colName)) {\n\t\t\tcreateColumnIndex(mi.tableName, colName)\n\t\t}\n\t}\n}", "func EnsureIndex(cd *mongo.Collection, indexQuery []string) error {\n\n\t// options for index\n\topts := options.CreateIndexes().SetMaxTime(5 * time.Second)\n\n\t// index model\n\tindex := []mongo.IndexModel{}\n\n\t// creating multiple index query\n\tfor _, val := range indexQuery {\n\t\ttemp := mongo.IndexModel{}\n\t\ttemp.Keys = bsonx.Doc{{Key: val, Value: bsonx.Int32(1)}}\n\t\tindex = append(index, temp)\n\t}\n\n\t// executng index query\n\t_, err := cd.Indexes().CreateMany(context.Background(), index, opts)\n\tif err != nil {\n\t\tfmt.Errorf(\"Error while executing index Query\", err.Error())\n\t\treturn err\n\t}\n\n\t// if executed successfully then return nil\n\treturn nil\n}", "func (c *Collection) indexAdd(tx ds.Txn, key ds.Key, data []byte) error {\n\tfor path, index := range c.indexes {\n\t\terr := c.indexUpdate(path, index, tx, key, data, false)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func ExampleMongoDBIndexes() {\n\ta := fixture\n\tconst x = \"myReplicaSet_1\"\n\ti, found := search.MongoDBIndexes(a.IndexConfigs, func(r *opsmngr.IndexConfig) bool {\n\t\treturn r.RSName == x\n\t})\n\n\tif i < len(a.IndexConfigs) && found {\n\t\tfmt.Printf(\"found %v at index %d\\n\", x, i)\n\t} else {\n\t\tfmt.Printf(\"%s not found\\n\", x)\n\t}\n\t// Output:\n\t// found myReplicaSet_1 at index 0\n}", "func (m *MongoDB) CreateIndex(name, key string, order int) (string, error) {\n\tcoll, ok := m.coll[name]\n\tif !ok {\n\t\treturn \"\", fmt.Errorf(\"not defined collection %s\", name)\n\t}\n\n\tasscending := 1\n\tif order == -1 {\n\t\tasscending = -1\n\t}\n\n\tmodel := mongo.IndexModel{\n\t\tKeys: bson.D{{Key: key, Value: 
asscending}},\n\t\t//Options: options.Index().SetBackground(true),\n\t}\n\n\topts := options.CreateIndexes().SetMaxTime(2 * time.Second)\n\n\treturn coll.Indexes().CreateOne(m.ctx, model, opts)\n}", "func GetIndexesFromDB(client *mongo.Client, dbName string) string {\n\tvar err error\n\tvar cur *mongo.Cursor\n\tvar icur *mongo.Cursor\n\tvar scur *mongo.Cursor\n\tvar buffer bytes.Buffer\n\tvar ctx = context.Background()\n\t// var pipeline = mongo.Pipeline{{{Key: \"$indexStats\", Value: bson.M{}}}}\n\tvar pipeline = MongoPipeline(`{\"$indexStats\": {}}`)\n\tif cur, err = client.Database(dbName).ListCollections(ctx, bson.M{}); err != nil {\n\t\treturn err.Error()\n\t}\n\tdefer cur.Close(ctx)\n\n\tfor cur.Next(ctx) {\n\t\tvar elem = bson.M{}\n\t\tif err = cur.Decode(&elem); err != nil {\n\t\t\tfmt.Println(\"0.1\", err)\n\t\t\tcontinue\n\t\t}\n\t\tcoll := fmt.Sprintf(\"%v\", elem[\"name\"])\n\t\tcollType := fmt.Sprintf(\"%v\", elem[\"type\"])\n\t\tif strings.Index(coll, \"system.\") == 0 || (elem[\"type\"] != nil && collType != \"collection\") {\n\t\t\tcontinue\n\t\t}\n\n\t\tbuffer.WriteString(\"\\n\")\n\t\tbuffer.WriteString(dbName)\n\t\tbuffer.WriteString(\".\")\n\t\tbuffer.WriteString(coll)\n\t\tbuffer.WriteString(\":\\n\")\n\n\t\tif scur, err = client.Database(dbName).Collection(coll).Aggregate(ctx, pipeline); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tvar indexStats = []bson.M{}\n\t\tfor scur.Next(ctx) {\n\t\t\tvar result = bson.M{}\n\t\t\tif err = scur.Decode(&result); err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tindexStats = append(indexStats, result)\n\t\t}\n\t\tscur.Close(ctx)\n\t\tindexView := client.Database(dbName).Collection(coll).Indexes()\n\t\tif icur, err = indexView.List(ctx); err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tdefer icur.Close(ctx)\n\t\tvar list []IndexStatsDoc\n\n\t\tfor icur.Next(ctx) {\n\t\t\tvar idx = bson.D{}\n\t\t\tif err = icur.Decode(&idx); err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tvar keys bson.D\n\t\t\tvar indexName 
string\n\t\t\tfor _, v := range idx {\n\t\t\t\tif v.Key == \"name\" {\n\t\t\t\t\tindexName = v.Value.(string)\n\t\t\t\t} else if v.Key == \"key\" {\n\t\t\t\t\tkeys = v.Value.(bson.D)\n\t\t\t\t}\n\t\t\t}\n\t\t\tvar strbuf bytes.Buffer\n\t\t\tfor n, value := range keys {\n\t\t\t\tif n == 0 {\n\t\t\t\t\tstrbuf.WriteString(\"{ \")\n\t\t\t\t}\n\t\t\t\tstrbuf.WriteString(value.Key + \": \" + fmt.Sprint(value.Value))\n\t\t\t\tif n == len(keys)-1 {\n\t\t\t\t\tstrbuf.WriteString(\" }\")\n\t\t\t\t} else {\n\t\t\t\t\tstrbuf.WriteString(\", \")\n\t\t\t\t}\n\t\t\t}\n\t\t\to := IndexStatsDoc{Key: strbuf.String()}\n\t\t\to.EffectiveKey = strings.Replace(o.Key[:len(o.Key)-2], \": -1\", \": 1\", -1)\n\t\t\to.Usage = []UsageDoc{}\n\t\t\tfor _, result := range indexStats {\n\t\t\t\tif result[\"name\"].(string) == indexName {\n\t\t\t\t\tdoc := result[\"accesses\"].(bson.M)\n\t\t\t\t\thost := result[\"host\"].(string)\n\t\t\t\t\tb, _ := bson.Marshal(doc)\n\t\t\t\t\tvar accesses UsageDoc\n\t\t\t\t\tbson.Unmarshal(b, &accesses)\n\t\t\t\t\taccesses.Hostname = host\n\t\t\t\t\to.Usage = append(o.Usage, accesses)\n\t\t\t\t}\n\t\t\t}\n\t\t\tlist = append(list, o)\n\t\t}\n\t\ticur.Close(ctx)\n\t\tsort.Slice(list, func(i, j int) bool { return (list[i].EffectiveKey <= list[j].EffectiveKey) })\n\t\tfor i, o := range list {\n\t\t\tfont := \"\\x1b[0m \"\n\t\t\tif o.Key != \"{ _id: 1 }\" {\n\t\t\t\tif i < len(list)-1 && strings.Index(list[i+1].EffectiveKey, o.EffectiveKey) == 0 {\n\t\t\t\t\tfont = \"\\x1b[31;1mx \" // red\n\t\t\t\t} else {\n\t\t\t\t\tsum := 0\n\t\t\t\t\tfor _, u := range o.Usage {\n\t\t\t\t\t\tsum += u.Ops\n\t\t\t\t\t}\n\t\t\t\t\tif sum == 0 {\n\t\t\t\t\t\tfont = \"\\x1b[34;1m? 
\" // blue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tbuffer.WriteString(font + o.Key + \"\\x1b[0m\")\n\t\t\tfor _, u := range o.Usage {\n\t\t\t\tbuffer.Write([]byte(\"\\n\\thost: \" + u.Hostname + \", ops: \" + fmt.Sprintf(\"%v\", u.Ops) + \", since: \" + fmt.Sprintf(\"%v\", u.Since)))\n\t\t\t}\n\t\t\tbuffer.WriteString(\"\\n\")\n\t\t}\n\t}\n\treturn buffer.String()\n}", "func (g *Graph) addIndexes(schema *load.Schema) {\n\ttyp, _ := g.typ(schema.Name)\n\tfor _, idx := range schema.Indexes {\n\t\tcheck(typ.AddIndex(idx), \"invalid index for schema %q\", schema.Name)\n\t}\n}", "func (indexStore *IndexStore) InitIndex() {\n\tdocs := indexStore.db.getAllDocs()\n\tuniqueIndexes := make(map[string]int)\n\ti := 0\n\tfor _, doc := range docs {\n\t\tif _, present := indexStore.store[doc.Index]; !present {\n\t\t\tindexStore.NewIndex(doc.Index)\n\t\t}\n\t\tindexStore.AddDocument(doc.Index, doc.Title, doc.Contents, doc.Id)\n\t\tuniqueIndexes[doc.Index] = 1\n\n\t\tif i%50 == 0 {\n\t\t\tfmt.Printf(\"%v documents indexed\\n\", i)\n\t\t}\n\t\ti++\n\t}\n\n\tfor index := range uniqueIndexes {\n\t\tindexStore.UpdateIndex(index)\n\t}\n}", "func Initialize() {\n\tsession := db.Connection.Session.Copy()\n\tdefer session.Close()\n\tc := session.DB(conf.MONGODB_DATABASE).C(\"Nodes\")\n\tc.EnsureIndex(mgo.Index{Key: []string{\"acl.owner\"}, Background: true})\n\tc.EnsureIndex(mgo.Index{Key: []string{\"acl.read\"}, Background: true})\n\tc.EnsureIndex(mgo.Index{Key: []string{\"acl.write\"}, Background: true})\n\tc.EnsureIndex(mgo.Index{Key: []string{\"acl.delete\"}, Background: true})\n\tc.EnsureIndex(mgo.Index{Key: []string{\"created_on\"}, Background: true})\n\tc.EnsureIndex(mgo.Index{Key: []string{\"expiration\"}, Background: true})\n\tc.EnsureIndex(mgo.Index{Key: []string{\"type\"}, Background: true})\n\tc.EnsureIndex(mgo.Index{Key: []string{\"priority\"}, Background: true})\n\tc.EnsureIndex(mgo.Index{Key: []string{\"file.path\"}, Background: true})\n\tc.EnsureIndex(mgo.Index{Key: 
[]string{\"file.virtual_parts\"}, Background: true})\n\tc.EnsureIndex(mgo.Index{Key: []string{\"file.checksum.md5\"}, Background: true})\n\tc.EnsureIndex(mgo.Index{Key: []string{\"id\"}, Unique: true})\n\tif conf.MONGODB_ATTRIBUTE_INDEXES != \"\" {\n\t\tfor _, v := range strings.Split(conf.MONGODB_ATTRIBUTE_INDEXES, \",\") {\n\t\t\tv = \"attributes.\" + strings.TrimSpace(v)\n\t\t\tc.EnsureIndex(mgo.Index{Key: []string{v}, Background: true})\n\t\t}\n\t}\n}", "func (iv IndexView) CreateMany(ctx context.Context, models []IndexModel, opts ...*options.CreateIndexesOptions) ([]string, error) {\n\tnames := make([]string, 0, len(models))\n\n\tvar indexes bsoncore.Document\n\taidx, indexes := bsoncore.AppendArrayStart(indexes)\n\n\tfor i, model := range models {\n\t\tif model.Keys == nil {\n\t\t\treturn nil, fmt.Errorf(\"index model keys cannot be nil\")\n\t\t}\n\n\t\tif isUnorderedMap(model.Keys) {\n\t\t\treturn nil, ErrMapForOrderedArgument{\"keys\"}\n\t\t}\n\n\t\tkeys, err := marshal(model.Keys, iv.coll.bsonOpts, iv.coll.registry)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tname, err := getOrGenerateIndexName(keys, model)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tnames = append(names, name)\n\n\t\tvar iidx int32\n\t\tiidx, indexes = bsoncore.AppendDocumentElementStart(indexes, strconv.Itoa(i))\n\t\tindexes = bsoncore.AppendDocumentElement(indexes, \"key\", keys)\n\n\t\tif model.Options == nil {\n\t\t\tmodel.Options = options.Index()\n\t\t}\n\t\tmodel.Options.SetName(name)\n\n\t\toptsDoc, err := iv.createOptionsDoc(model.Options)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tindexes = bsoncore.AppendDocument(indexes, optsDoc)\n\n\t\tindexes, err = bsoncore.AppendDocumentEnd(indexes, iidx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tindexes, err := bsoncore.AppendArrayEnd(indexes, aidx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsess := sessionFromContext(ctx)\n\n\tif sess == nil && 
iv.coll.client.sessionPool != nil {\n\t\tsess = session.NewImplicitClientSession(iv.coll.client.sessionPool, iv.coll.client.id)\n\t\tdefer sess.EndSession()\n\t}\n\n\terr = iv.coll.client.validSession(sess)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\twc := iv.coll.writeConcern\n\tif sess.TransactionRunning() {\n\t\twc = nil\n\t}\n\tif !writeconcern.AckWrite(wc) {\n\t\tsess = nil\n\t}\n\n\tselector := makePinnedSelector(sess, iv.coll.writeSelector)\n\n\toption := options.MergeCreateIndexesOptions(opts...)\n\n\top := operation.NewCreateIndexes(indexes).\n\t\tSession(sess).WriteConcern(wc).ClusterClock(iv.coll.client.clock).\n\t\tDatabase(iv.coll.db.name).Collection(iv.coll.name).CommandMonitor(iv.coll.client.monitor).\n\t\tDeployment(iv.coll.client.deployment).ServerSelector(selector).ServerAPI(iv.coll.client.serverAPI).\n\t\tTimeout(iv.coll.client.timeout).MaxTime(option.MaxTime)\n\tif option.CommitQuorum != nil {\n\t\tcommitQuorum, err := marshalValue(option.CommitQuorum, iv.coll.bsonOpts, iv.coll.registry)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\top.CommitQuorum(commitQuorum)\n\t}\n\n\terr = op.Execute(ctx)\n\tif err != nil {\n\t\t_, err = processWriteError(err)\n\t\treturn nil, err\n\t}\n\n\treturn names, nil\n}", "func (i *Index) Index(docs []index.Document, opts interface{}) error {\n blk := i.conn.Bulk()\n\tfor _, doc := range docs {\n //fmt.Println(\"indexing \", doc.Id)\n\t\treq := elastic.NewBulkIndexRequest().Index(i.name).Type(\"doc\").Id(doc.Id).Doc(doc.Properties)\n\t\tblk.Add(req)\n\t\t/*_, err := i.conn.Index().Index(i.name).Type(\"doc\").Id(doc.Id).BodyJson(doc.Properties).Do()\n if err != nil {\n // Handle error\n panic(err)\n }*/\n //fmt.Printf(\"Indexed tweet %s to index %s, type %s\\n\", put2.Id, put2.Index, put2.Type)\n\t}\n\t//_, err := blk.Refresh(true).Do()\n\t_, err := blk.Refresh(false).Do()\n if err != nil {\n panic(err)\n fmt.Println(\"Get Error during indexing\", err)\n }\n\treturn err\n\t//return nil\n}", "func 
createIndexes(ts *Schema, ti *Info, idxs []schema.Index, store *stor.Stor) {\n\tif len(idxs) == 0 {\n\t\treturn\n\t}\n\tts.Indexes = slices.Clip(ts.Indexes) // copy on write\n\tnold := len(ts.Indexes)\n\tfor i := range idxs {\n\t\tix := &idxs[i]\n\t\tif ts.FindIndex(ix.Columns) != nil {\n\t\t\tpanic(\"duplicate index: \" +\n\t\t\t\tstr.Join(\"(,)\", ix.Columns) + \" in \" + ts.Table)\n\t\t}\n\t\tts.Indexes = append(ts.Indexes, *ix)\n\t}\n\tidxs = ts.SetupNewIndexes(nold)\n\tn := len(ti.Indexes)\n\tti.Indexes = slices.Clip(ti.Indexes) // copy on write\n\tfor i := range idxs {\n\t\tbt := btree.CreateBtree(store, &ts.Indexes[n+i].Ixspec)\n\t\tti.Indexes = append(ti.Indexes, index.OverlayFor(bt))\n\t}\n}", "func EnsureIndex(ctx context.Context, c *mongo.Collection, keys []bson.E, unique bool) error {\n\tks := bson.D{}\n\tindexNames := []string{}\n\tfor _, k := range keys {\n\t\tindexNames = append(indexNames, fmt.Sprintf(\"%v_%v\", k.Key, k.Value))\n\t\tks = append(ks, k)\n\t}\n\tidxoptions := &options.IndexOptions{}\n\tidxoptions.SetBackground(true)\n\tidxoptions.SetUnique(unique)\n\tidm := mongo.IndexModel{\n\t\tKeys: ks,\n\t\tOptions: idxoptions,\n\t}\n\n\tidxs := c.Indexes()\n\tcur, err := idxs.List(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tindexName := strings.Join(indexNames, \"_\")\n\tfound := false\n\tfor cur.Next(ctx) {\n\t\td := bson.D{}\n\t\tcur.Decode(&d)\n\n\t\tfor _, v := range d {\n\t\t\tif v.Key == \"name\" && v.Value == indexName {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif found {\n\t\treturn nil\n\t}\n\n\t_, err = idxs.CreateOne(ctx, idm)\n\tif err != nil {\n\t\tfmt.Printf(\"create index error, name: %v, err: %v\", indexName, err)\n\t}\n\n\treturn err\n}", "func (repo *mongoBaseRepo) CreateIndexes(indexes interface{}, args ...interface{}) ([]string, error) {\n\ttimeout := DefaultTimeout\n\topts := &options.CreateIndexesOptions{}\n\n\tfor i := 0; i < len(args); i++ {\n\t\tswitch val := args[i].(type) {\n\t\tcase 
time.Duration:\n\t\t\ttimeout = val\n\t\tcase *options.CreateIndexesOptions:\n\t\t\topts = val\n\t\t}\n\t}\n\n\t// Convert indexModels\n\tindexModels, ok := indexes.([]mongo.IndexModel)\n\tif !ok {\n\t\treturn []string{}, ErrIndexConvert\n\t}\n\n\t// create indexes\n\tctx, cancel := context.WithTimeout(context.Background(), timeout)\n\tdefer cancel()\n\n\treturn repo.collection.Indexes().CreateMany(ctx, indexModels, opts)\n}", "func (i *Index) Index(docs []index.Document, options interface{}) error {\n\n\tvar opts IndexingOptions\n\thasOpts := false\n\tif options != nil {\n\t\tif opts, hasOpts = options.(IndexingOptions); !hasOpts {\n\t\t\treturn errors.New(\"invalid indexing options\")\n\t\t}\n\t}\n\n\tconn := i.getConn()\n\tdefer conn.Close()\n\n\tn := 0\n\n\tfor _, doc := range docs {\n\t\targs := make(redis.Args, 0, len(i.md.Fields)*2+4)\n\t\targs = append(args, i.name, doc.Id, doc.Score)\n\t\t// apply options\n\t\tif hasOpts {\n\t\t\tif opts.NoSave {\n\t\t\t\targs = append(args, \"NOSAVE\")\n\t\t\t}\n\t\t\tif opts.Language != \"\" {\n\t\t\t\targs = append(args, \"LANGUAGE\", opts.Language)\n\t\t\t}\n\t\t}\n\n\t\targs = append(args, \"FIELDS\")\n\n\t\tfor k, f := range doc.Properties {\n\t\t\targs = append(args, k, f)\n\t\t}\n\n\t\tif err := conn.Send(i.commandPrefix+\".ADD\", args...); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tn++\n\t}\n\n\tif err := conn.Flush(); err != nil {\n\t\treturn err\n\t}\n\n\tfor n > 0 {\n\t\tif _, err := conn.Receive(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tn--\n\t}\n\n\treturn nil\n}", "func addAllFieldIndexes(ctx context.Context, indexer client.FieldIndexer) error {\n\tif err := indexer.IndexField(ctx, &gardencorev1beta1.Project{}, gardencore.ProjectNamespace, func(obj client.Object) []string {\n\t\tproject, ok := obj.(*gardencorev1beta1.Project)\n\t\tif !ok {\n\t\t\treturn []string{\"\"}\n\t\t}\n\t\tif project.Spec.Namespace == nil {\n\t\t\treturn []string{\"\"}\n\t\t}\n\t\treturn []string{*project.Spec.Namespace}\n\t}); 
err != nil {\n\t\treturn fmt.Errorf(\"failed to add indexer to Project Informer: %w\", err)\n\t}\n\n\tif err := indexer.IndexField(ctx, &gardencorev1beta1.Shoot{}, gardencore.ShootSeedName, func(obj client.Object) []string {\n\t\tshoot, ok := obj.(*gardencorev1beta1.Shoot)\n\t\tif !ok {\n\t\t\treturn []string{\"\"}\n\t\t}\n\t\tif shoot.Spec.SeedName == nil {\n\t\t\treturn []string{\"\"}\n\t\t}\n\t\treturn []string{*shoot.Spec.SeedName}\n\t}); err != nil {\n\t\treturn fmt.Errorf(\"failed to add indexer to Shoot Informer: %w\", err)\n\t}\n\n\tif err := indexer.IndexField(ctx, &seedmanagementv1alpha1.ManagedSeed{}, seedmanagement.ManagedSeedShootName, func(obj client.Object) []string {\n\t\tms, ok := obj.(*seedmanagementv1alpha1.ManagedSeed)\n\t\tif !ok {\n\t\t\treturn []string{\"\"}\n\t\t}\n\t\tif ms.Spec.Shoot == nil {\n\t\t\treturn []string{\"\"}\n\t\t}\n\t\treturn []string{ms.Spec.Shoot.Name}\n\t}); err != nil {\n\t\treturn fmt.Errorf(\"failed to add indexer to ManagedSeed Informer: %w\", err)\n\t}\n\n\treturn nil\n}", "func createIndexes(db *sql.DB, table string) error {\n\tindexes := []string{}\n\n\tswitch table {\n\tcase \"dfp\":\n\t\tindexes = []string{\n\t\t\t\"CREATE INDEX IF NOT EXISTS dfp_metrics ON dfp (CODE, ID_CIA, YEAR, VL_CONTA);\",\n\t\t\t\"CREATE INDEX IF NOT EXISTS dfp_year_ver ON dfp (ID_CIA, YEAR, VERSAO);\",\n\t\t}\n\tcase \"itr\":\n\t\tindexes = []string{\n\t\t\t\"CREATE INDEX IF NOT EXISTS itr_metrics ON itr (CODE, ID_CIA, YEAR, VL_CONTA);\",\n\t\t\t\"CREATE INDEX IF NOT EXISTS itr_quarter_ver ON itr (ID_CIA, DT_FIM_EXERC, VERSAO);\",\n\t\t}\n\tcase \"stock_quotes\":\n\t\tindexes = []string{\n\t\t\t\"CREATE UNIQUE INDEX IF NOT EXISTS stock_quotes_stockdate ON stock_quotes (stock, date);\",\n\t\t}\n\tcase \"fii_dividends\":\n\t\tindexes = []string{\n\t\t\t\"CREATE UNIQUE INDEX IF NOT EXISTS fii_dividends_pk ON fii_dividends (trading_code, base_date);\",\n\t\t}\n\t}\n\n\tfor _, idx := range indexes {\n\t\t_, err := db.Exec(idx)\n\t\tif err != 
nil {\n\t\t\treturn errors.Wrap(err, \"erro ao criar índice\")\n\t\t}\n\t}\n\n\treturn nil\n}", "func (st *Schema) addCreateIndex(ci sql.CreateIndexStmt) {\n\tst.Indexes = append(st.Indexes, SchemaIndex{\n\t\tIndex: ci.Index,\n\t\tColumns: st.toIndexColumns(ci.IndexedColumns),\n\t})\n}", "func createIndex(collection *mongo.Collection, field string, unique bool) bool {\n\tmod := mongo.IndexModel{\n\t\tKeys: bson.M{field: 1}, // index in ascending order or -1 for descending order\n\t\tOptions: options.Index().SetUnique(unique),\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\tdefer cancel()\n\n\t_, err := collection.Indexes().CreateOne(ctx, mod)\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (db *MongoDbBridge) updateIndexes(col *mongo.Collection, list []mongo.IndexModel, sig chan bool) error {\n\tview := col.Indexes()\n\n\t// load list of all indexes known\n\tknown, err := view.ListSpecifications(context.Background())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// loop prescriptions and make sure each index exists by name\n\tfor _, ix := range list {\n\t\t// respect possible terminate signal\n\t\tselect {\n\t\tcase <-sig:\n\t\t\tsig <- true\n\t\t\treturn nil\n\t\tdefault:\n\t\t}\n\n\t\t// create missing index\n\t\terr = db.createIndexIfNotExists(col, &view, ix, known)\n\t\tif err != nil {\n\t\t\tdb.log.Errorf(err.Error())\n\t\t}\n\t}\n\n\t// loop indexes and remove indexes missing in the prescriptions\n\tfor _, spec := range known {\n\t\terr = db.removeIndexIfShouldNotExists(col, &view, spec, list)\n\t\tif err != nil {\n\t\t\tdb.log.Errorf(err.Error())\n\t\t}\n\t}\n\n\treturn nil\n}", "func InitDB(db *mgo.Database) {\n\tfor i := range workIndexes {\n\t\terr := db.C(workIndexes[i].Name).EnsureIndex(workIndexes[i].Index)\n\t\tif err != nil {\n\t\t\tlog.Print(err)\n\t\t}\n\t}\n}", "func (s *Server) getIndexes(w http.ResponseWriter, r *http.Request) {\n\tfs, err := 
s.db.List(\"file\")\n\tif err != nil {\n\t\ts.logf(\"error listing files from mpd for building indexes: %v\", err)\n\t\twriteXML(w, errGeneric)\n\t\treturn\n\t}\n\tfiles := indexFiles(fs)\n\n\twriteXML(w, func(c *container) {\n\t\tc.Indexes = &indexesContainer{\n\t\t\tLastModified: time.Now().Unix(),\n\t\t}\n\n\t\t// Incremented whenever it's time to create a new index for a new\n\t\t// initial letter\n\t\tidx := -1\n\n\t\tvar indexes []index\n\n\t\t// A set of initial characters, used to deduplicate the addition of\n\t\t// nwe indexes\n\t\tseenChars := make(map[rune]struct{}, 0)\n\n\t\tfor _, f := range files {\n\t\t\t// Filter any non-top level items\n\t\t\tif strings.Contains(f.Name, string(os.PathSeparator)) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Initial rune is used to create an index name\n\t\t\tc, _ := utf8.DecodeRuneInString(f.Name)\n\t\t\tname := string(c)\n\n\t\t\t// If initial rune is a digit, put index under a numeric section\n\t\t\tif unicode.IsDigit(c) {\n\t\t\t\tc = '#'\n\t\t\t\tname = \"#\"\n\t\t\t}\n\n\t\t\t// If a new rune appears, create a new index for it\n\t\t\tif _, ok := seenChars[c]; !ok {\n\t\t\t\tseenChars[c] = struct{}{}\n\t\t\t\tindexes = append(indexes, index{Name: name})\n\t\t\t\tidx++\n\t\t\t}\n\n\t\t\tindexes[idx].Artists = append(indexes[idx].Artists, artist{\n\t\t\t\tName: f.Name,\n\t\t\t\tID: strconv.Itoa(f.ID),\n\t\t\t})\n\t\t}\n\n\t\tc.Indexes.Indexes = indexes\n\t})\n}", "func indexHandler(s *search.SearchServer, w http.ResponseWriter, r *http.Request) {\n\tparams := r.URL.Query()\n\tcollection := params.Get(\"collection\")\n\n\tif collection == \"\" {\n\t\trespondWithError(w, r, \"Collection query parameter is required\")\n\t\treturn\n\t}\n\n\tif !s.Exists(collection) {\n\t\trespondWithError(w, r, \"Collection does not exist\")\n\t\treturn\n\t}\n\n\tbytes, err := ioutil.ReadAll(r.Body)\n\n\tif err != nil {\n\t\trespondWithError(w, r, \"Error reading body\")\n\t\treturn\n\t}\n\n\tif len(bytes) == 0 {\n\t\trespondWithError(w, 
r, \"Error document missing\")\n\t\treturn\n\t}\n\n\tvar doc document\n\terr = json.Unmarshal(bytes, &doc)\n\tif err != nil {\n\t\trespondWithError(w, r, \"Error parsing document JSON\")\n\t\treturn\n\t}\n\n\tif len(doc.Id) == 0 {\n\t\trespondWithError(w, r, fmt.Sprintf(\"Error document id is required, not found in: %v\", string(bytes)))\n\t\treturn\n\t}\n\n\tif len(doc.Fields) == 0 {\n\t\trespondWithError(w, r, \"Error document is missing fields\")\n\t\treturn\n\t}\n\n\td := search.NewDocument()\n\td.Id = doc.Id\n\tfor k, v := range doc.Fields {\n\t\td.Fields[k] = &search.Field{Value: v}\n\t}\n\n\ts.Index(collection, d)\n\trespondWithSuccess(w, r, \"Success, document indexed\")\n}", "func (db *MongoDB) Init() {\n\tindex := mgo.Index{\n\t\tKey: []string{\"date\"},\n\t\tUnique: true,\n\t\tDropDups: true,\n\t\tBackground: true,\n\t\tSparse: true,\n\t}\n\n\terr := Session.DB(db.DatabaseName).C(db.ExchangeCollectionName).EnsureIndex(index)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func (m *MongoClient) initAllCollection() {\n\tm.UserCollection = m.Database.Collection(Collections[UserCollection])\n\tm.ProjectCollection = m.Database.Collection(Collections[ProjectCollection])\n\n\t// Initialize chaos infra collection\n\terr := m.Database.CreateCollection(context.TODO(), Collections[ChaosInfraCollection], nil)\n\tif err != nil {\n\t\tlogrus.WithError(err).Error(\"failed to create chaosInfrastructures collection\")\n\t}\n\n\tm.ChaosInfraCollection = m.Database.Collection(Collections[ChaosInfraCollection])\n\t_, err = m.ChaosInfraCollection.Indexes().CreateMany(backgroundContext, []mongo.IndexModel{\n\t\t{\n\t\t\tKeys: bson.M{\n\t\t\t\t\"infra_id\": 1,\n\t\t\t},\n\t\t\tOptions: options.Index().SetUnique(true),\n\t\t},\n\t\t{\n\t\t\tKeys: bson.M{\n\t\t\t\t\"name\": 1,\n\t\t\t},\n\t\t},\n\t})\n\tif err != nil {\n\t\tlogrus.WithError(err).Error(\"failed to create indexes for chaosInfrastructures collection\")\n\t}\n\n\t// Initialize chaos experiment collection\n\terr = 
m.Database.CreateCollection(context.TODO(), Collections[ChaosExperimentCollection], nil)\n\tif err != nil {\n\t\tlogrus.WithError(err).Error(\"failed to create chaosExperiments collection\")\n\t}\n\n\tm.ChaosExperimentCollection = m.Database.Collection(Collections[ChaosExperimentCollection])\n\t_, err = m.ChaosExperimentCollection.Indexes().CreateMany(backgroundContext, []mongo.IndexModel{\n\t\t{\n\t\t\tKeys: bson.M{\n\t\t\t\t\"experiment_id\": 1,\n\t\t\t},\n\t\t\tOptions: options.Index().SetUnique(true),\n\t\t},\n\t\t{\n\t\t\tKeys: bson.M{\n\t\t\t\t\"name\": 1,\n\t\t\t},\n\t\t},\n\t})\n\tif err != nil {\n\t\tlogrus.WithError(err).Error(\"failed to create indexes for chaosExperiments collection\")\n\t}\n\n\t// Initialize chaos experiment runs collection\n\terr = m.Database.CreateCollection(context.TODO(), Collections[ChaosExperimentRunsCollection], nil)\n\tif err != nil {\n\t\tlogrus.WithError(err).Error(\"failed to create chaosExperimentRuns collection\")\n\t}\n\n\tm.ChaosExperimentRunsCollection = m.Database.Collection(Collections[ChaosExperimentRunsCollection])\n\t_, err = m.ChaosExperimentRunsCollection.Indexes().CreateMany(backgroundContext, []mongo.IndexModel{\n\t\t{\n\t\t\tKeys: bson.M{\n\t\t\t\t\"experiment_run_id\": 1,\n\t\t\t},\n\t\t},\n\t})\n\tif err != nil {\n\t\tlogrus.WithError(err).Fatal(\"failed to create indexes for chaosExperimentRuns collection\")\n\t}\n\n\t// Initialize chaos hubs collection\n\terr = m.Database.CreateCollection(context.TODO(), Collections[ChaosHubCollection], nil)\n\tif err != nil {\n\t\tlogrus.WithError(err).Error(\"failed to create chaosHubs collection\")\n\t}\n\n\tm.ChaosHubCollection = m.Database.Collection(Collections[ChaosHubCollection])\n\t_, err = m.ChaosHubCollection.Indexes().CreateMany(backgroundContext, []mongo.IndexModel{\n\t\t{\n\t\t\tKeys: bson.M{\n\t\t\t\t\"hub_id\": 1,\n\t\t\t},\n\t\t\tOptions: options.Index().SetUnique(true),\n\t\t},\n\t\t{\n\t\t\tKeys: bson.M{\n\t\t\t\t\"name\": 
1,\n\t\t\t},\n\t\t},\n\t})\n\tif err != nil {\n\t\tlogrus.WithError(err).Fatal(\"failed to create indexes for chaosHubs collection\")\n\t}\n\n\tm.GitOpsCollection = m.Database.Collection(Collections[GitOpsCollection])\n\t_, err = m.GitOpsCollection.Indexes().CreateMany(backgroundContext, []mongo.IndexModel{\n\t\t{\n\t\t\tKeys: bson.M{\n\t\t\t\t\"project_id\": 1,\n\t\t\t},\n\t\t\tOptions: options.Index().SetUnique(true),\n\t\t},\n\t})\n\tif err != nil {\n\t\tlogrus.WithError(err).Fatal(\"Error Creating Index for GitOps Collection : \", err)\n\t}\n\tm.ImageRegistryCollection = m.Database.Collection(Collections[ImageRegistryCollection])\n\tm.ServerConfigCollection = m.Database.Collection(Collections[ServerConfigCollection])\n\t_, err = m.ServerConfigCollection.Indexes().CreateMany(backgroundContext, []mongo.IndexModel{\n\t\t{\n\t\t\tKeys: bson.M{\n\t\t\t\t\"key\": 1,\n\t\t\t},\n\t\t\tOptions: options.Index().SetUnique(true),\n\t\t},\n\t})\n\tif err != nil {\n\t\tlogrus.WithError(err).Fatal(\"Error Creating Index for Server Config Collection : \", err)\n\t}\n\tm.EnvironmentCollection = m.Database.Collection(Collections[EnvironmentCollection])\n\t_, err = m.EnvironmentCollection.Indexes().CreateMany(backgroundContext, []mongo.IndexModel{\n\t\t{\n\t\t\tKeys: bson.M{\n\t\t\t\t\"environment_id\": 1,\n\t\t\t},\n\t\t\tOptions: options.Index().SetUnique(true),\n\t\t},\n\t\t{\n\t\t\tKeys: bson.M{\n\t\t\t\t\"name\": 1,\n\t\t\t},\n\t\t},\n\t})\n\tif err != nil {\n\t\tlogrus.WithError(err).Fatal(\"failed to create indexes for environments collection\")\n\t}\n\t// Initialize chaos probes collection\n\terr = m.Database.CreateCollection(context.TODO(), Collections[ChaosProbeCollection], nil)\n\tif err != nil {\n\t\tlogrus.WithError(err).Error(\"failed to create chaosProbes collection\")\n\t}\n\n\tm.ChaosProbeCollection = m.Database.Collection(Collections[ChaosProbeCollection])\n\t_, err = m.ChaosProbeCollection.Indexes().CreateMany(backgroundContext, 
[]mongo.IndexModel{\n\t\t{\n\t\t\tKeys: bson.M{\n\t\t\t\t\"name\": 1,\n\t\t\t},\n\t\t\tOptions: options.Index().SetUnique(true),\n\t\t},\n\t\t{\n\t\t\tKeys: bson.D{\n\t\t\t\t{\"project_id\", 1},\n\t\t\t},\n\t\t},\n\t})\n\tif err != nil {\n\t\tlogrus.WithError(err).Error(\"failed to create indexes for chaosProbes collection\")\n\t}\n}", "func (st *Schema) addIndex(pk bool, name string, cols []IndexColumn) bool {\n\tif reflect.DeepEqual(st.PK, cols) {\n\t\treturn false\n\t}\n\tfor _, ind := range st.Indexes {\n\t\tif reflect.DeepEqual(ind.Columns, cols) {\n\t\t\tif pk {\n\t\t\t\tst.PrimaryKey = ind.Index\n\t\t\t}\n\t\t\treturn false\n\t\t}\n\t}\n\tst.Indexes = append(st.Indexes, SchemaIndex{\n\t\tIndex: name,\n\t\tColumns: cols,\n\t})\n\tif pk {\n\t\tst.PrimaryKey = name\n\t}\n\treturn true\n}", "func (c *Collection) saveIndexes() error {\n\tib, err := json.Marshal(c.indexes)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn c.db.datastore.Put(dsIndexes.ChildString(c.name), ib)\n}", "func addStoringIndex(ctx context.Context, w io.Writer, adminClient *database.DatabaseAdminClient, database string) error {\n\top, err := adminClient.UpdateDatabaseDdl(ctx, &adminpb.UpdateDatabaseDdlRequest{\n\t\tDatabase: database,\n\t\tStatements: []string{\n\t\t\t\"CREATE INDEX AlbumsByAlbumTitle2 ON Albums(AlbumTitle) STORING (MarketingBudget)\",\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := op.Wait(ctx); err != nil {\n\t\treturn err\n\t}\n\tfmt.Fprintf(w, \"Added storing index\\n\")\n\treturn nil\n}", "func (b *Bucket) EnsureIndexes(ctx context.Context, force bool) error {\n\t// acquire mutex\n\tb.indexMutex.Lock()\n\tdefer b.indexMutex.Unlock()\n\n\t// return if indexes have been ensured\n\tif b.indexEnsured {\n\t\treturn nil\n\t}\n\n\t// clone collection with primary read preference\n\tfiles, err := b.files.Clone(options.Collection().SetReadPreference(readpref.Primary()))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// unless force is specified, skip index 
ensuring if files exists already\n\tif !force {\n\t\terr = files.FindOne(ctx, bson.M{}).Err()\n\t\tif err != nil && err != ErrNoDocuments {\n\t\t\treturn err\n\t\t} else if err == nil {\n\t\t\tb.indexEnsured = true\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t// prepare files index\n\tfilesIndex := mongo.IndexModel{\n\t\tKeys: bson.D{\n\t\t\t{Key: \"filename\", Value: 1},\n\t\t\t{Key: \"uploadDate\", Value: 1},\n\t\t},\n\t}\n\n\t// prepare chunks index\n\tchunksIndex := mongo.IndexModel{\n\t\tKeys: bson.D{\n\t\t\t{Key: \"files_id\", Value: 1},\n\t\t\t{Key: \"n\", Value: 1},\n\t\t},\n\t\tOptions: options.Index().SetUnique(true),\n\t}\n\n\t// prepare markers index\n\tmarkersIndex := mongo.IndexModel{\n\t\tKeys: bson.D{\n\t\t\t{Key: \"files_id\", Value: 1},\n\t\t},\n\t\tOptions: options.Index().SetUnique(true),\n\t}\n\n\t// check files index existence\n\thasFilesIndex, err := b.hasIndex(ctx, b.files, filesIndex)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// check chunks index existence\n\thasChunksIndex, err := b.hasIndex(ctx, b.chunks, chunksIndex)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// check markers index existence\n\thasMarkersIndex, err := b.hasIndex(ctx, b.markers, markersIndex)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// create files index if missing\n\tif !hasFilesIndex {\n\t\t_, err = b.files.Indexes().CreateOne(ctx, filesIndex)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// create chunks index if missing\n\tif !hasChunksIndex {\n\t\t_, err = b.chunks.Indexes().CreateOne(ctx, chunksIndex)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// create markers index if missing\n\tif !hasMarkersIndex {\n\t\t_, err = b.markers.Indexes().CreateOne(ctx, markersIndex)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// set flag\n\tb.indexEnsured = true\n\n\treturn nil\n}", "func (db *Database) createTimestampIndex() {\n\tindexView := db.database.Collection(TRACKS.String()).Indexes()\n\n\tindexModel := mongo.IndexModel{\n\t\tKeys: 
bson.NewDocument(bson.EC.Int32(\"ts\", -1))}\n\n\t_, err := indexView.CreateOne(context.Background(), indexModel, nil)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func CreateAllIndexes() error {\n\terr := questionAnswerDAO.CreateIndexes()\n\n\treturn err\n}", "func updateIndex(indexName string, objects []algoliasearch.Object) error {\n\n\tindex := algoliaClient.InitIndex(indexName)\n\terr := populateIndex(index, objects)\n\tif err != nil {\n\t\treturn errors.New(\"Error updating index -\" + err.Error())\n\t}\n\n\treturn nil\n}", "func (mdbc *MongoDbController) initUserCollection(dbName string) error {\n\tdb := mdbc.MongoClient.Database(dbName)\n\n\tjsonSchema := bson.M{\n\t\t\"bsonType\": \"object\",\n\t\t\"required\": []string{\"username\", \"passwordHash\", \"email\", \"enabled\", \"admin\"},\n\t\t\"properties\": bson.M{\n\t\t\t\"username\": bson.M{\n\t\t\t\t\"bsonType\": \"string\",\n\t\t\t\t\"description\": \"username is required and must be a string\",\n\t\t\t},\n\t\t\t\"passwordHash\": bson.M{\n\t\t\t\t\"bsonType\": \"string\",\n\t\t\t\t\"description\": \"passwordHash is required and must be a string\",\n\t\t\t},\n\t\t\t\"email\": bson.M{\n\t\t\t\t\"bsonType\": \"string\",\n\t\t\t\t\"description\": \"email is required and must be a string\",\n\t\t\t},\n\t\t\t\"enabled\": bson.M{\n\t\t\t\t\"bsonType\": \"bool\",\n\t\t\t\t\"description\": \"enabled is required and must be a boolean\",\n\t\t\t},\n\t\t\t\"admin\": bson.M{\n\t\t\t\t\"bsonType\": \"bool\",\n\t\t\t\t\"description\": \"admin is required and must be a boolean\",\n\t\t\t},\n\t\t},\n\t}\n\n\tcolOpts := options.CreateCollection().SetValidator(bson.M{\"$jsonSchema\": jsonSchema})\n\n\tcreateCollectionErr := db.CreateCollection(context.TODO(), \"users\", colOpts)\n\n\tif createCollectionErr != nil {\n\t\treturn dbController.NewDBError(createCollectionErr.Error())\n\t}\n\n\tmodels := []mongo.IndexModel{\n\t\t{\n\t\t\tKeys: bson.D{{Key: \"username\", Value: 1}},\n\t\t\tOptions: 
options.Index().SetUnique(true),\n\t\t},\n\t\t{\n\t\t\tKeys: bson.D{{Key: \"email\", Value: 1}},\n\t\t\tOptions: options.Index().SetUnique(true),\n\t\t},\n\t}\n\n\topts := options.CreateIndexes().SetMaxTime(2 * time.Second)\n\n\tcollection, _, _ := mdbc.getCollection(\"users\")\n\t_, setIndexErr := collection.Indexes().CreateMany(context.TODO(), models, opts)\n\n\tif setIndexErr != nil {\n\t\treturn dbController.NewDBError(setIndexErr.Error())\n\t}\n\n\thashedPass, hashedPassErr := authUtils.HashPassword(\"password\")\n\n\tif hashedPassErr != nil {\n\t\treturn hashedPassErr\n\t}\n\n\t// Add an administrative user\n\taddUserErr := mdbc.AddUser(dbController.FullUserDocument{\n\t\tUsername: \"admin\",\n\t\tEmail: \"[email protected]\",\n\t\tEnabled: true,\n\t\tAdmin: true,\n\t\tPasswordHash: hashedPass,\n\t},\n\t)\n\n\tif addUserErr != nil {\n\t\tfmt.Println(addUserErr.Error())\n\t\treturn dbController.NewDBError(addUserErr.Error())\n\t}\n\n\treturn nil\n}", "func InitializeIndexes(bucket base.Bucket, useXattrs bool, numReplicas uint, numHousekeepingReplicas uint) error {\n\n\tbase.Logf(\"Initializing indexes with numReplicas: %d\", numReplicas)\n\n\tgocbBucket, ok := bucket.(*base.CouchbaseBucketGoCB)\n\tif !ok {\n\t\tbase.Log(\"Using a non-Couchbase bucket - indexes will not be created.\")\n\t\treturn nil\n\t}\n\n\tfor _, sgIndex := range sgIndexes {\n\t\terr := sgIndex.createIfNeeded(gocbBucket, useXattrs, numReplicas)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Unable to install index %s: %v\", sgIndex.simpleName, err)\n\t\t}\n\t}\n\n\treturn waitForIndexes(gocbBucket, useXattrs)\n}", "func (conf *Config) CreateIndexes(client *pilosa.Client) error {\n\treturn conf.CompareIndexes(client, true, true)\n}", "func TestEnsureIndex(t *testing.T) {\n\ttests.ResetLog()\n\tdefer tests.DisplayLog()\n\n\tconst fixture = \"basic.json\"\n\tset1, err := qfix.Get(fixture)\n\tif err != nil {\n\t\tt.Fatalf(\"\\t%s\\tShould load query record from file : %v\", tests.Failed, 
err)\n\t}\n\tt.Logf(\"\\t%s\\tShould load query record from file.\", tests.Success)\n\n\tdb, err := db.NewMGO(tests.Context, tests.TestSession)\n\tif err != nil {\n\t\tt.Fatalf(\"\\t%s\\tShould be able to get a Mongo session : %v\", tests.Failed, err)\n\t}\n\tdefer db.CloseMGO(tests.Context)\n\n\tdefer func() {\n\t\tif err := qfix.Remove(db, prefix); err != nil {\n\t\t\tt.Fatalf(\"\\t%s\\tShould be able to remove the query set : %v\", tests.Failed, err)\n\t\t}\n\t\tt.Logf(\"\\t%s\\tShould be able to remove the query set.\", tests.Success)\n\t}()\n\n\tt.Log(\"Given the need to validate ensureing indexes.\")\n\t{\n\t\tt.Log(\"\\tWhen using fixture\", fixture)\n\t\t{\n\t\t\tif err := query.EnsureIndexes(tests.Context, db, set1); err != nil {\n\t\t\t\tt.Fatalf(\"\\t%s\\tShould be able to ensure a query set index : %s\", tests.Failed, err)\n\t\t\t}\n\t\t\tt.Logf(\"\\t%s\\tShould be able to ensure a query set index.\", tests.Success)\n\t\t}\n\t}\n}", "func loadIndexs() {\n\tdb := open()\n\tindexs = make(map[string][]*Index)\n\tdb.View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(INDEX_BUCKET))\n\t\tif b == nil {\n\t\t\tlogger.Infof(\"bucket[%s] not exist\", INDEX_BUCKET)\n\t\t\treturn nil\n\t\t}\n\t\tc := b.Cursor()\n\t\tfor k, v := c.First(); k != nil; k, v = c.Next() {\n\t\t\tkey := string(k)\n\t\t\tvar _indexs []string\n\t\t\terr := json.Unmarshal(v, &_indexs)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Errorf(\"parse index[%s] error -> %v\", k, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t__indexs := make([]*Index, len(_indexs)) \n\t\t\t// parse index\n\t\t\tfor i, _index := range _indexs {\n\t\t\t\tsps :=strings.Split(_index, INDEX_SPLIT) \n\t\t\t\tindex := &Index {\n\t\t\t\t\tbucket: key,\n\t\t\t\t\tindexs: sps,\n\t\t\t\t}\n\t\t\t\t__indexs[i] = index\n\t\t\t}\n\t\t\tindexs[key] = __indexs\n\t\t}\n\t\treturn nil\n\t})\n}", "func (db *DB) AddIndex(indexes ...*Index) error {\n\tfunc() {\n\t\tdb.m.Lock()\n\t\tdefer db.m.Unlock()\n\t\tfor _, v := range indexes 
{\n\t\t\tdb.indexes[v.name] = v\n\t\t}\n\t}()\n\tdb.m.RLock()\n\tdefer db.m.RUnlock()\n\treturn db.Update(func(tx *Tx) error {\n\t\tfor _, v := range db.indexes {\n\t\t\tif _, err := tx.CreateBucketIfNotExists([]byte(v.name)); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif _, err := tx.CreateBucketIfNotExists([]byte(v.name + bkkeyssuffix)); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n}", "func (w *mongoWriter) createCollection(coll *Collection) error {\n\tc := w.session.Database(coll.DB).Collection(coll.Name)\n\n\tif w.append || w.indexOnly {\n\t\treturn nil\n\t}\n\terr := c.Drop(context.Background())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"fail to drop collection '%s'\\ncause %v\", coll.Name, err)\n\t}\n\n\tcreateCommand := bson.D{\n\t\tbson.E{Key: \"create\", Value: coll.Name},\n\t}\n\tif coll.CompressionLevel != \"\" {\n\t\tcreateCommand = append(createCommand, bson.E{Key: \"storageEngine\", Value: bson.M{\"wiredTiger\": bson.M{\"configString\": \"block_compressor=\" + coll.CompressionLevel}}})\n\t}\n\terr = w.session.Database(coll.DB).RunCommand(context.Background(), createCommand).Err()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"coulnd't create collection with compression level '%s'\\n cause: %v\", coll.CompressionLevel, err)\n\t}\n\n\tif coll.ShardConfig.ShardCollection != \"\" {\n\t\tnm := coll.DB + \".\" + coll.Name\n\t\tif coll.ShardConfig.ShardCollection != nm {\n\t\t\treturn fmt.Errorf(\"wrong value for 'shardConfig.shardCollection', should be <database>.<collection>: found '%s', expected '%s'\", coll.ShardConfig.ShardCollection, nm)\n\t\t}\n\t\tif len(coll.ShardConfig.Key) == 0 {\n\t\t\treturn fmt.Errorf(\"wrong value for 'shardConfig.key', can't be null and must be an object like {'_id': 'hashed'}, found: %v\", coll.ShardConfig.Key)\n\t\t}\n\t\terr := w.session.Database(\"admin\").RunCommand(context.Background(), bson.D{bson.E{Key: \"enableSharding\", Value: coll.DB}}).Err()\n\t\tif err != nil {\n\t\t\treturn 
fmt.Errorf(\"fail to enable sharding on db '%s'\\n cause: %v\", coll.DB, err)\n\t\t}\n\t\t// as the collection is empty, no need to create the indexes on the sharded key before creating the collection,\n\t\t// because it will be created automatically by mongodb. See https://docs.mongodb.com/manual/core/sharding-shard-key/#shard-key-indexes\n\t\t// for details\n\t\terr = runMgoCompatCommand(context.Background(), w.session, \"admin\", coll.ShardConfig)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"fail to shard collection '%s' in db '%s'\\n cause: %v\", coll.Name, coll.DB, err)\n\t\t}\n\t}\n\treturn nil\n}", "func TestEnsureGeoIndex(t *testing.T) {\n\tc := createClient(t, nil)\n\tdb := ensureDatabase(nil, c, \"index_test\", nil, t)\n\n\ttestOptions := []*driver.EnsureGeoIndexOptions{\n\t\tnil,\n\t\t{GeoJSON: true},\n\t\t{GeoJSON: false},\n\t}\n\n\tfor i, options := range testOptions {\n\t\tcol := ensureCollection(nil, db, fmt.Sprintf(\"geo_index_test_%d\", i), nil, t)\n\n\t\tidx, created, err := col.EnsureGeoIndex(nil, []string{\"name\"}, options)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Failed to create new index: %s\", describe(err))\n\t\t}\n\t\tif !created {\n\t\t\tt.Error(\"Expected created to be true, got false\")\n\t\t}\n\t\tif idxType := idx.Type(); idxType != driver.GeoIndex {\n\t\t\tt.Errorf(\"Expected GeoIndex, found `%s`\", idxType)\n\t\t}\n\t\tif options != nil && idx.GeoJSON() != options.GeoJSON {\n\t\t\tt.Errorf(\"Expected GeoJSON to be %t, found `%t`\", options.GeoJSON, idx.GeoJSON())\n\t\t}\n\n\t\t// Index must exists now\n\t\tif found, err := col.IndexExists(nil, idx.Name()); err != nil {\n\t\t\tt.Fatalf(\"Failed to check index '%s' exists: %s\", idx.Name(), describe(err))\n\t\t} else if !found {\n\t\t\tt.Errorf(\"Index '%s' does not exist, expected it to exist\", idx.Name())\n\t\t}\n\n\t\t// Ensure again, created must be false now\n\t\t_, created, err = col.EnsureGeoIndex(nil, []string{\"name\"}, options)\n\t\tif err != nil 
{\n\t\t\tt.Fatalf(\"Failed to re-create index: %s\", describe(err))\n\t\t}\n\t\tif created {\n\t\t\tt.Error(\"Expected created to be false, got true\")\n\t\t}\n\n\t\t// Remove index\n\t\tif err := idx.Remove(nil); err != nil {\n\t\t\tt.Fatalf(\"Failed to remove index '%s': %s\", idx.Name(), describe(err))\n\t\t}\n\n\t\t// Index must not exists now\n\t\tif found, err := col.IndexExists(nil, idx.Name()); err != nil {\n\t\t\tt.Fatalf(\"Failed to check index '%s' exists: %s\", idx.Name(), describe(err))\n\t\t} else if found {\n\t\t\tt.Errorf(\"Index '%s' does exist, expected it not to exist\", idx.Name())\n\t\t}\n\t}\n}", "func buildIndexes(s N1QLStore, indexNames []string) error {\n\tif len(indexNames) == 0 {\n\t\treturn nil\n\t}\n\n\t// Not using strings.Join because we want to escape each index name\n\tindexNameList := StringSliceToN1QLArray(indexNames, \"`\")\n\n\tbuildStatement := fmt.Sprintf(\"BUILD INDEX ON %s(%s)\", s.EscapedKeyspace(), indexNameList)\n\terr := s.executeStatement(buildStatement)\n\n\t// If indexer reports build will be completed in the background, wait to validate build actually happens.\n\tif IsIndexerRetryBuildError(err) {\n\t\tInfofCtx(context.TODO(), KeyQuery, \"Indexer error creating index - waiting for background build. 
Error:%v\", err)\n\t\t// Wait for bucket to be created in background before returning\n\t\treturn s.WaitForIndexesOnline(indexNames, false)\n\t}\n\n\treturn err\n}", "func (db *MongoDbBridge) createIndexIfNotExists(col *mongo.Collection, view *mongo.IndexView, ix mongo.IndexModel, known []*mongo.IndexSpecification) error {\n\t// throw if index is not explicitly named\n\tif ix.Options.Name == nil {\n\t\treturn fmt.Errorf(\"index name not defined on %s\", col.Name())\n\t}\n\n\t// do we know the index?\n\tfor _, spec := range known {\n\t\tif spec.Name == *ix.Options.Name {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tcreatedName, err := view.CreateOne(context.Background(), ix)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create index %s on %s\", *ix.Options.Name, col.Name())\n\t}\n\tdb.log.Noticef(\"created index %s on %s\", createdName, col.Name())\n\treturn nil\n}", "func (c *Couchbase) RegisterIndexes(indexes []*Index) {\n\tc.indexes = indexes\n}", "func addToIndex(repo *git.Repository, path string) error {\n\n\tindex, err := repo.Index()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = index.AddByPath(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = index.WriteTree()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = index.Write()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn err\n}", "func (mdbc *MongoDbController) initNonceDatabase(dbName string) error {\n\tdb := mdbc.MongoClient.Database(dbName)\n\n\tjsonSchema := bson.M{\n\t\t\"bsonType\": \"object\",\n\t\t\"required\": []string{\"hash\", \"time\", \"remoteAddress\"},\n\t\t\"properties\": bson.M{\n\t\t\t\"hash\": bson.M{\n\t\t\t\t\"bsonType\": \"string\",\n\t\t\t\t\"description\": \"hash is required and must be a string\",\n\t\t\t},\n\t\t\t\"time\": bson.M{\n\t\t\t\t\"bsonType\": \"long\",\n\t\t\t\t\"description\": \"time is required and must be a 64-bit integer (aka a long)\",\n\t\t\t},\n\t\t\t\"remoteAddress\": bson.M{\n\t\t\t\t\"bsonType\": \"string\",\n\t\t\t\t\"description\": 
\"remoteAddress is required and must be a string\",\n\t\t\t},\n\t\t},\n\t}\n\n\tcolOpts := options.CreateCollection().SetValidator(bson.M{\"$jsonSchema\": jsonSchema})\n\n\tcreateCollectionErr := db.CreateCollection(context.TODO(), \"authNonces\", colOpts)\n\n\tif createCollectionErr != nil {\n\t\treturn dbController.NewDBError(createCollectionErr.Error())\n\t}\n\n\tmodels := []mongo.IndexModel{\n\t\t{\n\t\t\tKeys: bson.D{{Key: \"hash\", Value: 1}},\n\t\t\tOptions: options.Index().SetUnique(true),\n\t\t},\n\t\t{\n\t\t\tKeys: bson.D{{Key: \"remoteAddress\", Value: 1}},\n\t\t\tOptions: options.Index().SetUnique(true),\n\t\t},\n\t}\n\n\topts := options.CreateIndexes().SetMaxTime(2 * time.Second)\n\n\tcollection, _, _ := mdbc.getCollection(\"authNonces\")\n\tnames, setIndexErr := collection.Indexes().CreateMany(context.TODO(), models, opts)\n\n\tif setIndexErr != nil {\n\t\treturn dbController.NewDBError(setIndexErr.Error())\n\t}\n\n\tfmt.Printf(\"created indexes %v\\n\", names)\n\n\treturn nil\n}", "func TestEnsurePersistentIndex(t *testing.T) {\n\tc := createClient(t, nil)\n\tdb := ensureDatabase(nil, c, \"index_test\", nil, t)\n\n\ttestOptions := []*driver.EnsurePersistentIndexOptions{\n\t\tnil,\n\t\t{Unique: true, Sparse: false},\n\t\t{Unique: true, Sparse: true},\n\t\t{Unique: false, Sparse: false},\n\t\t{Unique: false, Sparse: true},\n\t}\n\n\tfor i, options := range testOptions {\n\t\tcol := ensureCollection(nil, db, fmt.Sprintf(\"persistent_index_test_%d\", i), nil, t)\n\n\t\tidx, created, err := col.EnsurePersistentIndex(nil, []string{\"age\", \"name\"}, options)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Failed to create new index: %s\", describe(err))\n\t\t}\n\t\tif !created {\n\t\t\tt.Error(\"Expected created to be true, got false\")\n\t\t}\n\t\tif idxType := idx.Type(); idxType != driver.PersistentIndex {\n\t\t\tt.Errorf(\"Expected PersistentIndex, found `%s`\", idxType)\n\t\t}\n\t\tif options != nil && idx.Unique() != options.Unique 
{\n\t\t\tt.Errorf(\"Expected Unique to be %t, found `%t`\", options.Unique, idx.Unique())\n\t\t}\n\t\tif options != nil && idx.Sparse() != options.Sparse {\n\t\t\tt.Errorf(\"Expected Sparse to be %t, found `%t`\", options.Sparse, idx.Sparse())\n\t\t}\n\n\t\t// Index must exist now\n\t\tif found, err := col.IndexExists(nil, idx.Name()); err != nil {\n\t\t\tt.Fatalf(\"Failed to check index '%s' exists: %s\", idx.Name(), describe(err))\n\t\t} else if !found {\n\t\t\tt.Errorf(\"Index '%s' does not exist, expected it to exist\", idx.Name())\n\t\t}\n\n\t\t// Ensure again, created must be false now\n\t\t_, created, err = col.EnsurePersistentIndex(nil, []string{\"age\", \"name\"}, options)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Failed to re-create index: %s\", describe(err))\n\t\t}\n\t\tif created {\n\t\t\tt.Error(\"Expected created to be false, got true\")\n\t\t}\n\n\t\t// Remove index\n\t\tif err := idx.Remove(nil); err != nil {\n\t\t\tt.Fatalf(\"Failed to remove index '%s': %s\", idx.Name(), describe(err))\n\t\t}\n\n\t\t// Index must not exist now\n\t\tif found, err := col.IndexExists(nil, idx.Name()); err != nil {\n\t\t\tt.Fatalf(\"Failed to check index '%s' exists: %s\", idx.Name(), describe(err))\n\t\t} else if found {\n\t\t\tt.Errorf(\"Index '%s' does exist, expected it not to exist\", idx.Name())\n\t\t}\n\t}\n}", "func (i indexer) Index(ctx context.Context, req IndexQuery) (\n\tresp *IndexResult, err error) {\n\n\tlog.Info(\"index [%v] root [%v] len_dirs=%v len_files=%v\",\n\t\treq.Key, req.Root, len(req.Dirs), len(req.Files))\n\tstart := time.Now()\n\t// Setup the response\n\tresp = NewIndexResult()\n\tif err = req.Normalize(); err != nil {\n\t\tlog.Info(\"index [%v] error: %v\", req.Key, err)\n\t\tresp.Error = errs.NewStructError(err)\n\t\treturn\n\t}\n\n\t// create index shards\n\tvar nshards int\n\tif nshards = i.cfg.NumShards; nshards == 0 {\n\t\tnshards = 1\n\t}\n\tnshards = utils.MinInt(nshards, maxShards)\n\ti.shards = make([]index.IndexWriter, 
nshards)\n\ti.root = getRoot(i.cfg, &req)\n\n\tfor n := range i.shards {\n\t\tname := path.Join(i.root, shardName(req.Key, n))\n\t\tixw, err := getIndexWriter(ctx, name)\n\t\tif err != nil {\n\t\t\tresp.Error = errs.NewStructError(err)\n\t\t\treturn resp, nil\n\t\t}\n\t\ti.shards[n] = ixw\n\t}\n\n\tfs := getFileSystem(ctx, i.root)\n\trepo := newRepoFromQuery(&req, i.root)\n\trepo.SetMeta(i.cfg.RepoMeta, req.Meta)\n\tresp.Repo = repo\n\n\t// Add query Files and scan Dirs for files to index\n\tnames, err := i.scanner(fs, &req)\n\tch := make(chan int, nshards)\n\tchnames := make(chan string, 100)\n\tgo func() {\n\t\tfor _, name := range names {\n\t\t\tchnames <- name\n\t\t}\n\t\tclose(chnames)\n\t}()\n\treqch := make(chan par.RequestFunc, nshards)\n\tfor _, shard := range i.shards {\n\t\treqch <- indexShard(&i, &req, shard, fs, chnames, ch)\n\t}\n\tclose(reqch)\n\terr = par.Requests(reqch).WithConcurrency(nshards).DoWithContext(ctx)\n\tclose(ch)\n\n\t// Await results, each indicating the number of files scanned\n\tfor num := range ch {\n\t\trepo.NumFiles += num\n\t}\n\n\trepo.NumShards = len(i.shards)\n\t// Flush our index shard files\n\tfor _, shard := range i.shards {\n\t\tshard.Flush()\n\t\trepo.SizeIndex += ByteSize(shard.IndexBytes())\n\t\trepo.SizeData += ByteSize(shard.DataBytes())\n\t\tlog.Debug(\"index flush %v (data) %v (index)\",\n\t\t\trepo.SizeData, repo.SizeIndex)\n\t}\n\trepo.ElapsedIndexing = time.Since(start)\n\trepo.TimeUpdated = time.Now().UTC()\n\n\tvar msg string\n\tif err != nil {\n\t\trepo.State = ERROR\n\t\tresp.SetError(err)\n\t\tmsg = \"error: \" + resp.Error.Error()\n\t} else {\n\t\trepo.State = OK\n\t\tmsg = \"ok \" + fmt.Sprintf(\n\t\t\t\"(%v files, %v data, %v index)\",\n\t\t\trepo.NumFiles, repo.SizeData, repo.SizeIndex)\n\t}\n\tlog.Info(\"index [%v] %v [%v]\", req.Key, msg, repo.ElapsedIndexing)\n\treturn\n}", "func (s *searcher) CreateIndex() error {\n\tcolor.Cyan(\"[start] initialize index.\")\n\t// get user\n\tuser, reload, err := 
s.getUser()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"[err] createIndex %w\", err)\n\t}\n\n\t// check to whether exist starred items or not.\n\tvar isNewIndex bool\n\tif err := s.db.Update(func(tx *bolt.Tx) error {\n\t\tvar err error\n\t\tbucket := tx.Bucket([]byte(starredBucketName(s.gitToken)))\n\t\tif bucket == nil {\n\t\t\tbucket, err = tx.CreateBucket([]byte(starredBucketName(s.gitToken)))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tisNewIndex = true\n\t\t} else {\n\t\t\tisNewIndex = false\n\t\t}\n\t\treturn nil\n\t}); err != nil {\n\t\tClearAll()\n\t\tcolor.Yellow(\"[err] collapse db file, so delete db file\")\n\t\treturn fmt.Errorf(\"[err] createIndex %w\", err)\n\t}\n\n\t// read old database.\n\tvar oldStarredList []*git.Starred\n\toldStarredMap := map[string]*git.Starred{}\n\tif !isNewIndex {\n\t\t// read old starred from db\n\t\ts.db.View(func(tx *bolt.Tx) error {\n\t\t\tbucket := tx.Bucket([]byte(starredBucketName(s.gitToken)))\n\t\t\tbucket.ForEach(func(k, v []byte) error {\n\t\t\t\tvar starred *git.Starred\n\t\t\t\tif err := json.Unmarshal(v, &starred); err != nil {\n\t\t\t\t\tcolor.Yellow(\"[err] parsing %s\", string(k))\n\t\t\t\t} else {\n\t\t\t\t\toldStarredList = append(oldStarredList, starred)\n\t\t\t\t\toldStarredMap[starred.FullName] = starred\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t})\n\t\t\treturn nil\n\t\t})\n\n\t\t// write old starred to index\n\t\tfor _, starred := range oldStarredList {\n\t\t\tif err := s.index.Index(starred.FullName, starred); err != nil {\n\t\t\t\tcolor.Yellow(\"[err] indexing %s\", starred.FullName)\n\t\t\t}\n\t\t}\n\t}\n\n\t// are you all ready?\n\tif !reload && !isNewIndex {\n\t\tcount, _ := s.index.DocCount()\n\t\tcolor.Green(\"[success][using cache] %d items\", count)\n\t\treturn nil\n\t}\n\n\t// reload new starred list.\n\tnewStarredList, err := s.git.ListStarredAll()\n\tif err != nil {\n\t\tcolor.Yellow(\"[err] don't getting starred list %s\", err.Error())\n\t\tif !isNewIndex {\n\t\t\tcount, _ := 
s.index.DocCount()\n\t\t\tcolor.Yellow(\"[fail][using cache] %d items\", count)\n\t\t\treturn nil\n\t\t}\n\t\treturn fmt.Errorf(\"[err] CreateIndex %w\", err)\n\t}\n\tnewStarredMap := map[string]*git.Starred{}\n\tfor _, starred := range newStarredList {\n\t\tnewStarredMap[starred.FullName] = starred\n\t}\n\n\t// update and insert\n\tif isNewIndex {\n\t\tcolor.White(\"[refresh] all repositories\")\n\t\ts.git.SetReadme(newStarredList)\n\t\ts.writeDBAndIndex(newStarredList)\n\t} else {\n\t\t// insert or update starred\n\t\tvar insertList []*git.Starred\n\t\tvar updateList []*git.Starred\n\t\tfor _, newStarred := range newStarredList {\n\t\t\tif oldStarred, ok := oldStarredMap[newStarred.FullName]; !ok {\n\t\t\t\tinsertList = append(insertList, newStarred)\n\t\t\t\tcolor.White(\"[insert] %s repository pushed_at %s\",\n\t\t\t\t\tnewStarred.FullName, newStarred.PushedAt.Format(time.RFC3339))\n\t\t\t} else {\n\t\t\t\tif oldStarred.PushedAt.Unix() != newStarred.PushedAt.Unix() &&\n\t\t\t\t\toldStarred.CachedAt.Unix() < time.Now().Add(-24*7*time.Hour).Unix() { // after 7 days.\n\t\t\t\t\tupdateList = append(updateList, newStarred)\n\t\t\t\t\tcolor.White(\"[update] %s repository pushed_at %s\",\n\t\t\t\t\t\tnewStarred.FullName, newStarred.PushedAt.Format(time.RFC3339))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// insert\n\t\ts.git.SetReadme(insertList)\n\t\ts.writeDBAndIndex(insertList)\n\n\t\t// update\n\t\ts.git.SetReadme(updateList)\n\t\ts.writeDBAndIndex(updateList)\n\n\t\t// delete starred\n\t\tvar deleteList []*git.Starred\n\t\tfor _, oldStarred := range oldStarredList {\n\t\t\tif _, ok := newStarredMap[oldStarred.FullName]; !ok {\n\t\t\t\tdeleteList = append(deleteList, oldStarred)\n\t\t\t\tcolor.White(\"[delete] %s repository pushed_at %s\",\n\t\t\t\t\toldStarred.FullName, oldStarred.PushedAt.Format(time.RFC3339))\n\t\t\t}\n\t\t}\n\t\t// delete\n\t\ts.deleteDBAndIndex(deleteList)\n\t}\n\n\t// rewrite a user to db\n\tuserData, err := json.Marshal(user)\n\tif err != nil 
{\n\t\treturn fmt.Errorf(\"[err] createIndex %w\", err)\n\t}\n\ts.db.Update(func(tx *bolt.Tx) error {\n\t\tbucket := tx.Bucket([]byte(userBucketName))\n\t\tbucket.Put([]byte(s.gitToken), userData)\n\t\treturn nil\n\t})\n\n\tcount, _ := s.index.DocCount()\n\tcolor.Green(\"[success][new reload] %d items\", count)\n\treturn nil\n}", "func TestSQLSmith_LoadIndexes(t *testing.T) {\n\te := Executor{\n\t\tconn: nil,\n\t\tdb: dbname,\n\t\ttables: make(map[string]*types.Table),\n\t}\n\tindexes[\"users\"] = []types.CIStr{\"idx1\", \"idx2\"}\n\te.loadSchema(schema, indexes)\n\n\tassert.Equal(t, len(e.tables), 6)\n\tassert.Equal(t, len(e.tables[\"users\"].Indexes), 2)\n}", "func (db *MongoDbBridge) updateDatabaseIndexes() {\n\t// define index list loaders\n\tvar ixLoaders = map[string]indexListProvider{\n\t\tcolNetworkNodes: operaNodeCollectionIndexes,\n\t\tcolLockedDelegations: lockedDelegationsIndexes,\n\t}\n\n\t// the DB bridge needs a way to terminate this thread\n\tsig := make(chan bool, 1)\n\tdb.sig = append(db.sig, sig)\n\n\t// prep queue and start the updater\n\tiq := make(chan *IndexList, indexListQueueCapacity)\n\tdb.wg.Add(1)\n\tgo db.indexUpdater(iq, sig)\n\n\t// check indexes\n\tfor cn, ld := range ixLoaders {\n\t\tiq <- &IndexList{\n\t\t\tCollection: db.client.Database(db.dbName).Collection(cn),\n\t\t\tIndexes: ld(),\n\t\t}\n\t}\n\n\t// close the channel as no more updates will be sent\n\tclose(iq)\n}", "func (i *Index) Create() error {\n\n\tdoc := mapping{Properties: map[string]mappingProperty{}}\n\tfor _, f := range i.md.Fields {\n\t\tdoc.Properties[f.Name] = mappingProperty{}\n\t\tfs, err := fieldTypeString(f.Type)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdoc.Properties[f.Name][\"type\"] = fs\n\t}\n\n // Added for apple to apple benchmark\n doc.Properties[\"body\"][\"type\"] = \"text\"\n doc.Properties[\"body\"][\"analyzer\"] = \"my_english_analyzer\"\n doc.Properties[\"body\"][\"search_analyzer\"] = \"whitespace\"\n 
doc.Properties[\"body\"][\"index_options\"] = \"offsets\"\n //doc.Properties[\"body\"][\"test\"] = \"test\"\n index_map := map[string]int{\n \"number_of_shards\" : 1,\n \"number_of_replicas\" : 0,\n }\n analyzer_map := map[string]interface{}{\n \"my_english_analyzer\": map[string]interface{}{\n \"tokenizer\": \"standard\",\n \"char_filter\": []string{ \"html_strip\" } ,\n \"filter\" : []string{\"english_possessive_stemmer\", \n \"lowercase\", \"english_stop\", \n \"english_stemmer\", \n \"asciifolding\", \"icu_folding\"},\n },\n }\n filter_map := map[string]interface{}{\n \"english_stop\": map[string]interface{}{\n \"type\": \"stop\",\n \"stopwords\": \"_english_\",\n },\n \"english_possessive_stemmer\": map[string]interface{}{\n \"type\": \"stemmer\",\n \"language\": \"possessive_english\",\n },\n \"english_stemmer\" : map[string]interface{}{\n \"type\" : \"stemmer\",\n \"name\" : \"english\",\n },\n \"my_folding\": map[string]interface{}{\n \"type\": \"asciifolding\",\n \"preserve_original\": \"false\",\n },\n }\n analysis_map := map[string]interface{}{\n \"analyzer\": analyzer_map,\n \"filter\" : filter_map,\n }\n settings := map[string]interface{}{\n \"index\": index_map,\n \"analysis\": analysis_map,\n }\n\n // TODO delete?\n\t// we currently manually create the autocomplete mapping\n\t/*ac := mapping{\n\t\tProperties: map[string]mappingProperty{\n\t\t\t\"sugg\": mappingProperty{\n\t\t\t\t\"type\": \"completion\",\n\t\t\t\t\"payloads\": true,\n\t\t\t},\n\t\t},\n\t}*/\n\n\tmappings := map[string]mapping{\n\t\ti.typ: doc,\n //\t\"autocomplete\": ac,\n\t}\n\n fmt.Println(mappings)\n\n\t//_, err := i.conn.CreateIndex(i.name).BodyJson(map[string]interface{}{\"mappings\": mappings}).Do()\n\t_, err := i.conn.CreateIndex(i.name).BodyJson(map[string]interface{}{\"mappings\": mappings, \"settings\": settings}).Do()\n\n if err != nil {\n fmt.Println(\"Error \", err)\n\t\tfmt.Println(\"!!!!Get Error when using client to create index\")\n\t}\n\n\treturn err\n}", "func (e 
*ElasticSearch) Setup(bangs []Bang) error {\n\tif _, err := e.Client.CreateIndex(e.Index).Body(e.mapping()).Do(context.TODO()); err != nil {\n\t\treturn err\n\t}\n\n\tfor _, b := range bangs {\n\t\tfor _, t := range b.Triggers {\n\t\t\tq := struct {\n\t\t\t\tCompletion *elastic.SuggestField `json:\"bang_suggest\"`\n\t\t\t}{\n\t\t\t\telastic.NewSuggestField().Input(t).Weight(0),\n\t\t\t}\n\n\t\t\t_, err := e.Client.Index().\n\t\t\t\tIndex(e.Index).\n\t\t\t\tType(e.Type).\n\t\t\t\tBodyJson(&q).\n\t\t\t\tDo(context.TODO())\n\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func CreateCommonIndexes(mgr manager.Manager) error {\n\tfor _, ia := range getIndexArgs() {\n\t\tif err := mgr.GetFieldIndexer().IndexField(context.TODO(), ia.obj, ia.field, ia.extractValue); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (conf *Config) CompareIndexes(client *pilosa.Client, mayCreate, mustCreate bool) error {\n\terrs := make([]error, 0)\n\tschema, err := client.Schema()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"retrieving schema\")\n\t}\n\tdbIndexes := schema.Indexes()\n\tvar dbIndex *pilosa.Index\n\tchanged := false\n\tfor _, index := range conf.indexes {\n\t\tdbIndex = dbIndexes[index.FullName]\n\t\tif dbIndex == nil {\n\t\t\tif !mayCreate {\n\t\t\t\terrs = append(errs, fmt.Errorf(\"index '%s' does not exist\", index.FullName))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tchanged = true\n\t\t\tdbIndex = schema.Index(index.FullName)\n\t\t} else {\n\t\t\tif mustCreate {\n\t\t\t\terrs = append(errs, fmt.Errorf(\"index '%s' already exists\", index.FullName))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\t// if we got here, the index now exists, so we can do the same thing to fields...\n\t\tchangedFields, fieldErrs := conf.CompareFields(client, dbIndex, index, mayCreate, mustCreate)\n\t\tif changedFields {\n\t\t\tchanged = true\n\t\t}\n\t\terrs = append(errs, fieldErrs...)\n\t}\n\tif changed {\n\t\tfmt.Printf(\"changes made to db, 
syncing...\\n\")\n\t\terr = client.SyncSchema(schema)\n\t\tif err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\tif len(errs) > 0 {\n\t\treturn errs[0]\n\t}\n\treturn nil\n}", "func (s *SiteSearchTagsDAO) AddTagsSearchIndex(docID string, doctype string, tags []string) error {\n\n\tfor _, v := range tags {\n\n\t\tcount, err := s.Exists(v)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error attempting to get record count for keyword %s with error %s\", v, err)\n\t\t}\n\n\t\tlog.Info().Msgf(\"Found %d site search tag records for keyworkd %s\", count, v)\n\n\t\t// Determine if the document exists already\n\t\tif count == 0 {\n\t\t\tlog.Info().Msgf(\"]Tag does not exist for %s in the database\", v)\n\t\t\tvar newSTM models.SiteSearchTagsModel\n\t\t\tnewSTM.Name = v\n\t\t\tnewSTM.TagsID = models.GenUUID()\n\t\t\tvar doc models.Documents\n\t\t\tvar docs []models.Documents\n\t\t\tdoc.DocType = doctype\n\t\t\tdoc.DocumentID = docID\n\t\t\tdocs = append(docs, doc)\n\n\t\t\tnewSTM.Documents = docs\n\t\t\tlog.Info().Msgf(\"Inserting new tag %s into database\", v)\n\t\t\terr = s.InsertSiteSearchTags(&newSTM)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"error inserting new site search tag for keyword %s with error %s\", v, err)\n\t\t\t}\n\t\t\tlog.Info().Msgf(\"Tag %s inserted successfully\", v)\n\t\t\t// If not, then we add to existing documents\n\t\t} else {\n\n\t\t\tstm, err := s.GetSiteSearchTagByName(v)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"error getting current instance of searchtag for keyword %s with error %s\", v, err)\n\t\t\t}\n\t\t\tlog.Info().Msgf(\"Found existing searchtagid record for %s\", stm.Name)\n\t\t\t//fmt.Println(mtm.Documents)\n\n\t\t\t// Get the list of documents\n\t\t\tdocs := stm.Documents\n\n\t\t\t// For the list of documents, find the document ID we are looking for\n\t\t\t// If not found, then we update the document list with the document ID\n\t\t\tfound := false\n\t\t\tfor _, d := range docs {\n\t\t\t\tif d.DocumentID 
== v {\n\t\t\t\t\tfound = true\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif found {\n\t\t\t\tlog.Info().Msgf(\"Updating tag, %s with document id %s\", v, docID)\n\t\t\t\tvar doc models.Documents\n\t\t\t\tdoc.DocType = doctype\n\t\t\t\tdoc.DocumentID = docID\n\t\t\t\tdocs = append(docs, doc)\n\t\t\t\tstm.Documents = docs\n\t\t\t\t//fmt.Println(mtm)\n\t\t\t\terr = s.UpdateSiteSearchTags(&stm)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"error updating searchtag for keyword %s with error %s\", v, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (esHandler *ESHandler) Index(verses []Verse) error {\n\tctx := context.Background()\n\tserivce, err := esHandler.Client.BulkProcessor().Name(\"ScriptureProcessor\").Workers(2).BulkActions(1000).Do(ctx)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Error initializing BulkProcessor\")\n\t}\n\tdefer serivce.Close()\n\n\tfor _, v := range verses {\n\t\tid := v.GetID()\n\t\tr := elastic.NewBulkIndexRequest().Index(esHandler.ESIndex).Type(\"Verse\").Id(id).Doc(v)\n\t\tserivce.Add(r)\n\t}\n\treturn nil\n}", "func (c *Couchbase) CreateIndexes(numReplicas int) ([]gocb.IndexInfo, []error) {\n\thosts := strings.Split(c.config.Hosts, \",\")\n\n\tvar indexErrors []error\n\n\tif len(c.indexes) == 0 {\n\t\tnoIndexes := errorNoRegisteredIndexes\n\t\tindexerLog.With(noIndexes)\n\t\tindexErrors = append(indexErrors, noIndexes)\n\t}\n\n\tfor _, index := range c.indexes {\n\n\t\terr := c.createIndex(index, hosts[0], numReplicas, true)\n\n\t\tif err != nil {\n\t\t\tindexerLog.With(\"error\", pkgerr.WithStack(err)).Error()\n\t\t\tindexErrors = append(indexErrors, err)\n\t\t}\n\t}\n\n\tindexes, err := c.Bucket.Manager(Cb.config.BucketName, Cb.config.BucketPassword).GetIndexes()\n\n\tif err != nil {\n\t\tindexErrors = append(indexErrors, err)\n\t}\n\n\tfor _, indexInfo := range indexes {\n\t\tindexerLog.With(structs.Map(indexInfo)).Info(\"Registered index\")\n\t}\n\n\treturn indexes, indexErrors\n}", "func EnsureIndexes(ctx 
context.Context, adminSvc *apiv1.FirestoreAdminClient, tuples IndexList, indexParent string) error {\n\tl := log.WithFields(log.Fields{trace.Component: BackendName})\n\tvar tasks []indexTask\n\n\t// create the indexes\n\tfor _, tuple := range tuples {\n\t\toperation, err := adminSvc.CreateIndex(ctx, &adminpb.CreateIndexRequest{\n\t\t\tParent: indexParent,\n\t\t\tIndex: &adminpb.Index{\n\t\t\t\tQueryScope: adminpb.Index_COLLECTION,\n\t\t\t\tFields: tuple,\n\t\t\t},\n\t\t})\n\t\tif err != nil && status.Code(err) != codes.AlreadyExists {\n\t\t\treturn ConvertGRPCError(err)\n\t\t}\n\t\t// operation can be nil if error code is codes.AlreadyExists.\n\t\tif operation != nil {\n\t\t\ttasks = append(tasks, indexTask{operation, tuple})\n\t\t}\n\t}\n\n\tstop := periodIndexUpdate(l)\n\tfor _, task := range tasks {\n\t\terr := waitOnIndexCreation(ctx, l, task)\n\t\tif err != nil {\n\t\t\treturn trace.Wrap(err)\n\t\t}\n\t}\n\tstop <- struct{}{}\n\n\treturn nil\n}", "func TestEnsureHashIndex(t *testing.T) {\n\tc := createClient(t, nil)\n\tdb := ensureDatabase(nil, c, \"index_test\", nil, t)\n\n\ttestOptions := []*driver.EnsureHashIndexOptions{\n\t\tnil,\n\t\t{Unique: true, Sparse: false},\n\t\t{Unique: true, Sparse: true},\n\t\t{Unique: false, Sparse: false},\n\t\t{Unique: false, Sparse: true},\n\t}\n\n\tfor i, options := range testOptions {\n\t\tcol := ensureCollection(nil, db, fmt.Sprintf(\"hash_index_test_%d\", i), nil, t)\n\n\t\tidx, created, err := col.EnsureHashIndex(nil, []string{\"name\"}, options)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Failed to create new index: %s\", describe(err))\n\t\t}\n\t\tif !created {\n\t\t\tt.Error(\"Expected created to be true, got false\")\n\t\t}\n\t\tif idxType := idx.Type(); idxType != driver.HashIndex {\n\t\t\tt.Errorf(\"Expected HashIndex, found `%s`\", idxType)\n\t\t}\n\t\tif options != nil && idx.Unique() != options.Unique {\n\t\t\tt.Errorf(\"Expected Unique to be %t, found `%t`\", options.Unique, idx.Unique())\n\t\t}\n\t\tif options != 
nil && idx.Sparse() != options.Sparse {\n\t\t\tt.Errorf(\"Expected Sparse to be %t, found `%t`\", options.Sparse, idx.Sparse())\n\t\t}\n\n\t\t// Index must exists now\n\t\tif found, err := col.IndexExists(nil, idx.Name()); err != nil {\n\t\t\tt.Fatalf(\"Failed to check index '%s' exists: %s\", idx.Name(), describe(err))\n\t\t} else if !found {\n\t\t\tt.Errorf(\"Index '%s' does not exist, expected it to exist\", idx.Name())\n\t\t}\n\n\t\t// Ensure again, created must be false now\n\t\t_, created, err = col.EnsureHashIndex(nil, []string{\"name\"}, options)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Failed to re-create index: %s\", describe(err))\n\t\t}\n\t\tif created {\n\t\t\tt.Error(\"Expected created to be false, got true\")\n\t\t}\n\n\t\t// Remove index\n\t\tif err := idx.Remove(nil); err != nil {\n\t\t\tt.Fatalf(\"Failed to remove index '%s': %s\", idx.Name(), describe(err))\n\t\t}\n\n\t\t// Index must not exists now\n\t\tif found, err := col.IndexExists(nil, idx.Name()); err != nil {\n\t\t\tt.Fatalf(\"Failed to check index '%s' exists: %s\", idx.Name(), describe(err))\n\t\t} else if found {\n\t\t\tt.Errorf(\"Index '%s' does exist, expected it not to exist\", idx.Name())\n\t\t}\n\t}\n}", "func (db *MongoDB) Init() error {\n\tsess := db.sess.Copy()\n\tdefer sess.Close()\n\ttasks := db.tasks(sess)\n\tnodes := db.nodes(sess)\n\n\tnames, err := sess.DB(db.conf.Database).CollectionNames()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error listing collection names in database %s: %v\", db.conf.Database, err)\n\t}\n\tvar tasksFound bool\n\tvar nodesFound bool\n\tfor _, n := range names {\n\t\tswitch n {\n\t\tcase \"tasks\":\n\t\t\ttasksFound = true\n\t\tcase \"nodes\":\n\t\t\tnodesFound = true\n\t\t}\n\t}\n\n\tif !tasksFound {\n\t\terr = tasks.Create(&mgo.CollectionInfo{})\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error creating tasks collection in database %s: %v\", db.conf.Database, err)\n\t\t}\n\n\t\terr = tasks.EnsureIndex(mgo.Index{\n\t\t\tKey: []string{\"-id\", 
\"-creationtime\"},\n\t\t\tUnique: true,\n\t\t\tDropDups: true,\n\t\t\tBackground: true,\n\t\t\tSparse: true,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif !nodesFound {\n\t\terr = nodes.Create(&mgo.CollectionInfo{})\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error creating nodes collection in database %s: %v\", db.conf.Database, err)\n\t\t}\n\n\t\terr = nodes.EnsureIndex(mgo.Index{\n\t\t\tKey: []string{\"id\"},\n\t\t\tUnique: true,\n\t\t\tDropDups: true,\n\t\t\tBackground: true,\n\t\t\tSparse: true,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (h *HTTPApi) listIndex(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tcollections := h.storageNode.Datasources[ps.ByName(\"datasource\")].GetMeta().Databases[ps.ByName(\"dbname\")].ShardInstances[ps.ByName(\"shardinstance\")].Collections[ps.ByName(\"collectionname\")]\n\n\t// Now we need to return the results\n\tif bytes, err := json.Marshal(collections.Indexes); err != nil {\n\t\t// TODO: log this better?\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t} else {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.Write(bytes)\n\t}\n}", "func (m *Mongodb) Execute() {\n\tm.createExpireIndex()\n\tm.backupData()\n}", "func CompressIndex(ctx context.Context, dbo Database) error {\n\tdb := dbo.(*database)\n\tsql := db.getRawDB()\n\n\tconn, err := sql.Conn(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer conn.Close()\n\n\ttx, err := conn.BeginTx(ctx, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\tif tx != nil {\n\t\t\ttx.Rollback()\n\t\t}\n\t}()\n\n\t_, err = tx.ExecContext(ctx, `update docs set txt=compress(txt) where not iscompressed(txt)`)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = tx.Commit()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttx = nil\n\treturn nil\n}", "func Index(s *mgo.Session) {\n\tsession := s.Copy()\n\tdefer session.Close()\n\n\tcontext := 
session.DB(\"store\").C(\"locations\")\n\n\tindex := mgo.Index{\n\t\tKey: []string{\"id\"}, //Index key fields; prefix name with dash (-) for descending order\n\t\tUnique: true, //Prevent two documents from having the same index key\n\t\tDropDups: true, //Drop documents with the same index key as a previously indexed one\n\t\tBackground: true, //Build index in background and return immediately\n\t\tSparse: true, //Only index documents containing the Key fields\n\t}\n\n\terr := context.EnsureIndex(index)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n}", "func DBIndexer(db *sql.DB, dbPipe <-chan RollupState) {\n\n\tdbInsertMap := make(map[string]*bulkinsert.BulkInsert)\n\tfor data := range dbPipe {\n\t\ttableName := fmt.Sprintf(\"monitor_metrics_%s_%s\", data.granularity.windowName, data.msg.PluginName)\n\t\tcolumnNames, err := plugins.GetColumnsForTable(tableName, db)\n\t\tif len(columnNames) == 0 || err != nil {\n\t\t\tlog.Error().Err(err).Msgf(\"Table %s does not exist \", tableName)\n\t\t\tcontinue\n\t\t}\n\n\t\tif _, ok := dbInsertMap[tableName]; !ok { //New DB Bulk inserter\n\t\t\tdbInsertMap[tableName] = bulkinsert.NewBulkInsert(\n\t\t\t\tdb,\n\t\t\t\ttableName,\n\t\t\t\tdbConfigMap[data.granularity.windowName].maxBatchSize,\n\t\t\t\tdbConfigMap[data.granularity.windowName].maxBatchAge,\n\t\t\t\tcolumnNames)\n\t\t}\n\t\tmetricNames := []string{\n\t\t\t\"ConnectTimeAvg\",\n\t\t\t\"Es\",\n\t\t\t\"EsResponse\",\n\t\t\t\"EsTimeout\",\n\t\t\t\"FirstByteTimeAvg\",\n\t\t\t\"ResponseTimeAvg\",\n\t\t\t\"ResponseTimeMax\",\n\t\t\t\"ResponseTimeMin\",\n\t\t\t\"SizeAvg\",\n\t\t\t\"SpeedAvg\",\n\t\t}\n\t\tcolumnValues := metricsToString(metricNames,data.msg)\n\t\tif err := dbInsertMap[tableName].Insert(columnValues); err != nil {\n\t\t\tlog.Error().Err(err).Msgf(\"Error Inserting column values to table %s \", tableName)\n\t\t}\n\t}\n}", "func (i ImageIndexer) AddToIndex(request AddToIndexRequest) error {\n\tbuildDir, outDockerfile, cleanup, err := 
buildContext(request.Generate, request.OutDockerfile)\n\tdefer cleanup()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdatabasePath, err := i.ExtractDatabase(buildDir, request.FromIndex, request.CaFile, request.SkipTLSVerify, request.PlainHTTP)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Run opm registry add on the database\n\taddToRegistryReq := registry.AddToRegistryRequest{\n\t\tBundles: request.Bundles,\n\t\tInputDatabase: databasePath,\n\t\tPermissive: request.Permissive,\n\t\tMode: request.Mode,\n\t\tSkipTLSVerify: request.SkipTLSVerify,\n\t\tPlainHTTP: request.PlainHTTP,\n\t\tContainerTool: i.PullTool,\n\t\tOverwrite: request.Overwrite,\n\t\tEnableAlpha: request.EnableAlpha,\n\t}\n\n\t// Add the bundles to the registry\n\terr = i.RegistryAdder.AddToRegistry(addToRegistryReq)\n\tif err != nil {\n\t\ti.Logger.WithError(err).Debugf(\"unable to add bundle to registry\")\n\t\treturn err\n\t}\n\n\t// generate the dockerfile\n\tdockerfile := i.DockerfileGenerator.GenerateIndexDockerfile(request.BinarySourceImage, databasePath)\n\terr = write(dockerfile, outDockerfile, i.Logger)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif request.Generate {\n\t\treturn nil\n\t}\n\n\t// build the dockerfile\n\terr = build(outDockerfile, request.Tag, i.CommandRunner, i.Logger)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func CreateIndex(context *web.AppContext) *web.AppError {\n\n\tdb := context.MDB\n\tvar input model.Index\n\tjson.NewDecoder(context.Body).Decode(&input)\n\n\terr := db.Session.DB(\"\").C(input.Target).EnsureIndex(input.Index)\n\tif err != nil {\n\t\tmessage := fmt.Sprintf(\"Error creating index [%+v]\", input)\n\t\treturn &web.AppError{err, message, http.StatusInternalServerError}\n\t}\n\n\treturn nil\n}", "func (o MongoDBCollectionResourceOutput) Indexes() MongoIndexArrayOutput {\n\treturn o.ApplyT(func(v MongoDBCollectionResource) []MongoIndex { return v.Indexes }).(MongoIndexArrayOutput)\n}", "func m4accountIndices(db *IndexerDb, state 
*MigrationState) error {\n\tsqlLines := []string{\n\t\t\"CREATE INDEX IF NOT EXISTS account_asset_by_addr ON account_asset ( addr )\",\n\t\t\"CREATE INDEX IF NOT EXISTS asset_by_creator_addr ON asset ( creator_addr )\",\n\t\t\"CREATE INDEX IF NOT EXISTS app_by_creator ON app ( creator )\",\n\t\t\"CREATE INDEX IF NOT EXISTS account_app_by_addr ON account_app ( addr )\",\n\t}\n\treturn sqlMigration(db, state, sqlLines)\n}", "func TestEnsureFullTextIndex(t *testing.T) {\n\tc := createClient(t, nil)\n\tdb := ensureDatabase(nil, c, \"index_test\", nil, t)\n\n\ttestOptions := []*driver.EnsureFullTextIndexOptions{\n\t\tnil,\n\t\t{MinLength: 2},\n\t\t{MinLength: 20},\n\t}\n\n\tfor i, options := range testOptions {\n\t\tcol := ensureCollection(nil, db, fmt.Sprintf(\"fulltext_index_test_%d\", i), nil, t)\n\n\t\tidx, created, err := col.EnsureFullTextIndex(nil, []string{\"name\"}, options)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Failed to create new index: %s\", describe(err))\n\t\t}\n\t\tif !created {\n\t\t\tt.Error(\"Expected created to be true, got false\")\n\t\t}\n\t\tif idxType := idx.Type(); idxType != driver.FullTextIndex {\n\t\t\tt.Errorf(\"Expected FullTextIndex, found `%s`\", idxType)\n\t\t}\n\t\tif options != nil && idx.MinLength() != options.MinLength {\n\t\t\tt.Errorf(\"Expected %d, found `%d`\", options.MinLength, idx.MinLength())\n\t\t}\n\n\t\t// Index must exists now\n\t\tif found, err := col.IndexExists(nil, idx.Name()); err != nil {\n\t\t\tt.Fatalf(\"Failed to check index '%s' exists: %s\", idx.Name(), describe(err))\n\t\t} else if !found {\n\t\t\tt.Errorf(\"Index '%s' does not exist, expected it to exist\", idx.Name())\n\t\t}\n\n\t\t// Ensure again, created must be false now\n\t\t_, created, err = col.EnsureFullTextIndex(nil, []string{\"name\"}, options)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Failed to re-create index: %s\", describe(err))\n\t\t}\n\t\tif created {\n\t\t\tt.Error(\"Expected created to be false, got true\")\n\t\t}\n\n\t\t// Remove 
index\n\t\tif err := idx.Remove(nil); err != nil {\n\t\t\tt.Fatalf(\"Failed to remove index '%s': %s\", idx.Name(), describe(err))\n\t\t}\n\n\t\t// Index must not exists now\n\t\tif found, err := col.IndexExists(nil, idx.Name()); err != nil {\n\t\t\tt.Fatalf(\"Failed to check index '%s' exists: %s\", idx.Name(), describe(err))\n\t\t} else if found {\n\t\t\tt.Errorf(\"Index '%s' does exist, expected it not to exist\", idx.Name())\n\t\t}\n\t}\n}", "func DB_IndexAccount(db gorm.DB) {\n\n\tcols := []string{\n\t\t\"acc_active\", \"company\", \"ticker\", \"acc_ref\",\n\t\t\"on_hold\"\t, \"is_client\", \"is_supplier\", \"online\"}\n\n\tfor _, c := range cols {\n\t\tdb.Model(&Account{}).AddIndex(\"idx_\" + c, c)\n\t}\n}", "func (c *Client) Index(d Document, extraArgs url.Values) (*Response, error) {\n\tr := Request{\n\t\tQuery: d.Fields,\n\t\tIndexList: []string{d.Index.(string)},\n\t\tTypeList: []string{d.Type},\n\t\tExtraArgs: extraArgs,\n\t\tMethod: \"POST\",\n\t}\n\n\tif d.ID != nil {\n\t\tr.Method = \"PUT\"\n\t\tr.ID = d.ID.(string)\n\t}\n\n\treturn c.Do(&r)\n}", "func create_index_files(ps []Post, indexname string) {\n\tvar prev, next int\n\tindex_page_flag := false\n\tindex := 1\n\tnum := 0\n\tlength := len(ps)\n\tsort.Sort(ByODate(ps))\n\tsort_index := make([]Post, 0)\n\tfor i := range ps {\n\t\tif ps[i].Changed {\n\t\t\tindex_page_flag = true\n\t\t}\n\t\tsort_index = append(sort_index, ps[i])\n\t\tnum = num + 1\n\t\tif num == POSTN {\n\t\t\tif !check_index(indexname, index) {\n\t\t\t\tindex_page_flag = true\n\t\t\t}\n\n\t\t\t/* Only changed indexes should get rebuild*/\n\t\t\tif index_page_flag == true {\n\t\t\t\tindex_page_flag = false\n\t\t\t\tsort.Sort(ByDate(sort_index))\n\t\t\t\tif index == 1 {\n\t\t\t\t\tprev = 0\n\t\t\t\t} else {\n\t\t\t\t\tprev = index - 1\n\t\t\t\t}\n\t\t\t\tif (index*POSTN) < length && (length-index*POSTN) > POSTN {\n\t\t\t\t\tnext = index + 1\n\t\t\t\t} else if (index * POSTN) == length {\n\t\t\t\t\tnext = -1\n\t\t\t\t} else 
{\n\t\t\t\t\tnext = 0\n\t\t\t\t}\n\n\t\t\t\tbuild_index(sort_index, index, prev, next, indexname)\n\t\t\t}\n\n\t\t\tsort_index = make([]Post, 0)\n\t\t\tindex = index + 1\n\t\t\tnum = 0\n\n\t\t}\n\t}\n\tif len(sort_index) > 0 {\n\t\tsort.Sort(ByDate(sort_index))\n\t\tbuild_index(sort_index, 0, index-1, -1, indexname)\n\n\t}\n}", "func (oplog *OpLog) init(maxBytes int) {\n\toplogExists := false\n\tobjectsExists := false\n\tnames, _ := oplog.s.DB(\"\").CollectionNames()\n\tfor _, name := range names {\n\t\tswitch name {\n\t\tcase \"oplog_ops\":\n\t\t\toplogExists = true\n\t\tcase \"oplog_states\":\n\t\t\tobjectsExists = true\n\t\t}\n\t}\n\tif !oplogExists {\n\t\tlog.Info(\"OPLOG creating capped collection\")\n\t\terr := oplog.s.DB(\"\").C(\"oplog_ops\").Create(&mgo.CollectionInfo{\n\t\t\tCapped: true,\n\t\t\tMaxBytes: maxBytes,\n\t\t})\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\tif !objectsExists {\n\t\tlog.Info(\"OPLOG creating objects index\")\n\t\tc := oplog.s.DB(\"\").C(\"oplog_states\")\n\t\t// Replication query\n\t\tif err := c.EnsureIndexKey(\"event\", \"ts\"); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\t// Replication query with a filter on types\n\t\tif err := c.EnsureIndexKey(\"event\", \"data.t\", \"ts\"); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\t// Fallback query\n\t\tif err := c.EnsureIndexKey(\"ts\"); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\t// Fallback query with a filter on types\n\t\tif err := c.EnsureIndexKey(\"data.t\", \"ts\"); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n}", "func BackupIndexes(metadataFile *utils.FileWithByteCount) {\n\tgplog.Verbose(\"Writing CREATE INDEX statements to metadata file\")\n\tindexes := GetIndexes(connectionPool)\n\tobjectCounts[\"Indexes\"] = len(indexes)\n\tindexMetadata := GetCommentsForObjectType(connectionPool, TYPE_INDEX)\n\tPrintCreateIndexStatements(metadataFile, globalTOC, indexes, indexMetadata)\n}", "func CreateIndexIfNotExists(e *elastic.Client, index string) error {\n\t// 
Use the IndexExists service to check if a specified index exists.\n\texists, err := e.IndexExists(index).Do(context.Background())\n\tif err != nil {\n\t\tlog.Printf(\"elastic: unable to check if Index exists - %s\\n\", err)\n\t\treturn err\n\t}\n\n\tif exists {\n\t\treturn nil\n\t}\n\n\t// Create a new index.\n\tv := reflect.TypeOf(Point{})\n\n\tmapping := MapStr{\n\t\t\"settings\": MapStr{\n\t\t\t\"number_of_shards\": 1,\n\t\t\t\"number_of_replicas\": 1,\n\t\t},\n\t\t\"mappings\": MapStr{\n\t\t\t\"doc\": MapStr{\n\t\t\t\t\"properties\": MapStr{},\n\t\t\t},\n\t\t},\n\t}\n\tfor i := 0; i < v.NumField(); i++ {\n\t\tfield := v.Field(i)\n\t\ttag := field.Tag.Get(\"elastic\")\n\t\tif len(tag) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\ttagfields := strings.Split(tag, \",\")\n\t\tmapping[\"mappings\"].(MapStr)[\"doc\"].(MapStr)[\"properties\"].(MapStr)[field.Name] = MapStr{}\n\t\tfor _, tagfield := range tagfields {\n\t\t\ttagfieldValues := strings.Split(tagfield, \":\")\n\t\t\tmapping[\"mappings\"].(MapStr)[\"doc\"].(MapStr)[\"properties\"].(MapStr)[field.Name].(MapStr)[tagfieldValues[0]] = tagfieldValues[1]\n\t\t}\n\t}\n\tmappingJSON, err := json.Marshal(mapping)\n\tif err != nil {\n\t\tlog.Printf(\"elastic: error on json marshal - %s\\n\", err)\n\t\treturn err\n\t}\n\n\t_, err = e.CreateIndex(index).BodyString(string(mappingJSON)).Do(context.Background())\n\tif err != nil {\n\t\tlog.Printf(\"elastic: error creating elastic index %s - %s\\n\", index, err)\n\t\treturn err\n\t}\n\tlog.Printf(\"elastic: index %s created\\n\", index)\n\treturn nil\n}", "func (o Model) RebuildIndexes(pattern string) error {\n\t// Quick exit in case no index exists\n\tif o.IndexSet == nil || len(o.IndexSet.Indexes) == 0 {\n\t\treturn nil\n\t}\n\n\tp := res.Pattern(pattern)\n\tif !p.IsValid() {\n\t\treturn errors.New(\"invalid pattern\")\n\t}\n\n\t// Drop existing index entries\n\tfor _, idx := range o.IndexSet.Indexes {\n\t\terr := o.BadgerDB.DB.DropPrefix([]byte(idx.Name))\n\t\tif err != nil 
{\n\t\t\treturn err\n\t\t}\n\t}\n\n\tt := reflect.TypeOf(o.Type)\n\n\t// Create a prefix to seek from\n\tridPrefix := pattern\n\ti := p.IndexWildcard()\n\tif i >= 0 {\n\t\tridPrefix = pattern[:i]\n\t}\n\n\t// Create new index entries in a single transaction\n\treturn o.BadgerDB.DB.Update(func(txn *badger.Txn) error {\n\t\tit := txn.NewIterator(badger.DefaultIteratorOptions)\n\t\tdefer it.Close()\n\t\tprefix := []byte(ridPrefix)\n\t\tfor it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {\n\t\t\t// Ensure the key matches the pattern\n\t\t\tif !p.Matches(string(it.Item().Key())) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// Load item and unmarshal it\n\t\t\titem := it.Item()\n\t\t\tv := reflect.New(t)\n\t\t\terr := item.Value(func(dta []byte) error {\n\t\t\t\treturn json.Unmarshal(dta, v.Interface())\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t// Loop through indexes and generate a new entry per index\n\t\t\tfor _, idx := range o.IndexSet.Indexes {\n\t\t\t\trname := item.KeyCopy(nil)\n\t\t\t\tidxKey := idx.getKey(rname, idx.Key(v.Elem().Interface()))\n\t\t\t\terr = txn.SetEntry(&badger.Entry{Key: idxKey, Value: nil, UserMeta: typeIndex})\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n}", "func AddDefaultIndexes(ctx context.Context, mgr ctrl.Manager) error {\n\tif err := ByMachineNode(ctx, mgr); err != nil {\n\t\treturn err\n\t}\n\n\tif err := ByMachineProviderID(ctx, mgr); err != nil {\n\t\treturn err\n\t}\n\n\tif feature.Gates.Enabled(feature.ClusterTopology) {\n\t\tif err := ByClusterClassName(ctx, mgr); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif feature.Gates.Enabled(feature.MachinePool) {\n\t\tif err := ByMachinePoolNode(ctx, mgr); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := ByMachinePoolProviderID(ctx, mgr); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func UpdatePhotosIndexes(gallery models.Gallery, photos []models.Photo) error {\n\tfor _, photo := 
range photos {\n\t\terr := DB.C(photosCollection).Update(bson.M{\n\t\t\t\"site_id\": gallery.SiteID,\n\t\t\t\"_id\": photo.ID,\n\t\t}, bson.M{\n\t\t\t\"$set\": bson.M{\n\t\t\t\t\"index\": photo.Index,\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (handler StormWatchHandler) Index(c *gin.Context) {\n\tstormWatchs := []m.StormWatch{}\t\n\tvar query = handler.db\n\n\tstartParam,startParamExist := c.GetQuery(\"start\")\n\tlimitParam,limitParamExist := c.GetQuery(\"limit\")\n\n\t//start param exist\n\tif startParamExist {\n\t\tstart,_ := strconv.Atoi(startParam)\n\t\tif start != 0 {\n\t\t\tquery = query.Offset(start).Order(\"created_at asc\")\t\t\n\t\t} else {\n\t\t\tquery = query.Offset(0).Order(\"created_at desc\")\n\t\t}\n\t} \n\n\t//limit param exist\n\tif limitParamExist {\n\t\tlimit,_ := strconv.Atoi(limitParam)\n\t\tquery = query.Limit(limit)\n\t} else {\n\t\tquery = query.Limit(10)\n\t}\n\n\tquery.Order(\"created_at desc\").Find(&stormWatchs)\n\tc.JSON(http.StatusOK, stormWatchs)\n\treturn\n}", "func (engine *Engine) initIndexer(options *types.EngineOpts) {\n\t// 初始化索引器\n\tengine.indexers = make([]*core.Indexer, options.NumShards)\n\tfor shard := 0; shard < options.NumShards; shard++ {\n\t\tindexer, _ := core.NewIndexer(*options.IndexerOpts)\n\t\tengine.indexers[shard] = indexer\n\t}\n\n\t// 初始所有管道\n\tengine.indexerAddDocChans = make([]chan indexerAddDocReq, options.NumShards)\n\tengine.indexerLookupChans = make([]chan indexerLookupReq, options.NumShards)\n\tfor shard := 0; shard < options.NumShards; shard++ {\n\t\tengine.indexerAddDocChans[shard] = make(chan indexerAddDocReq, options.IndexerBufLen)\n\t\tengine.indexerLookupChans[shard] = make(chan indexerLookupReq, options.IndexerBufLen)\n\t}\n}", "func (tbl DbCompoundTable) CreateIndexes(ifNotExist bool) (err error) {\n\n\terr = tbl.CreateAlphaBetaIndex(ifNotExist)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}" ]
[ "0.804923", "0.78490174", "0.7118752", "0.70991796", "0.7068827", "0.69319904", "0.6840407", "0.6802941", "0.6791201", "0.6669298", "0.66180503", "0.6590874", "0.6555461", "0.6504994", "0.6485711", "0.64855134", "0.6479472", "0.63830847", "0.63748515", "0.63636535", "0.63266534", "0.6280624", "0.6262745", "0.6249875", "0.6248216", "0.6154794", "0.6134977", "0.6091284", "0.60541373", "0.60509175", "0.6038511", "0.60250837", "0.60208124", "0.59810007", "0.5953415", "0.5949399", "0.59493774", "0.5925435", "0.59179777", "0.59179", "0.59081966", "0.59004825", "0.58817345", "0.586037", "0.5794697", "0.57939404", "0.57869214", "0.57646203", "0.5729614", "0.5680908", "0.56549686", "0.56447667", "0.56233025", "0.5606327", "0.5589059", "0.55874026", "0.55650383", "0.5555525", "0.55297685", "0.5525469", "0.5508432", "0.55004805", "0.5488656", "0.54703975", "0.5465355", "0.5444844", "0.5443131", "0.54428905", "0.5436369", "0.54266685", "0.542079", "0.541674", "0.5404464", "0.5379314", "0.53613436", "0.536106", "0.5341716", "0.53366315", "0.5335227", "0.5332288", "0.53318673", "0.533138", "0.53258866", "0.5321675", "0.53215957", "0.5317429", "0.53163517", "0.5315874", "0.5314813", "0.5299393", "0.5293271", "0.52874404", "0.52740854", "0.52611643", "0.5245053", "0.52427274", "0.52379835", "0.5219057", "0.5215852", "0.5206846" ]
0.7907395
1
Close closes a mgo.Session value. Used to add defer statements for closing the copied session.
func (ds *DataStore) Close() { ds.MongoSession.Close() }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Close(session *mgo.Session) {\n\tsession.Close()\n\t}", "func SessionClose(testingInstance TestingInstance, session *discordgo.Session) {\n\terr := session.Close()\n\tif err != nil {\n\t\ttestingInstance.Error(err)\n\t}\n}", "func (sp *SessionProxy) Close() error { return sp.GetSession().Close() }", "func (s *MockSession) Close() {}", "func (s *Session) Close() error {\n\t_, err := s.srv.conn.request(\"DELETE\", \"/_session\", nil, nil, s, 0)\n\treturn err\n}", "func (s *Stream) sessionClose() {\n\ts.dieLock.Lock()\n\tdefer s.dieLock.Unlock()\n\n\tselect {\n\tcase <-s.die:\n\tdefault:\n\t\tclose(s.die)\n\t}\n}", "func (s *session) Close() error {\n\treturn s.obj.Call(sessionMethodClose, 0).Err\n}", "func Close() {\n\tsession.Close()\n}", "func (s *session) Close() error {\n\treturn s.s.Close()\n}", "func (_m *DBClient) CloseSession() {\n\t_m.Called()\n}", "func (s *Session) Close() error {\n\ttReq := &Request{\n\t\tMethod: \"session-close\",\n\t}\n\tr := &Response{}\n\n\terr := s.Client.request(tReq, r)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (_m *MgoSession) Close() {\n\t_m.Called()\n}", "func (me *GJUser) CloseSession(){\n\tme.qreq(\"sessions/close\",\"\")\n}", "func (d *Abstraction) CloseSession() {\n\tfor k, v := range d.Sigmap {\n\t\tdelete(d.Sigmap, k)\n\t\tclose(v)\n\t}\n\td.Conn.RemoveSignal(d.Recv)\n\td.Conn.Close()\n}", "func (fs *FakeSession) Close() error {\n\treturn nil\n}", "func (s *Session) Close() error {\n\tr := C.wt_session_close(s.s)\n\ts.s = nil\n\tif r != 0 {\n\t\treturn wtError(r)\n\t}\n\treturn nil\n}", "func (s *Session) Close() {\n\ts.Sid = DefaultSid\n}", "func (s *Session) close(ctx context.Context) error {\n\treturn s.doSession(ctx, func(ctx context.Context, conn *grpc.ClientConn, header *headers.RequestHeader) (*headers.ResponseHeader, interface{}, error) {\n\t\trequest := &api.CloseSessionRequest{\n\t\t\tHeader: header,\n\t\t}\n\t\tclient := api.NewSessionServiceClient(conn)\n\t\tresponse, err := 
client.CloseSession(ctx, request)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\treturn response.Header, response, nil\n\t})\n}", "func (s Session) Close() error {\n\tif err := DismCloseSession(*s.Handle); err != nil {\n\t\treturn err\n\t}\n\treturn DismShutdown()\n}", "func (session ClientSession) Close() error {\n\treturn wlanCloseHandle(windows.Handle(session), 0)\n}", "func (s *session) Close() error {\n\treturn s.conn.Close()\n}", "func (s *GosnmpSession) Close() error {\n\treturn s.gosnmpInst.Conn.Close()\n}", "func (c *Session) Close() {\n\tif c.devID != nil {\n\t\terr := c.devID.Close()\n\t\tif err != nil {\n\t\t\tc.log.Warn(fmt.Sprintf(\"Failed to close DevID handle: %v\", err))\n\t\t}\n\t}\n\n\tif c.ak != nil {\n\t\terr := c.ak.Close()\n\t\tif err != nil {\n\t\t\tc.log.Warn(fmt.Sprintf(\"Failed to close attestation key handle: %v\", err))\n\t\t}\n\t}\n\n\tif c.ekHandle != 0 {\n\t\tc.flushContext(c.ekHandle)\n\t}\n\n\tif c.rwc != nil {\n\t\tif closeTPM(c.rwc) {\n\t\t\treturn\n\t\t}\n\n\t\terr := c.rwc.Close()\n\t\tif err != nil {\n\t\t\tc.log.Warn(fmt.Sprintf(\"Failed to close TPM: %v\", err))\n\t\t}\n\t}\n}", "func (s *session) Close(e error) error {\n\ts.close(e, false)\n\t<-s.runClosed\n\treturn nil\n}", "func (s *Session) Close() error {\n\t// TODO: send end preformative (if Begin has been exchanged)\n\tselect {\n\tcase <-s.conn.done:\n\t\treturn s.conn.err\n\tcase s.conn.delSession <- s:\n\t\treturn nil\n\t}\n}", "func (s *Session) Close() {\n\tif s.session != nil {\n\t\ts.session.Close()\n\t}\n}", "func (js *JobSession) Close() error {\n\tif js.name == \"\" && js.tracker == nil {\n\t\treturn ErrorInvalidSession\n\t}\n\tjs.name = \"\"\n\tjs.tracker = nil\n\treturn nil\n}", "func (s *session) Close() {\n\tctx := context.TODO()\n\ts.RollbackTxn(ctx)\n}", "func (b *Base) CloseSession() error {\n\treturn ErrFunctionNotSupported\n}", "func (s Session) Close() error {\n\ts.conn.Close()\n\treturn s.cli.Close()\n}", "func 
CloseSession(mongoSession *mgo.Session) {\n\tlog.Infof(\"Closing Session \\n\")\n\tmongoSession.Close()\n}", "func CloseSession(c *gin.Context) {\n\tvar input models.SessionInput\n\tif err := c.ShouldBindJSON(&input); err != nil {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"error\": err.Error()})\n\t\treturn\n\t}\n\n\tresult := models.DB.\n\t\tModel(&models.Session{}).\n\t\tWhere(\"code = ?\", strings.ToUpper(input.Code)).\n\t\tUpdate(\"status\", \"closed\")\n\n\tif result.Error != nil {\n\t\tif errors.Is(result.Error, gorm.ErrRecordNotFound) {\n\t\t\tc.JSON(http.StatusBadRequest, gin.H{\"error\": result.Error.Error()})\n\t\t} else {\n\t\t\tc.JSON(http.StatusInternalServerError, gin.H{\"error\": result.Error.Error()})\n\t\t}\n\t\treturn\n\t}\n\n\tif result.RowsAffected == 0 {\n\t\tc.JSON(http.StatusOK, gin.H{\"data\": models.SessionClosedResult{\n\t\t\tCode: strings.ToUpper(input.Code),\n\t\t\tStatus: \"closed\",\n\t\t\tOK: false,\n\t\t\tReason: \"Already closed or does not exist.\", // already closed or could not find\n\t\t}})\n\t\treturn\n\t}\n\n\tif err := shuffleTopics(strings.ToUpper(input.Code)); err != nil {\n\t\tc.JSON(http.StatusOK, gin.H{\"data\": models.SessionClosedResult{\n\t\t\tCode: strings.ToUpper(input.Code),\n\t\t\tStatus: \"closed\",\n\t\t\tOK: false,\n\t\t\tReason: \"Not enough participants.\",\n\t\t}})\n\t\treturn\n\t}\n\n\tc.JSON(http.StatusOK, gin.H{\"data\": models.SessionClosedResult{\n\t\tCode: strings.ToUpper(input.Code),\n\t\tStatus: \"closed\",\n\t\tOK: true,\n\t\tReason: \"\",\n\t}})\n}", "func (s Session) Close() {\n\t// spec: Close(void);\n\ts.Go(_SessionClose, dbus.FlagNoReplyExpected, nil)\n}", "func (d *Driver) Close() {\n\tappiumReq := &appiumRequest{\n\t\t\"DELETE\",\n\t\tnil,\n\t\t\"/wd/hub/session/\" + d.sessionID,\n\t}\n\n\tresp := doAppiumRequest(appiumReq, d.driverClient, \"\")\n\n\tstatusCodeErrorHandler(\n\t\tresp.StatusCode, 500,\n\t\t\"appigo: unable to close session\",\n\t)\n}", "func (tre *Teoregistry) Close() 
{\n\ttre.session.Close()\n}", "func (es *EventStore) CloseSession() {\n\tes.ds.CloseSession()\n}", "func (d *DeviceGoUSB) CloseSession() error {\n\tvar req, rep Container\n\treq.Code = OC_CloseSession\n\terr := d.RunTransaction(&req, &rep, nil, nil, 0)\n\td.session = nil\n\treturn err\n}", "func (c *Client) close(err error) error {\n\tif c.session == nil {\n\t\treturn nil\n\t}\n\treturn c.session.Close(err)\n}", "func (d *Discord) Close() {\n\td.session.Close()\n}", "func (c Client) Close() error {\n\tlog.Debugln(\"closing quic session\")\n\treturn c.Session.CloseWithError(0, \"\")\n}", "func (s *Session) Close() {\n\ts.closeChan <- true\n}", "func (m *Store) Close() {\n\tif m.session != nil {\n\t\tm.session.Close()\n\t}\n}", "func Close(ctx context.Context) {\n\tmw := ctx.Value(mwContextKey{}).(sessionMiddleware)\n\tid := ctx.Value(sessionIdContextKey{}).(string)\n\tdelete(mw.sessions, id)\n}", "func (m *Model) Close() error {\n\treturn m.sess.Close()\n}", "func TestSessionClose(t *testing.T) {\n\t// TODO(r0mant): Implement this.\n\tt.Skip(\"Not Implemented\")\n}", "func (s *session) Close() error {\n\tserverSessions.Dec()\n\ts.closeOnce.Do(func() {\n\t\t// closing needs to happen asynchronously because the last client\n\t\t// (session writer) will try to close this session, causing a deadlock\n\t\t// because of closeOnce\n\t\tgo func() {\n\t\t\ts.log.Infof(\"Closing session %v\", s.id)\n\t\t\tif s.term != nil {\n\t\t\t\ts.term.Close()\n\t\t\t}\n\t\t\tclose(s.closeC)\n\n\t\t\t// close all writers in our multi-writer\n\t\t\ts.writer.Lock()\n\t\t\tdefer s.writer.Unlock()\n\t\t\tfor writerName, writer := range s.writer.writers {\n\t\t\t\ts.log.Infof(\"Closing session writer: %v\", writerName)\n\t\t\t\tcloser, ok := io.Writer(writer).(io.WriteCloser)\n\t\t\t\tif ok {\n\t\t\t\t\tcloser.Close()\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t})\n\treturn nil\n}", "func (session *session) Close() {\n\t_ = session.Context.Uninit()\n\tsession.Context.Free()\n\n\tif session.Playmic 
{\n\t\tsession.MicCaptureDevice.Uninit()\n\t\tsession.MicPlaybackDevice.Uninit()\n\t}\n\n\tsession.DefaultPlaybackDevice.Uninit()\n\tsession.MusicPlaybackDevice.Uninit()\n}", "func (b *Bot) Close() error {\n\treturn b.session.Close()\n}", "func (nh *NodeHost) CloseSession(ctx context.Context,\n\tsession *client.Session) error {\n\ttimeout, err := getTimeoutFromContext(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsession.PrepareForUnregister()\n\trs, err := nh.ProposeSession(session, timeout)\n\tif err != nil {\n\t\treturn err\n\t}\n\tselect {\n\tcase r := <-rs.CompletedC:\n\t\tif r.Completed() && r.GetResult() == session.ClientID {\n\t\t\treturn nil\n\t\t} else if r.Rejected() {\n\t\t\treturn ErrRejected\n\t\t} else if r.Timeout() {\n\t\t\treturn ErrTimeout\n\t\t} else if r.Terminated() {\n\t\t\treturn ErrClusterClosed\n\t\t}\n\t\tplog.Panicf(\"unknown v code %v, client id %d\",\n\t\t\tr, session.ClientID)\n\tcase <-ctx.Done():\n\t\tif ctx.Err() == context.Canceled {\n\t\t\treturn ErrCanceled\n\t\t} else if ctx.Err() == context.DeadlineExceeded {\n\t\t\treturn ErrTimeout\n\t\t}\n\t}\n\tpanic(\"should never reach here\")\n}", "func (d *DB) Close() error {\n\treturn d.sess.Close()\n}", "func (s *Session) Close() (err error) {\n\tselect {\n\tcase <-s.done:\n\t\treturn s.err\n\tdefault:\n\t\treturn s.closeWithError(ErrSessionClosed)\n\t}\n}", "func (s *Session) Close() {\n\ts.Closed = true\n\tclose(s.CloseSignal)\n}", "func (s *Session) Close() error {\n\ts.Watch(map[string]bool{\"enable\": false})\n\tclose(s.done)\n\treturn s.socket.Close()\n}", "func (mongoDBConnect MongoDBConnect) CloseSession() {\n\tdefer mongoDBConnect.mongoSession.Close()\n}", "func (s *Session) Close() error {\n\terr := s.close(context.TODO())\n\ts.ticker.Stop()\n\treturn err\n}", "func (session *NetSession) CloseSession() error {\n\tsession.Bufio.Flush()\n\tsession.Conn.Close()\n\tsession.Conn = nil\n\tvar returnErr error = nil\n\tif pps, ok := 
session.PusherPullersSessionMap[session.ReourcePath]; ok {\n\t\tif errs := pps.StopSession(&session.ID); len(errs) != 0 {\n\t\t\tfor index, err := range errs {\n\t\t\t\treturnErr = fmt.Errorf(\"%v\\nindex = %v,error = %v\", returnErr, index, err)\n\t\t\t}\n\t\t}\n\t\tif session.SessionType == PusherClient {\n\t\t\t// if this session is pusher ,\n\t\t\t// we need free all resource include puller's resource\n\t\t\tsession.PusherPullersSessionMapMutex.Lock()\n\t\t\tdelete(session.PusherPullersSessionMap, session.ReourcePath)\n\t\t\tsession.PusherPullersSessionMapMutex.Unlock()\n\t\t}\n\t}\n\treturn returnErr\n}", "func (rt *RethinkDB) Close() {\n\trt.session.Close()\n}", "func (c *Cassandra) Close() error {\n\tif c.session == nil {\n\t\treturn nil\n\t}\n\n\tc.session.Close()\n\n\treturn nil\n}", "func (bs *BatchSession) Close() {\n\tif (bs._q != nil) {\n\t\tbs._rc.RemQ(bs._q)\n\t}\n\tif (bs._rc != nil) {\n\t\tbs._rc.Close()\n\t}\n\tbs._rc = nil\n\tbs._q = nil\n\tbs.Ended = time.Now()\n}", "func (ts *TokenStore) Close() {\n\tts.session.Close()\n}", "func (m *cassandraMetadataPersistence) Close() {\n\tif m.session != nil {\n\t\tm.session.Close()\n\t}\n}", "func (s *Session) Close() error {\n\tif atomic.CompareAndSwapInt32(&s.closed, 0, 1) {\n\t\ts.conn.Close()\n\t\tclose(s.stopedChan)\n\t\tif s.closeCallback != nil {\n\t\t\ts.closeCallback(s)\n\t\t}\n\t}\n\treturn nil\n}", "func (p *Session) Close() error {\n\treturn p.socket.Close()\n}", "func (c *Classifier) Close() error { return c.session.Close() }", "func (repo *VesselsRepository) Close() {\n\trepo.session.Close()\n}", "func (s *Session) Close() error {\n\tdbLogger.Infof(\"Closing session: %s\", s.uuid)\n\n\tclose(s.cancel)\n\ts.wp.Wait()\n\ts.schema.stop()\n\n\treturn nil\n}", "func (js *jsonfileSessionRepository) Close() error {\n\tif js.lFile == nil {\n\t\treturn nil\n\t}\n\n\terr := js.lFile.Close()\n\tif err != nil {\n\t\treturn errors.Errorf(\"cannot close session file '%s': %v\", js.path, 
err)\n\t}\n\n\treturn nil\n}", "func (s *Session) Close() {\n\ts.closeSession.Do(func() {\n\t\tselect {\n\t\tcase <-s.closed:\n\t\tdefault:\n\t\t\tclose(s.closed)\n\t\t\ts.streamClient.CloseSend()\n\t\t}\n\t})\n}", "func CloseSession(goRoutine string, mongoSession *mgo.Session) {\n\tdefer helper.CatchPanic(nil, goRoutine, \"CloseSession\")\n\thelper.WriteStdout(goRoutine, \"mongo.CloseSession\", \"Started\")\n\n\t// Close the specified session\n\tmongoSession.Close()\n\n\thelper.WriteStdout(goRoutine, \"mongo.CloseSession\", \"Completed\")\n}", "func (driver *Driver) Close() error {\n\tdriver.session.Close()\n\treturn nil\n}", "func (r *RedisSession) Close() error {\n\treturn r.pool.Close()\n}", "func (r *RedisSession) Close() error {\n\treturn r.pool.Close()\n}", "func (s *Adapter) CloseSession(ctx context.Context, req *mixer_v1beta1.CloseSessionRequest) (*mixer_v1beta1.CloseSessionResponse, error) {\n\tsessID := sessionID(req.GetSessionId())\n\n\tif h, ok := s.metricHandlers[sessID]; ok {\n\t\th.Shutdown()\n\t\tdelete(s.metricHandlers, sessID)\n\t}\n\tif h, ok := s.traceSpanHandlers[sessID]; ok {\n\t\th.Shutdown()\n\t\tdelete(s.traceSpanHandlers, sessID)\n\t}\n\n\tglog.Infof(\"Closed session %s\", sessID)\n\n\treturn &mixer_v1beta1.CloseSessionResponse{\n\t\tStatus: &google_rpc.Status{\n\t\t\tCode: int32(google_rpc.OK),\n\t\t},\n\t}, nil\n}", "func (db *DB) Close() {\n\tdb.session.Close()\n}", "func (db *DB) Close() {\n\tdb.session.Close()\n}", "func (s *EventStore) Close() {\n\ts.session.Close()\n}", "func (s *SessionSRTP) Close() error {\n\treturn nil\n}", "func (m *MongoDB) Close() {\n\tm.sess.Close()\n}", "func (s *InMemoryDocumentSessionOperations) Close() {\n\ts._close(true)\n}", "func (gs *Service) closeSession(session Session) {\n\t//TODO: delete Chat connection\n\tfor client := range session.hub.Clients {\n\t\t_ = client.Conn.Close()\n\t}\n\tdelete(gs.sessions, session.sessionID)\n}", "func CloseSession(s *Server) gin.HandlerFunc {\n\treturn func(c 
*gin.Context) {\n\t\tvar data = struct {\n\t\t\tToken string `json:\"token\" binding:\"required\"`\n\t\t}{}\n\n\t\tif err := c.BindJSON(&data); err != nil {\n\t\t\tc.JSON(http.StatusBadRequest, gin.H{\"status\": \"JSON Body is missing fields\"})\n\t\t\treturn\n\t\t}\n\n\t\tif _, redisErr := s.Redis.Do(\"DEL\", data.Token); redisErr != nil {\n\t\t\tc.JSON(http.StatusInternalServerError, gin.H{\"status\": \"Internal error\"})\n\t\t\treturn\n\t\t}\n\n\t\tc.JSON(http.StatusOK, gin.H{\"result\": \"ok\"})\n\t}\n}", "func (s *Session) Close() error {\n\treturn s.ws.Close()\n}", "func (m *Mongo) Close() {\n\tm.Session.Close()\n}", "func (m *Mongo) Close() {\n\tm.Session.Close()\n}", "func (s *Session) Close() error {\n\ts.shutdownLock.Lock()\n\tdefer s.shutdownLock.Unlock()\n\n\tif s.IsShutdown() {\n\t\treturn nil\n\t}\n\tatomic.StoreInt32(&s.shutdown, 1)\n\tif s.shutdownErr == nil {\n\t\ts.shutdownErr = ErrSessionShutdown\n\t}\n\tasyncNotify(s.shutdownCh)\n\tselect {\n\tcase s.acceptCh <- nil:\n\tdefault:\n\t}\n\tclose(s.shutdownCh)\n\ts.conn.Close()\n\n\t//s.streamLock.Lock()\n\t//defer s.streamLock.Unlock()\n\ts.streams.Range(func(key, value interface{}) bool {\n\t\tstream := value.(*Stream)\n\t\tstream.forceClose(true)\n\t\treturn true\n\t})\n\t//for len(s.sendCh) > 0 {\n\t//frame := <-s.sendCh\n\t//putBytesToPool(frame.F)\n\t//}\n\t//RecycleBufReaderToPool(s.connReader)\n\tfor atomic.LoadInt32(&s.sendingNum) > 0 {\n\t\ttime.Sleep(10 * time.Nanosecond)\n\t}\n\tclose(s.sendCh)\n\treturn nil\n}", "func (pool *SessionPool) Close() {\n\tpool.rwLock.Lock()\n\tdefer pool.rwLock.Unlock()\n\n\t//TODO(Aiee) append 2 lists\n\tidleLen := pool.idleSessions.Len()\n\tactiveLen := pool.activeSessions.Len()\n\n\t// iterate all sessions\n\tfor i := 0; i < idleLen; i++ {\n\t\tsession := pool.idleSessions.Front().Value.(*pureSession)\n\t\tsession.close()\n\t\tpool.idleSessions.Remove(pool.idleSessions.Front())\n\t}\n\tfor i := 0; i < activeLen; i++ {\n\t\tsession := 
pool.activeSessions.Front().Value.(*pureSession)\n\t\tsession.close()\n\t\tpool.activeSessions.Remove(pool.activeSessions.Front())\n\t}\n\n\tpool.closed = true\n\tif pool.cleanerChan != nil {\n\t\tclose(pool.cleanerChan)\n\t}\n}", "func (st *SessionStoreMySQL) SessionRelease() {\n\tdefer func() {\n\t\terr := st.conn.Close()\n\t\tif err != nil {\n\t\t\tsession.SLogger.Println(err)\n\t\t}\n\t}()\n\n\tb, err := session.EncodeGob(st.values)\n\tif err != nil {\n\t\tsession.SLogger.Println(err)\n\t\treturn\n\t}\n\t_, err = st.conn.Exec(\"UPDATE \"+TableName+\" set `session_data`=?, `session_expiry`=? where session_key=?\",\n\t\tb, time.Now().Unix(), st.sid)\n\tif err != nil {\n\t\tsession.SLogger.Println(err)\n\t\treturn\n\t}\n}", "func (s *MongoStore) Close() {\n\tif s.session != nil {\n\t\ts.session.Close()\n\t}\n}", "func (sshConfig *SSHConfig) CloseSession() {\n\tif sshConfig.session != nil {\n\t\tsshConfig.session.Close()\n\t\tsshConfig.session = nil\n\t}\n}", "func (mc Wrapper) Close() error {\n\treturn mc.Client.Disconnect(mc.ctx)\n}", "func (s *sshSession) Close() {\n\ts.Session.Close()\n\ts.conn.Close()\n}", "func (d *MovieDatabase) Close() {\n\td.Client.Disconnect(d.Context)\n}", "func (i *Cassandra) Close() error {\n\tif !i.session.Closed() {\n\t\ti.session.Close()\n\t}\n\treturn nil\n}", "func (ks *KeyStore) CloseSession(session pkcs11.SessionHandle) {\n\terr := pkcs11Ctx.CloseSession(session)\n\tif err != nil {\n\t\tlogrus.Debugf(\"Error closing session: %s\", err.Error())\n\t}\n}", "func (t TerminalSession) Close() error {\n\t//log.Println(\"Terminal session was closed\")\n\tif err := t.sockConn.Close(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (rs *SessionResultSet) Close() error {\n\treturn rs.ResultSet.Close()\n}", "func (c *Conn) Close() error {\n\terr := c.Session.CloseWithError(0, \"\")\n\n\tif c.onClosed != nil {\n\t\tc.onClosed()\n\t}\n\n\treturn err\n}", "func (r *sessionRecorder) Close() error {\n\tvar errors []error\n\terr := 
r.alog.Close()\n\terrors = append(errors, err)\n\n\t// wait until all events from recorder get flushed, it is important\n\t// to do so before we send SessionEndEvent to advise the audit log\n\t// to release resources associated with this session.\n\t// not doing so will not result in memory leak, but could result\n\t// in missing playback events\n\tcontext, cancel := context.WithTimeout(context.TODO(), defaults.ReadHeadersTimeout)\n\tdefer cancel() // releases resources if slowOperation completes before timeout elapses\n\terr = r.alog.WaitForDelivery(context)\n\tif err != nil {\n\t\terrors = append(errors, err)\n\t\tr.log.Warnf(\"Timeout waiting for session to flush events: %v\", trace.DebugReport(err))\n\t}\n\n\treturn trace.NewAggregate(errors...)\n}", "func (dbWoker *DomainInformationStorage)Close() {\n\tdbWoker.session.Close()\n}", "func (s *SessionRTP) Close() error {\n\treturn s.session.close()\n}", "func (s *SessionManager) SessionRelease(ctx *bm.Context, sv *Session) {\n\t// set http cookie\n\ts.setHTTPCookie(ctx, s.c.CookieName, sv.Sid)\n\t// set mc\n\tconn := s.mc.Get(ctx)\n\tdefer conn.Close()\n\tkey := sv.Sid\n\titem := &memcache.Item{\n\t\tKey: key,\n\t\tObject: sv,\n\t\tFlags: memcache.FlagJSON,\n\t\tExpiration: int32(s.c.CookieLifeTime),\n\t}\n\tif err := conn.Set(item); err != nil {\n\t\tlog.Error(\"SessionManager set error(%s,%v)\", key, err)\n\t}\n}" ]
[ "0.71127445", "0.6988447", "0.6953739", "0.68764406", "0.6822951", "0.6751179", "0.6730526", "0.6690702", "0.6605853", "0.6533115", "0.65138006", "0.6497762", "0.6476485", "0.64715266", "0.6440591", "0.637412", "0.631042", "0.6295334", "0.6289746", "0.62867993", "0.62823224", "0.6216583", "0.6215415", "0.6210419", "0.6201942", "0.61960316", "0.61933595", "0.61805266", "0.61540645", "0.61328554", "0.6129904", "0.6126138", "0.6121668", "0.6080289", "0.60734606", "0.60733515", "0.6068201", "0.60541546", "0.6028323", "0.60234004", "0.6019856", "0.60156524", "0.60051143", "0.5960442", "0.5953446", "0.59516406", "0.5937464", "0.5927912", "0.5916414", "0.5904564", "0.5900159", "0.58997077", "0.58906186", "0.58901024", "0.58781576", "0.58739537", "0.58632237", "0.5855921", "0.58514154", "0.58114195", "0.5806759", "0.57898724", "0.5787675", "0.57758045", "0.57571644", "0.5746724", "0.57373244", "0.5721854", "0.5713058", "0.57096165", "0.570265", "0.570265", "0.5678085", "0.5666726", "0.5666726", "0.5643855", "0.5625056", "0.56123185", "0.5603166", "0.55694264", "0.556902", "0.5566215", "0.5552742", "0.5552742", "0.5540424", "0.5522615", "0.5521064", "0.55210114", "0.5507262", "0.54997987", "0.5489238", "0.54677474", "0.54659784", "0.54453427", "0.5421531", "0.54196054", "0.5407872", "0.54052967", "0.5404484", "0.5400681", "0.53883713" ]
0.0
-1
Collection returns mgo.collection for the given name
func (ds *DataStore) Collection(name string) *mgo.Collection { return ds.MongoSession.DB(commons.AppConfig.Database).C(name) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func GetCollection(name string) *mgo.Collection {\n\treturn DB.C(name)\n}", "func C(name string) *mgo.Collection {\n return Db.C(name)\n}", "func (m *MongoDB) C(name string) *mgo.Collection {\n\treturn m.db.C(name)\n}", "func (m *MongoDB) Collection(name string) (*mongo.Collection, error) {\n\tcoll, ok := m.coll[name]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"not defined collection %s\", name)\n\t}\n\n\treturn coll, nil\n}", "func collectionByName(q sqlx.Queryer, name string) (*model.ItemCollection, error) {\n\tcoll := &model.ItemCollection{}\n\terr := sqlx.Get(q, coll, qCollectionByName, name)\n\tif err == sql.ErrNoRows {\n\t\treturn nil, ErrItemCollectionNotFound\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn coll, nil\n}", "func (d *Database) Collection(name string) *Collection {\n\tvar cp *mongo.Collection\n\n\tcp = d.database.Collection(name)\n\n\treturn &Collection{\n\t\tcollection: cp,\n\t}\n}", "func (svc *Rekognition) GetCollection(name string) (*Collection, error) {\n\tcollectionName := svc.prefix + name\n\n\t// get the stream from cache\n\tsvc.collectionsMu.RLock()\n\tc, ok := svc.collections[collectionName]\n\tsvc.collectionsMu.RUnlock()\n\tif ok {\n\t\treturn c, nil\n\t}\n\n\tc = NewCollection(svc, name)\n\tsvc.collectionsMu.Lock()\n\tsvc.collections[collectionName] = c\n\tsvc.collectionsMu.Unlock()\n\treturn c, nil\n}", "func GetCollection(session *mgo.Session, collectionName string) *mgo.Collection {\n\treturn GetDatabase(session).C(collectionName)\n}", "func (d WrapperDatabase) Collection(name string) CollectionLayer {\n\treturn &WrapperCollection{Collection: d.Database.Collection(name)}\n}", "func (m *MongoDB) GetCollection(name string) *mgo.Collection {\n\treturn m.db.C(name)\n}", "func (b *Bucket) Collection(collectionName string) *Collection {\n\treturn b.DefaultScope().Collection(collectionName)\n}", "func newCollection(name string, db *database) (Collection, error) {\n\tif name == \"\" {\n\t\treturn nil, 
WithStack(InvalidArgumentError{Message: \"name is empty\"})\n\t}\n\tif db == nil {\n\t\treturn nil, WithStack(InvalidArgumentError{Message: \"db is nil\"})\n\t}\n\treturn &collection{\n\t\tname: name,\n\t\tdb: db,\n\t\tconn: db.conn,\n\t}, nil\n}", "func (db Database) C(name string) Collection {\n\treturn Collection{\n\t\tConn: db.Conn,\n\t\tNamespace: db.Name + \".\" + name,\n\t\tLastErrorCmd: db.LastErrorCmd,\n\t}\n}", "func GetCollection(mongoSession *mgo.Session, collectionName string) (collection *mgo.Collection) {\n\treturn mongoSession.DB(mongoDatabase).C(collectionName)\n}", "func (s *Scope) Collection(collectionName string) *Collection {\n\treturn newCollection(s, collectionName)\n}", "func (ins *instance) getCollection() (*mgo.Collection, error) {\n\tsession, err := ins.getConnection()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsession.SetSafe(&mgo.Safe{})\n\tsession.SetMode(mgo.Strong, true)\n\tc := session.DB(ins.database).C(CollectionName)\n\treturn c, nil\n}", "func CreateOrReturnCollection(name string) *Collection {\n\tnamedCollectionLock.Lock()\n\tdefer namedCollectionLock.Unlock()\n\tif q, found := namedCollection[name]; found {\n\t\treturn q\n\t}\n\n\tqmc := &Collection{\n\t\tcollection: nil,\n\t\tdone: make(chan struct{}),\n\t\t// WARNING: use a different configuration name\n\t\texpirePeriod: time.Duration(config.DiscoveryCollectionRetentionSeconds) * time.Second,\n\t}\n\tgo qmc.StartAutoExpiration()\n\n\tnamedCollection[name] = qmc\n\n\treturn qmc\n}", "func (AktaKelahiran) Collection() *mongo.Collection {\n\treturn mongodb.DB.Collection(variable.CollectionNames.SubMission.AktaKelahiran)\n}", "func GetColl(s *mgo.Session, collName string) []Simple {\n if s == nil {\n log.Println(\"FATAL: Can not access MongoDB! 
Application Closing!\")\n os.Exit(1)\n }\n\n defer s.Close()\n s.SetMode(mgo.Monotonic, true)\n\n c := s.DB(\"nebula\").C(collName)\n\n var results []Simple\n err := c.Find(nil).All(&results)\n\n if err != nil {\n log.Printf(\"ERROR: Can not access \"+collName+\" collection to get items!\") \n }\n\n return results\n}", "func CollectionByTitle(db *sqlx.DB, name string) ([]Collection, error) {\n\treturn collectionQuery(\n\t\tdb,\n\t\t`SELECT * FROM collection WHERE title LIKE $1;`,\n\t\tdbSearchString(name),\n\t)\n}", "func (s *MongoStore) C(sess *mgo.Session, collectionName string) *mgo.Collection {\n\treturn sess.DB(s.db).C(collectionName)\n}", "func C(cfg *Config) *mgo.Collection {\n\treturn cfg.Mongo.session.DB(cfg.Mongo.Database).C(cfg.Mongo.Collection)\n}", "func (m *MongoDAL) c(collection string) *mgo.Collection {\n\treturn m.session.DB(m.dbName).C(collection)\n}", "func (d *Dao) GetCollection(session *mgo.Session, collectionName string) *mgo.Collection {\n\treturn session.DB(dbname).C(collectionName)\n}", "func newCollection ( db *Db, collectionName string, keyField string, cacheDur time.Duration, cleanupInterval time.Duration) *Collection {\n\twtcc := new(Collection)\n\twtcc.Parentdb = db\n\twtcc.Name = collectionName\n\twtcc.KeyField = keyField\n\twtcc.Dbc = wtcc.Parentdb.Db.C(collectionName)\n\twtcc.C = cache.New(cacheDur, cleanupInterval)\n\treturn wtcc\t\n}", "func (repo *repository) Get(name string) (*CollectionType, error) {\n\trepo.mu.Lock()\n\tdefer repo.mu.Unlock()\n\n\tif a, ok := repo.collections[name]; ok {\n\t\treturn a, nil\n\t}\n\treturn nil, fmt.Errorf(\"can't find CollectionType named %s\", name)\n}", "func GetCollectionName(name string) string {\n\t// pluralize the name\n\tname = inflect.Pluralize(name)\n\n\t//split name into string array\n\tsplittedName := strings.Split(name, \"\")\n\n\t//uppercase first character and assign back\n\tsplittedName[0] = strings.ToLower(splittedName[0])\n\n\t//merge string array\n\tname = 
strings.Join(splittedName, \"\")\n\treturn name\n\n}", "func (a *ArangoDb) collection(ctx context.Context, colName string) (arango.Collection, error) {\n\tvar col arango.Collection\n\n\tcol, err := (*a.Db).Collection(ctx, colName)\n\n\tif arango.IsNotFound(err) {\n\t\tcol, err = (*a.Db).CreateCollection(ctx, colName, nil)\n\t} else if err != nil {\n\t\treturn nil, &e.Error{Code: e.EINTERNAL, Op: \"db.collection\", Err: err}\n\t}\n\n\treturn col, nil\n}", "func (c *Repository) Collection() *mgo.Collection {\n\treturn c.Session.DB(database).C(collection)\n}", "func (k Keeper) GetCollection(ctx sdk.Context, denom string) (types.Collection, error) {\n\tnfts := k.GetNFTs(ctx, denom)\n\tif len(nfts) == 0 {\n\t\treturn types.Collection{}, sdkerrors.Wrapf(types.ErrUnknownCollection, \"collection %s not existed \", denom)\n\t}\n\treturn types.NewCollection(denom, nfts), nil\n}", "func NewCollection(name string, database *ugo.Database) *UnqliteCollection {\n\tcollection := &UnqliteCollection{name, database, jx9.NewJX9Script(), true, true, false, nil, \"\", \"\"}\n\tcollection.Flush()\n\treturn collection\n}", "func (mongoDBConnection MongoDBConnect) GetCollection(session *mgo.Session, collectionName string) (collection *mgo.Collection) {\n\tcollection = session.DB(mongoDBConnection.database).C(collectionName)\n\treturn\n}", "func (api *apiConfig) GetCollectionByName(name string) (*Collection, error) {\n\t// If an override was configured, use it instead.\n\tif api.getCollectionByName != nil {\n\t\treturn api.getCollectionByName(name)\n\t}\n\n\tcollections, err := api.GetAllCollections()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlowerName := strings.ToLower(name)\n\n\tfor _, collection := range *collections {\n\t\tif strings.ToLower(collection.Name) == lowerName {\n\t\t\treturn &collection, nil\n\t\t}\n\t}\n\n\t// Report that no collection was found by that name.\n\treturn nil, nil\n}", "func Coll(colname string) driver.Collection {\n\tcol, err := 
Database.Collection(nil, colname)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to open collection: %v\", err)\n\t}\n\treturn col\n}", "func GetCollection(collection string) *mongo.Collection {\n\tif client == nil {\n\t\tinitClient()\n\t}\n\n\tucol := client.Database(\"loanCalc\").Collection(collection)\n\n\treturn ucol\n}", "func (d *MongoConnector) GetCollection(name string) *mongo.Collection {\n\treturn d.mongoClient.Database(Database).Collection(name)\n}", "func GetCollection(collection string) (*mongo.Collection){\n\t/*\n\t\tConnect to my cluster\n */\n\tconnectionString:=\"mongodb+srv://dbUser:[email protected]/todo-app-test?retryWrites=true&w=majority\"\n client, err := mongo.NewClient(options.Client().ApplyURI(connectionString))\n if err != nil {\n log.Fatal(err)\n }\n ctx,_ := context.WithTimeout(context.Background(), 10*time.Second)\n\t//defer cancel()\n\t//defer client.Disconnect(ctx)\n err = client.Connect(ctx)\n if err != nil {\n log.Fatal(err)\n }\n\treturn client.Database(database).Collection(collection)\n}", "func (c *collection) Name() string {\n\treturn c.name\n}", "func New(s Site, name string, metadata map[string]interface{}) *Collection {\n\treturn &Collection{\n\t\tName: name,\n\t\tMetadata: metadata,\n\t\tcfg: s.Config(),\n\t\tsite: s,\n\t}\n}", "func GetCollection(ctx context.Context, c Client, uri string) (*Collection, error) {\n\tresp, err := c.Get(ctx, uri)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tvar result Collection\n\terr = json.NewDecoder(resp.Body).Decode(&result)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &result, nil\n}", "func NewCollection() Collection {\n\treturn make(Collection)\n}", "func GetCollection(client *mongo.Client, dbName string, collection string) *Collection {\n\treturn &Collection{client.Database(dbName).Collection(collection), nil}\n}", "func (pg *PGClient) CollectionViewByName(name string) (*model.ItemCollectionView, error) {\n\ttx, err := pg.DB.Beginx()\n\tif err != 
nil {\n\t\treturn nil, err\n\t}\n\tdefer func() {\n\t\tswitch err {\n\t\tcase nil:\n\t\t\terr = tx.Commit()\n\t\tdefault:\n\t\t\ttx.Rollback()\n\t\t}\n\t}()\n\treturn collectionViewByName(tx, name, true)\n}", "func (app *App) getCollection(collection string) *mongo.Collection {\n\treturn app.MongoClient.Database(app.Config.DbName).Collection(collection)\n}", "func GetCollection(model interface{}) (*mongo.Collection, error) {\n\tdb, err := GetMongoPool().GetDatabase()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmodelString, ok := model.(string)\n\tif ok {\n\t\treturn db.Collection(modelString), err\n\t}\n\treturn db.Collection(reflect.TypeOf(model).Name()), err\n}", "func NewCollection() *Collection {\n\treturn &Collection{}\n}", "func (Admin) Collection() *mongo.Collection {\n\treturn mongodb.DB.Collection(variable.CollectionNames.Admin)\n}", "func (mdbc *MongoDbController) getCollection(collectionName string) (*mongo.Collection, context.Context, context.CancelFunc) {\n\t// Write the hash to the database\n\tcollection := mdbc.MongoClient.Database(mdbc.dbName).Collection(collectionName)\n\tbackCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\n\treturn collection, backCtx, cancel\n}", "func (dao ArtistDAO) getCollection() *mgo.Collection {\n\treturn dao.conn.DB(\"bookfire\").C(\"artists\")\n}", "func (c *Collection) Name() string {\n\treturn c.collectionName\n}", "func (l Link) Collection(database, collection string) (*mongo.Collection, error) {\n\tif err := l.linkCheck(\"link.Collection\"); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn l.client.Database(database).Collection(collection), nil\n}", "func (iteration Iteration) GetCollectionName() string {\n\treturn IterationName\n}", "func retrieveMongoCollection(client *mongo.Client, db string, collection string) *mongo.Collection {\n\tcol := client.Database(db).Collection(collection)\n\treturn col\n}", "func (m Service) GetCollection() string {\n\treturn ServiceCollectionName\n}", 
"func (mc *MongoClient) GetCollectionHandler(name string) *mongo.Collection {\n\tif name == \"\" {\n\t\tmc.logger.Fatalw(\"you have not set mongodb collection name\")\n\t}\n\treturn mc.getDbHandler().Collection(name)\n}", "func (mm *Model) GetCollection(session *mgo.Session) CachedCollection {\n\treturn mm.generateCollection(session)\n}", "func GetMongoCollection(collectionName string) (*mgo.Collection, error) {\n\tmgoSession, err := GetMongoSession()\n\tif err != nil {\n\t\tlog.Printf(\"[GetCollection] Error connecting to mongo db: [%s]\\n\", err.Error())\n\t\treturn nil, err\n\t}\n\n\tcollection := mgoSession.DB(mongoDbName).C(collectionName)\n\treturn collection, nil\n}", "func NewCollection(audiences Audiences) *Collection {\n\treturn &Collection{\n\t\tbyID: new(sync.Map),\n\t\tbyKey: new(sync.Map),\n\t\tbyName: new(sync.Map),\n\t\tbyTokenID: new(sync.Map),\n\t\taudiences: audiences,\n\t}\n}", "func (a Admin) CollectionName() string {\n\treturn \"admins\"\n}", "func (db *MongoDB) CreateCollection(c string) *mongo.Collection {\n\t// Check if MongoDB is connected\n\tif db.client == nil {\n\t\tdb.l.Println(\"[ERROR] MongoDB is not connected yet\")\n\t\treturn nil\n\t}\n\n\tdb.Collections[c] = db.client.Database(db.name).Collection(c)\n\treturn db.Collections[c]\n}", "func (database *Database) GetCollection(collection string) (*db.Col, error) {\n\t// Make sure the collection exists first\n\tif err := database.createCollection(collection); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Switch to the collection\n\tobjects := database.Client.Use(collection)\n\n\t// Return the collection\n\treturn objects, nil\n}", "func assertCollection(ctx context.Context, db driver.Database, name string, t *testing.T) driver.Collection {\n\tc, err := db.Collection(ctx, name)\n\tif driver.IsNotFound(err) {\n\t\tt.Fatalf(\"Collection '%s': does not exist\", name)\n\t} else if err != nil {\n\t\tt.Fatalf(\"Failed to open collection '%s': %s\", name, describe(err))\n\t}\n\treturn c\n}", 
"func CreateCollection(name string) *Collection {\r\n\thomePath := GetCollectionsHomePath()\r\n\tcollectionPath := homePath + string(os.PathSeparator) + name\r\n\tif !util.FolderExists(collectionPath) {\r\n\t\tos.Mkdir(collectionPath, 0755)\r\n\t\tlog.Printf(\"Created collection at %s\", collectionPath)\r\n\t\tcollectionEntry := CollectionEntry{\r\n\t\t\tname: name,\r\n\t\t\tpath: collectionPath,\r\n\t\t}\r\n\t\treturn NewCollection(collectionEntry)\r\n\t}\r\n\tlog.Printf(\"Collection %s already exists\", name)\r\n\treturn nil\r\n}", "func SetCollectionName(colname string) func(*MGO) error {\n\treturn func(m *MGO) error {\n\t\tm.colname = colname\n\t\treturn nil\n\t}\n}", "func (s *VisvalingamSimplifier) Collection(c orb.Collection) orb.Collection {\n\treturn collection(s, c)\n}", "func FetchByChName(year int, name string) []history.OneCollection {\n\tcollections := FetchByYear(year)\n\tvar data []history.OneCollection\n\tfor _, collection := range collections {\n\t\tif strings.Contains(collection.ChName, name) {\n\t\t\tdata = append(data, collection)\n\t\t}\n\t}\n\treturn data\n\n}", "func Mgo(c string) (*mgo.Collection, Closer) {\n\tsession := config.session.Copy()\n\tv := session.DB(os.Getenv(\"MONGO_URI_DATABASE\"))\n\n\treturn v.C(c), session\n}", "func (db *Mngo) GetCollection(c *store.Context, model store.Model) *mongo.Collection {\n\tutils.EnsurePointer(model)\n\tmongoModel := store.EnsureGenericModel(model)\n\treturn db.database.Collection(mongoModel.GetCollection())\n}", "func (tmdb *TMDb) SearchCollection(name string, options map[string]string) (*CollectionSearchResults, error) {\n\tvar availableOptions = map[string]struct{}{\n\t\t\"page\": {},\n\t\t\"language\": {}}\n\tvar collections CollectionSearchResults\n\tsafeName := url.QueryEscape(name)\n\toptionsString := getOptionsString(options, availableOptions)\n\turi := fmt.Sprintf(\"%s/search/collection?query=%s&api_key=%s%s\", baseURL, safeName, tmdb.apiKey, optionsString)\n\tresult, err := getTmdb(uri, 
&collections)\n\treturn result.(*CollectionSearchResults), err\n}", "func NewCollection(collectionEntry CollectionEntry) *Collection {\r\n\taccess := NewAccess(collectionEntry)\r\n\treturn &Collection{\r\n\t\tentry: collectionEntry,\r\n\t\tDb: access,\r\n\t}\r\n}", "func (b *Bucket) Collections() *CollectionManager {\n\tcli := b.sb.getCachedClient()\n\n\treturn &CollectionManager{\n\t\tcollectionsSupported: cli.supportsCollections(),\n\t\tmgmtProvider: b,\n\t\tbucketName: b.Name(),\n\t\ttracer: b.sb.Tracer,\n\t}\n}", "func GetCollection(kind string, namespace string, args ...string) ([]byte, error) {\n\tget := []string{\"get\", kind, \"-n\", namespace}\n\treturn kubectl(append(get, args...)...)\n}", "func (m *MongoDB) Collections() map[string]*mongo.Collection {\n\treturn m.coll\n}", "func (instance *Instance) GetCollection() (collection string) {\n\tif val := instance.GetIndexInstance(); val != nil {\n\t\treturn val.GetDefinition().GetCollection()\n\t} else {\n\t\t// TODO: should we panic\n\t}\n\treturn\n}", "func routeCollection(db *mongo.Database) *mongo.Collection {\n\treturn db.Collection(\"routes\")\n}", "func (r *staticCollection) Name() string {\n\treturn r.name\n}", "func NewCollection(address, database, collection string) *Collection {\n\treturn &Collection{address: address, database: database, collection: collection}\n}", "func NewCollection(label string) *Collection {\n\treturn &Collection{\n\t\tItems: make(map[string]*APIResource),\n\t\tmanifests: make(map[string][]byte),\n\t\tResourceLabel: label,\n\t}\n}", "func (c Client) GetCollection(id string) (Collection, error) {\n\turl := c.Config.BaseURL + \"/\" + c.Config.Version + \"/\" + collectionsEndpoint + \"/\" + id\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn Collection{}, err\n\t}\n\treq.Header.Add(\"Authorization\", \"Bearer \"+c.Config.Token)\n\treq.Header.Add(\"Content-Type\", \"application/json\")\n\tresp, err := c.Client.Do(req)\n\tif err != nil {\n\t\treturn 
Collection{}, err\n\t}\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn Collection{}, err\n\t}\n\tvar collection Collection\n\terr = json.Unmarshal(body, &collection)\n\tif err != nil {\n\t\treturn Collection{}, err\n\t}\n\treturn collection, nil\n}", "func (group *Group) GetCollection() string {\n\treturn GroupsCollection\n}", "func NewCollection(id ...string) (Collection, error) {\n\tvar err error\n\tc := Collection{}\n\n\t// create an ID unless the parameter is a string of 'collections'...\n\t// TODO: document why this is here? when can this happen and why?\n\tif id[0] != \"collections\" {\n\t\tc.ID, err = IDFromString(id[0])\n\t} else {\n\t\tc.ID, err = NewID()\n\t}\n\n\tc.MediaTypes = []string{TaxiiContentType}\n\treturn c, err\n}", "func (m *Messages) Collection(messagesmap map[string]string) *Collection {\n\treturn NewCollection(m, messagesmap)\n}", "func (u UserDAO) getCollection() *mgo.Collection {\n\treturn u.conn.DB(\"bookfire\").C(\"users\")\n}", "func getCollectionName(ctx contractapi.TransactionContextInterface) (string, error) {\n\n\t// Get the MSP ID of submitting client identity\n\tclientMSPID, err := ctx.GetClientIdentity().GetMSPID()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to get verified MSPID: %v\", err)\n\t}\n\n\t// Create the collection name\n\torgCollection := \"_implicit_org_\" + clientMSPID\n\n\treturn orgCollection, nil\n}", "func (c *Collections) Create(name string, items ...Item) (*Collection, error) {\n\tinsta := c.insta\n\n\tmodule := \"collection_create\"\n\tif len(items) == 1 {\n\t\tmodule = \"feed_timeline\"\n\t}\n\n\tids := []string{}\n\tfor _, i := range items {\n\t\tids = append(ids, i.ID)\n\t}\n\tmediaIDs, err := json.Marshal(ids)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdata, err := json.Marshal(map[string]string{\n\t\t\"module_name\": module,\n\t\t\"added_media_ids\": string(mediaIDs),\n\t\t\"_uid\": toString(insta.Account.ID),\n\t\t\"name\": 
name,\n\t\t\"_uuid\": insta.uuid,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbody, _, err := insta.sendRequest(\n\t\t&reqOptions{\n\t\t\tEndpoint: urlCollectionsCreate,\n\t\t\tIsPost: true,\n\t\t\tQuery: generateSignature(data),\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tn := Collection{insta: insta}\n\terr = json.Unmarshal(body, &n)\n\treturn &n, err\n}", "func LoadCollection(file string) (*Collection, error) {\n\tspec, err := LoadCollectionSpec(file)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn NewCollection(spec)\n}", "func (c *ComponentCollection) Get(name common.ID) (*ComponentResource, error) {\n\tr := c.New()\n\tif err := c.core.db.get(c, name, r); err != nil {\n\t\treturn nil, err\n\t}\n\treturn r, nil\n}", "func CopyCollection(c *mgo.Collection) *mgo.Collection {\n\treturn CopyDatabase(c.Database).C(c.Name)\n}", "func ensureCollection(ctx context.Context, db driver.Database, name string, options *driver.CreateCollectionOptions, t testEnv) driver.Collection {\n\tc, err := db.Collection(ctx, name)\n\tif driver.IsNotFound(err) {\n\t\tc, err = db.CreateCollection(ctx, name, options)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Failed to create collection '%s': %s\", name, describe(err))\n\t\t}\n\t} else if err != nil {\n\t\tt.Fatalf(\"Failed to open collection '%s': %s\", name, describe(err))\n\t}\n\treturn c\n}", "func (s *DataStore) CollectionService() cabby.CollectionService {\n\treturn CollectionService{DB: s.DB, DataStore: s}\n}", "func (repo *mongoBaseRepo) GetCollection() interface{} {\n\treturn repo.collection\n}", "func (api *apiConfig) GetAllItemsInCollectionByName(name string, maxPages int) ([][]byte, error) {\n\t// If an override was configured, use it instead.\n\tif api.getAllItemsInCollectionByName != nil {\n\t\treturn api.getAllItemsInCollectionByName(name, maxPages)\n\t}\n\n\t// Find the collection by name.\n\tcollection, err := api.GetCollectionByName(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif 
collection == nil {\n\t\treturn nil, nil\n\t}\n\n\t// Now find the items by the collection's ID.\n\treturn api.GetAllItemsInCollectionByID(collection.ID, maxPages)\n}", "func (d *closedDB) GetCollection(ctx context.Context, req *protomodel.GetCollectionRequest) (*protomodel.GetCollectionResponse, error) {\n\treturn nil, store.ErrAlreadyClosed\n}", "func (fastuser *FastUser) CollectionName() string {\n\treturn \"fastuseraccount\"\n}", "func newCollection() *collection {\n\treturn &collection{\n\t\terrs: []error{},\n\t}\n}", "func (engine *Engine) Collection() string {\n\treturn engine.evaluator.Collection()\n}", "func CollectionByID(db *sqlx.DB, id string) ([]Collection, error) {\n\treturn collectionQuery(\n\t\tdb,\n\t\t`SELECT * FROM collection WHERE id LIKE $1;`,\n\t\tdbSearchString(strings.ToUpper(nonIDCharRegex.ReplaceAllString(id, \"\"))),\n\t)\n}", "func (a *ManagementApiService) GetCollection(ctx _context.Context, applicationId int32, campaignId int32, collectionId int32) apiGetCollectionRequest {\n\treturn apiGetCollectionRequest{\n\t\tapiService: a,\n\t\tctx: ctx,\n\t\tapplicationId: applicationId,\n\t\tcampaignId: campaignId,\n\t\tcollectionId: collectionId,\n\t}\n}", "func (app *Apollo) GetCollection(c *gin.Context) {\n\tpid := c.Param(\"pid\")\n\ttgtFormat := c.Query(\"format\")\n\tif tgtFormat == \"\" {\n\t\ttgtFormat = \"json\"\n\t}\n\tif tgtFormat != \"json\" && tgtFormat != \"xml\" && tgtFormat != \"uvamap\" {\n\t\tlog.Printf(\"ERROR: Unsupported format for %s requested %s\", tgtFormat, pid)\n\t\tc.String(http.StatusBadRequest, fmt.Sprintf(\"unsupported format %s\", tgtFormat))\n\t\treturn\n\t}\n\tlog.Printf(\"INFO: get collection for PID %s as %s\", pid, tgtFormat)\n\tstartTime := time.Now()\n\trootID, dbErr := lookupIdentifier(&app.DB, pid)\n\tif dbErr != nil {\n\t\tlog.Printf(\"ERROR: %s\", dbErr.Error())\n\t\tc.String(http.StatusNotFound, dbErr.Error())\n\t\treturn\n\t}\n\n\troot, dbErr := getTree(&app.DB, rootID.ID)\n\tif dbErr != nil 
{\n\t\tlog.Printf(\"ERROR: %s\", dbErr.Error())\n\t\tc.String(http.StatusInternalServerError, dbErr.Error())\n\t\treturn\n\t}\n\telapsedNanoSec := time.Since(startTime)\n\telapsedMS := int64(elapsedNanoSec / time.Millisecond)\n\n\tlog.Printf(\"INFO: collection tree retrieved from DB; sending to client. Elapsed Time: %d (ms)\", elapsedMS)\n\tif tgtFormat == \"json\" {\n\t\t//c.Header(\"Content-Disposition\", fmt.Sprintf(\"attachment; filename=%s.json\", pid))\n\t\tc.JSON(http.StatusOK, root)\n\t} else {\n\t\txml, err := generateXML(root, tgtFormat)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"ERROR: unable to generate XML for %s: %s\", pid, err.Error())\n\t\t\tc.String(http.StatusInternalServerError, \"unable to generate XML content\")\n\t\t\treturn\n\t\t}\n\t\tc.Header(\"Content-Disposition\", fmt.Sprintf(\"attachment; filename=%s.xml\", pid))\n\t\tc.Header(\"Content-Type\", \"application/xml\")\n\t\tc.String(http.StatusOK, xml)\n\t}\n}", "func newVertexCollection(name string, g *graph) (Collection, error) {\n\tif name == \"\" {\n\t\treturn nil, WithStack(InvalidArgumentError{Message: \"name is empty\"})\n\t}\n\tif g == nil {\n\t\treturn nil, WithStack(InvalidArgumentError{Message: \"g is nil\"})\n\t}\n\treturn &vertexCollection{\n\t\tname: name,\n\t\tg: g,\n\t\tconn: g.db.conn,\n\t}, nil\n}" ]
[ "0.7958804", "0.79391026", "0.73163956", "0.73108506", "0.73050374", "0.72839034", "0.7242758", "0.72282404", "0.7102441", "0.70284396", "0.70057887", "0.69019735", "0.68647474", "0.6844251", "0.6840011", "0.683378", "0.68303245", "0.6718207", "0.66450536", "0.6634506", "0.66336256", "0.66199493", "0.65950423", "0.65777946", "0.65723634", "0.6522886", "0.65074", "0.6492383", "0.6464798", "0.64583915", "0.64038426", "0.6403432", "0.64018536", "0.63957465", "0.63477194", "0.6342309", "0.63359815", "0.6313803", "0.6306085", "0.6301706", "0.62968624", "0.62072986", "0.61979806", "0.61973286", "0.6183785", "0.61818737", "0.6178245", "0.61201197", "0.609585", "0.6093614", "0.60933614", "0.60626036", "0.60320824", "0.6025764", "0.60026044", "0.60016894", "0.59847856", "0.59811485", "0.59575945", "0.5943129", "0.5918075", "0.58957344", "0.5893754", "0.5892219", "0.58832866", "0.58795404", "0.5878381", "0.5877717", "0.58726424", "0.5871905", "0.5831662", "0.5820322", "0.5795967", "0.5790366", "0.5785434", "0.5768951", "0.5751152", "0.5748489", "0.57276964", "0.5700606", "0.56961524", "0.5693624", "0.5688188", "0.5680769", "0.56786025", "0.5670577", "0.56601894", "0.5649523", "0.564352", "0.5600225", "0.55983543", "0.5595647", "0.5571898", "0.55635065", "0.5560364", "0.5552805", "0.55485183", "0.5545643", "0.5542832", "0.55406344" ]
0.7683345
2
NewDataStore creates a new DataStore object to be used for each HTTP request.
func NewDataStore() *DataStore { session := Session.Copy() dataStore := &DataStore{ MongoSession: session, } return dataStore }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewDataStore() *DataStore {\n\treturn &DataStore{}\n}", "func newDataStore() *dataStore {\n\tds := dataStore{\n\t\tdata: make(map[string]Info),\n\t}\n\treturn &ds\n}", "func NewDataStore(db neo4j.Driver) DataStore {\n\tif store == nil {\n\t\tstore = &dataStore{db: db}\n\t}\n\n\treturn store\n}", "func NewDataStore(ctx context.Context, c *kivik.Client) (*DataStore, error) {\n\tvar ds DataStore\n\tdbName := \"todos\"\n\t// Check to see if the todos database already exists.\n\tdbExists, err := c.DBExists(context.TODO(), dbName, nil)\n\tif err != nil {\n\t\treturn &ds, fmt.Errorf(\"error determining if %s db exists: %s\", dbName, err)\n\t}\n\t// If the todos database doesn't exist, create it.\n\tif !dbExists {\n\t\terr = c.CreateDB(ctx, dbName, nil)\n\t\tif err != nil {\n\t\t\treturn &ds, fmt.Errorf(\"error creating %s database: %s\", dbName, err)\n\t\t}\n\t\t// TODO: Create the design documents for various queries.\n\t}\n\tdb, err := c.DB(ctx, dbName, nil)\n\tif err != nil {\n\t\treturn &ds, fmt.Errorf(\"error getting %s db handle: %s\", dbName, err)\n\t}\n\n\tds = DataStore{ctx, db}\n\treturn &ds, nil\n}", "func NewDataStore(driver, dsn string) (DataStore, error) {\r\n\treturn newDataStore(driver, dsn)\r\n}", "func NewDataStore(ctx context.Context, kvSource staert.KvSource, object Object, listener Listener) (*Datastore, error) {\n\tdatastore := Datastore{\n\t\tkv: kvSource,\n\t\tctx: ctx,\n\t\tmeta: &Metadata{object: object},\n\t\tlockKey: kvSource.Prefix + \"/lock\",\n\t\tlocalLock: &sync.RWMutex{},\n\t\tlistener: listener,\n\t}\n\terr := datastore.watchChanges()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &datastore, nil\n}", "func NewDataStore(\n\tvolumeInformer lhinformers.VolumeInformer,\n\tengineInformer lhinformers.EngineInformer,\n\treplicaInformer lhinformers.ReplicaInformer,\n\tengineImageInformer lhinformers.EngineImageInformer,\n\tnodeInformer lhinformers.NodeInformer,\n\tsettingInformer lhinformers.SettingInformer,\n\timInformer 
lhinformers.InstanceManagerInformer,\n\tlhClient lhclientset.Interface,\n\n\tpodInformer coreinformers.PodInformer,\n\tcronJobInformer batchinformers_v1beta1.CronJobInformer,\n\tdaemonSetInformer appsinformers.DaemonSetInformer,\n\tdeploymentInformer appsinformers.DeploymentInformer,\n\tpersistentVolumeInformer coreinformers.PersistentVolumeInformer,\n\tpersistentVolumeClaimInformer coreinformers.PersistentVolumeClaimInformer,\n\tkubeNodeInformer coreinformers.NodeInformer,\n\tpriorityClassInformer schedulinginformers.PriorityClassInformer,\n\n\tkubeClient clientset.Interface,\n\tnamespace string) *DataStore {\n\n\treturn &DataStore{\n\t\tnamespace: namespace,\n\n\t\tlhClient: lhClient,\n\t\tvLister: volumeInformer.Lister(),\n\t\tvStoreSynced: volumeInformer.Informer().HasSynced,\n\t\teLister: engineInformer.Lister(),\n\t\teStoreSynced: engineInformer.Informer().HasSynced,\n\t\trLister: replicaInformer.Lister(),\n\t\trStoreSynced: replicaInformer.Informer().HasSynced,\n\t\tiLister: engineImageInformer.Lister(),\n\t\tiStoreSynced: engineImageInformer.Informer().HasSynced,\n\t\tnLister: nodeInformer.Lister(),\n\t\tnStoreSynced: nodeInformer.Informer().HasSynced,\n\t\tsLister: settingInformer.Lister(),\n\t\tsStoreSynced: settingInformer.Informer().HasSynced,\n\t\timLister: imInformer.Lister(),\n\t\timStoreSynced: imInformer.Informer().HasSynced,\n\n\t\tkubeClient: kubeClient,\n\t\tpLister: podInformer.Lister(),\n\t\tpStoreSynced: podInformer.Informer().HasSynced,\n\t\tcjLister: cronJobInformer.Lister(),\n\t\tcjStoreSynced: cronJobInformer.Informer().HasSynced,\n\t\tdsLister: daemonSetInformer.Lister(),\n\t\tdsStoreSynced: daemonSetInformer.Informer().HasSynced,\n\t\tdpLister: deploymentInformer.Lister(),\n\t\tdpStoreSynced: deploymentInformer.Informer().HasSynced,\n\t\tpvLister: persistentVolumeInformer.Lister(),\n\t\tpvStoreSynced: persistentVolumeInformer.Informer().HasSynced,\n\t\tpvcLister: persistentVolumeClaimInformer.Lister(),\n\t\tpvcStoreSynced: 
persistentVolumeClaimInformer.Informer().HasSynced,\n\t\tknLister: kubeNodeInformer.Lister(),\n\t\tknStoreSynced: kubeNodeInformer.Informer().HasSynced,\n\t\tpcLister: priorityClassInformer.Lister(),\n\t\tpcStoreSynced: priorityClassInformer.Informer().HasSynced,\n\t}\n}", "func NewDataStore() *DataStore {\n\tprometheusRegister()\n\treturn &DataStore{\n\t\tnicIPPools: make(map[string]*NICIPPool),\n\t\tpodsIP: make(map[PodKey]PodIPInfo),\n\t}\n}", "func NewDataStore(path string) (*DataStore, error) {\n\ts := DataStore{Path: path}\n\tif s.Path == \"\" {\n\t\treturn &s, errors.New(\"No database location specfied in config\")\n\t}\n\n\terr := s.Open()\n\treturn &s, err\n}", "func newDataStore(driver, dsn string) (*dataStore, error) {\r\n\r\n\tvar this *dataStore\r\n\r\n\tif db, err := sqlx.Open(driver, dsn); err != nil {\r\n\t\treturn nil, err\r\n\t} else if err := db.Ping(); err != nil {\r\n\t\treturn nil, err\r\n\t} else {\r\n\t\tthis = &dataStore{\r\n\t\t\tDB: db,\r\n\t\t\tconnPool: new(ConnPool),\r\n\t\t\tnamedStmts: make(map[string]map[string]*namedStmt),\r\n\t\t}\r\n\t}\r\n\r\n\treturn this, nil\r\n}", "func NewDataStore(host string, port uint, bucketName, username, password string) (DataStore, error) {\n\tconnString := fmt.Sprintf(\"http://%s:%d\", host, port)\n\tcluster, err := gocb.Connect(connString)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = cluster.Authenticate(gocb.PasswordAuthenticator{\n\t\tUsername: username,\n\t\tPassword: password,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbucket, err := cluster.OpenBucket(bucketName, \"\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &CBDataStore{\n\t\tcluster: cluster,\n\t\tbucket: bucket,\n\t}, nil\n}", "func CreateDataStore() *DataStore {\n\treturn &DataStore{\n\t\tdataStoreLock: sync.RWMutex{},\n\t\tkvSet: make(map[uint64][]byte),\n\t\tkvTime: make(map[uint64]int64),\n\t}\n}", "func newDatastore(dir string) (ds.Batching, error) {\n\t// Create the datastore directory if it 
doesn't exist yet.\n\tif err := os.MkdirAll(dir, 0755); err != nil {\n\t\treturn nil, xerrors.Errorf(\"failed to create directory %s for DAG store datastore: %w\", dir, err)\n\t}\n\n\t// Create a new LevelDB datastore\n\tdstore, err := levelds.NewDatastore(dir, &levelds.Options{\n\t\tCompression: ldbopts.NoCompression,\n\t\tNoSync: false,\n\t\tStrict: ldbopts.StrictAll,\n\t\tReadOnly: false,\n\t})\n\tif err != nil {\n\t\treturn nil, xerrors.Errorf(\"failed to open datastore for DAG store: %w\", err)\n\t}\n\t// Keep statistics about the datastore\n\tmds := measure.New(\"measure.\", dstore)\n\treturn mds, nil\n}", "func newStore(c *Config, httpAddr, raftAddr string) *store {\n\tinternalData := meta.Data{\n\t\tIndex: 1,\n\t}\n\ts := store{\n\t\tdata: &Data{\n\t\t\tData: internalData,\n\t\t},\n\t\tclosing: make(chan struct{}),\n\t\tdataChanged: make(chan struct{}),\n\t\tpath: c.Dir,\n\t\tconfig: c,\n\t\thttpAddr: httpAddr,\n\t\traftAddr: raftAddr,\n\t\tlogger: zap.New(zap.NullEncoder()),\n\t}\n\n\treturn &s\n}", "func New(ds datastore.Datastore) *Store {\n\treturn &Store{\n\t\tds: ds,\n\t}\n}", "func NewDatastore(ds datastore.MultiGetterSetterDeleterCloser) *Datastore {\n\treturn &Datastore{ds: ds}\n}", "func NewStore(c *cli.Context) Store {\n\treturn &datastore{\n\t\tDB: open(c.String(\"database-config\")),\n\t}\n}", "func makeDataStore(group StoreGroup, siteNames []string) (result engine.Interface, err error) {\n\tswitch group.Type {\n\tcase \"bolt\":\n\t\tif err = makeDirs(group.Bolt.Path); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tsites := []engine.BoltSite{}\n\t\tfor _, site := range siteNames {\n\t\t\tsites = append(sites, engine.BoltSite{SiteID: site, FileName: fmt.Sprintf(\"%s/%s.db\", group.Bolt.Path, site)})\n\t\t}\n\t\tresult, err = engine.NewBoltDB(bolt.Options{Timeout: group.Bolt.Timeout}, sites...)\n\tdefault:\n\t\treturn nil, errors.Errorf(\"unsupported store type %s\", group.Type)\n\t}\n\n\treturn result, errors.Wrap(err, \"can't initialize data 
store\")\n}", "func NewDatastore(ctx *pulumi.Context,\n\tname string, args *DatastoreArgs, opts ...pulumi.ResourceOption) (*Datastore, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.DisplayName == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'DisplayName'\")\n\t}\n\tif args.OrganizationId == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'OrganizationId'\")\n\t}\n\treplaceOnChanges := pulumi.ReplaceOnChanges([]string{\n\t\t\"organizationId\",\n\t})\n\topts = append(opts, replaceOnChanges)\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource Datastore\n\terr := ctx.RegisterResource(\"google-native:apigee/v1:Datastore\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func NewDatastore(id uint) *Datastore {\n\treturn &Datastore{ID: id}\n}", "func NewStore(dataDir string, defaultEntityKey string, maxInventorySize int) *Store {\n\tif defaultEntityKey == \"\" {\n\t\tslog.Error(\"creating delta store: default entity ID can't be empty\")\n\t\tpanic(\"default entity ID can't be empty\")\n\t}\n\n\td := &Store{\n\t\tDataDir: dataDir,\n\t\tCacheDir: filepath.Join(dataDir, CACHE_DIR),\n\t\tmaxInventorySize: maxInventorySize,\n\t\tdefaultEntityKey: defaultEntityKey,\n\t\tplugins: make(pluginSource2Info),\n\t}\n\n\t// Nice2Have: remove side effects from constructor\n\tif err := d.createDataStore(); err != nil {\n\t\tslog.WithError(err).Error(\"can't initialize data store\")\n\t\tpanic(err)\n\t}\n\n\t// Nice2Have: remove side effects from constructor\n\tcachedDeltaPath := filepath.Join(d.CacheDir, CACHE_ID_FILE)\n\tif err := d.readPluginIDMap(cachedDeltaPath); err != nil {\n\t\tslog.WithError(err).WithField(\"file\", cachedDeltaPath).Error(\"can't initialize plugin-id map\")\n\t\terr = os.Remove(cachedDeltaPath)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\treturn d\n}", "func 
(a *SSERelayDataStoreAdapter) CreateDataStore(\n\tcontext interfaces.ClientContext,\n\tdataStoreUpdates interfaces.DataStoreUpdates,\n) (interfaces.DataStore, error) {\n\tvar sw *streamUpdatesStoreWrapper\n\twrappedStore, err := a.wrappedFactory.CreateDataStore(context, dataStoreUpdates)\n\tif err != nil {\n\t\treturn nil, err // this will cause client initialization to fail immediately\n\t}\n\tsw = newStreamUpdatesStoreWrapper(\n\t\ta.updates,\n\t\twrappedStore,\n\t\tcontext.GetLogging().GetLoggers(),\n\t)\n\n\ta.mu.Lock()\n\tdefer a.mu.Unlock()\n\ta.store = sw\n\treturn sw, nil\n}", "func NewDatastore(bucketName string, options ...func(o *Options)) *Datastore {\n\topts := DefaultOptions()\n\t// apply options\n\tfor _, fn := range options {\n\t\tfn(opts)\n\t}\n\n\treturn &Datastore{\n\t\tPath: opts.Path,\n\t\tBucket: bucketName,\n\t\tRegion: opts.Region,\n\t\taccessKey: opts.AccessKey,\n\t\taccessSecret: opts.AccessSecret,\n\t\taccessToken: opts.AccessToken,\n\t}\n}", "func NewEventDataStore(ctx *pulumi.Context,\n\tname string, args *EventDataStoreArgs, opts ...pulumi.ResourceOption) (*EventDataStore, error) {\n\tif args == nil {\n\t\targs = &EventDataStoreArgs{}\n\t}\n\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource EventDataStore\n\terr := ctx.RegisterResource(\"aws-native:cloudtrail:EventDataStore\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func NewStore(dir string, hints map[string]string) *Store {\n\tif hints == nil {\n\t\thints = map[string]string{}\n\t}\n\treturn &Store{\n\t\tData: &sync.Map{},\n\t\tDir: dir,\n\t\tHints: hints,\n\t\tListFiles: listFiles,\n\t\tParseFile: parseFile,\n\t\tFileExists: fileExists,\n\t}\n}", "func NewPostDataStore(dbHandler repository.DBHandler) *PostDataStore {\n\treturn &PostDataStore{\n\t\tDBHandler: dbHandler,\n\t}\n}", "func NewStore(name string) Store {\n\tnewFunc, ok := stores[name]\n\tif !ok {\n\t\treturn nil\n\t}\n\treturn newFunc()\n}", 
"func NewStore() *Store {\n\ts := new(Store)\n\ts.data = make(map[string]interface{})\n\ts.dataChangeNotifiers = make([]chan bool, 0)\n\ts.dataWatchers = make(map[chan string]bool)\n\treturn s\n}", "func newStore(ts service.Service, config *Config) (*Store, error) {\n\tif config.Datastore == nil {\n\t\tdatastore, err := newDefaultDatastore(config.RepoPath, config.LowMem)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tconfig.Datastore = datastore\n\t}\n\tif config.EventCodec == nil {\n\t\tconfig.EventCodec = newDefaultEventCodec(config.JsonMode)\n\t}\n\tif !managedDatastore(config.Datastore) {\n\t\tif config.Debug {\n\t\t\tif err := util.SetLogLevels(map[string]logging.LogLevel{\"store\": logging.LevelDebug}); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\tctx, cancel := context.WithCancel(context.Background())\n\ts := &Store{\n\t\tctx: ctx,\n\t\tcancel: cancel,\n\t\tdatastore: config.Datastore,\n\t\tdispatcher: newDispatcher(config.Datastore),\n\t\teventcodec: config.EventCodec,\n\t\tmodelNames: make(map[string]*Model),\n\t\tjsonMode: config.JsonMode,\n\t\tlocalEventsBus: &localEventsBus{bus: broadcast.NewBroadcaster(0)},\n\t\tstateChangedNotifee: &stateChangedNotifee{},\n\t\tservice: ts,\n\t}\n\n\tif s.jsonMode {\n\t\tif err := s.reregisterSchemas(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\ts.dispatcher.Register(s)\n\treturn s, nil\n}", "func (cl Client[G, P]) NewStore(ctx context.Context) {\n\tDebugf(msgEnter)\n\tdefer Debugf(msgExit)\n\n\ts, err := cl.getSecrets(ctx)\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"unable to create cookie store: %v\", err))\n\t}\n\n\tvar store cookie.Store\n\tif !IsProduction() {\n\t\tstore = cookie.NewStore(s.HashKey, s.BlockKey)\n\t\topts := sessions.Options{\n\t\t\tDomain: \"fake-slothninja.com\",\n\t\t\tPath: \"/\",\n\t\t}\n\t\tstore.Options(opts)\n\t} else {\n\t\tstore = cookie.NewStore(s.HashKey, s.BlockKey)\n\t\topts := sessions.Options{\n\t\t\tDomain: \"slothninja.com\",\n\t\t\tPath: 
\"/\",\n\t\t\tMaxAge: 60 * 60 * 24, // 1 Day in seconds\n\t\t\tSecure: true,\n\t\t}\n\t\tstore.Options(opts)\n\t}\n\tcl.Router.Use(sessions.Sessions(sessionName, store))\n}", "func NewStore() *Store {\n\treturn &Store{\n\t\tRaftDir: \"\",\n\t\tRaftBindAddr: \"\",\n\t\tdata: make(map[string]string),\n\t}\n}", "func newDBStore(db *leveldbhelper.DBHandle, dbName string) *store {\n\treturn &store{db, dbName}\n}", "func NewStore()(*Store) {\n m := &Store{\n Entity: *iadcd81124412c61e647227ecfc4449d8bba17de0380ddda76f641a29edf2b242.NewEntity(),\n }\n return m\n}", "func NewDatastore(cfg Config, dev bool) (*Datastore, error) {\n\tlogger, err := log.NewLogger(cfg.LogPath, dev)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlogger.Infow(\"initialized logger\", \"config\", cfg)\n\t// Configure to use Minio Server\n\ts3Config := &aws.Config{\n\t\t// TODO: determine if we need session token\n\t\tCredentials: credentials.NewStaticCredentials(cfg.AccessKey, cfg.SecretKey, \"\"),\n\t\tEndpoint: aws.String(cfg.Endpoint),\n\t\tRegion: aws.String(cfg.Region),\n\t\tDisableSSL: aws.Bool(cfg.Secure),\n\t\tS3ForcePathStyle: aws.Bool(true),\n\t}\n\ts3Session, err := session.NewSession(s3Config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\td := &Datastore{\n\t\tConfig: cfg,\n\t\tS3: s3.New(s3Session),\n\t\tl: logger,\n\t\tdebugLogging: os.Getenv(\"STORJ_IPFS_DEBUG\") == \"true\",\n\t}\n\treturn d, nil\n}", "func (s *DatastoreStore) New(r *http.Request, name string) (*sessions.Session,\n\terror) {\n\tsession := sessions.NewSession(s, name)\n\tsession.Options.MaxAge = 60 * 60 * 24 * 365 * 4 // 4 years in seconds\n\tsession.IsNew = true\n\tvar err error\n\tif c, errCookie := r.Cookie(name); errCookie == nil {\n\t\terr = securecookie.DecodeMulti(name, c.Value, &session.ID, s.Codecs...)\n\t\tif err == nil {\n\t\t\terr = s.load(r, session)\n\t\t\tif err == nil {\n\t\t\t\tsession.IsNew = false\n\t\t\t}\n\t\t\tif err == ErrNotFound {\n\t\t\t\terr = nil\n\t\t\t}\n\t\t}\n\t}\n\treturn session, 
err\n}", "func NewStore() *Store {\n\tvar st Store\n\tst.Records = make(map[string]HostSet)\n\tst.Netviews = make(map[string]string)\n\tst.Cidrs = make(map[string]string)\n\treturn &st\n}", "func newWithDatastorer(name string, datastorer datastorer) (dsdb *DatastoreDB, err error) {\n\tdsdb = new(DatastoreDB)\n\tdsdb.kind = name\n\tdsdb.datastorer = datastorer\n\n\terr = dsdb.connect()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = dsdb.testDB()\n\tif err = dsdb.testDB(); err != nil {\n\t\tdsdb.Close()\n\t\treturn nil, err\n\t}\n\n\treturn dsdb, nil\n}", "func DefineDataStore() (faasflow.DataStore, error) {\n\t// initialize minio DataStore\n\tminiods, err := minioDataStore.InitFromEnv()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn miniods, nil\n}", "func NewStore() *Store {\n\treturn &Store{\n\t\tls: make(map[string]InitFunc),\n\t}\n}", "func DefineDataStore() (faasflow.DataStore, error) {\n\treturn nil, nil\n}", "func CreateNewStore(storePath string) (*Store, error) {\n\tstore := &Store{}\n\n\tjsonData, err := json.Marshal(store)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = ioutil.WriteFile(storePath, jsonData, os.FileMode(0600))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tstore.Path, err = filepath.Abs(storePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn store, nil\n}", "func NewSliceDataStore() *SliceDataStore {\n\treturn &SliceDataStore{}\n}", "func New(storeType string) (*Store, error) {\n\tswitch storeType {\n\tcase \"memory\":\n\t\treturn &Store{storeType: storeType, engine: makeMemoryStore()}, nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unsupported store type: %s\", storeType)\n\t}\n}", "func NewStore() *Store {\n\n\t// Create store wrapper.\n\ts := &Store{\n\t\tStoreService: influxdb.NewStoreService(),\n\t}\n\treturn s\n}", "func (e Engine) NewStore(config dvid.StoreConfig) (dvid.Store, bool, error) {\n\treturn e.newLogs(config)\n}", "func NewStore() *Store {\n\treturn &Store{\n\t\tES: 
MustOpenConnection(),\n\t}\n}", "func DefineDataStore() (flow.DataStore, error) {\n\treturn nil, nil\n}", "func NewStore(schema Schema, options ...CeousOption) *BaseStore {\n\tstore := &BaseStore{\n\t\tschema: schema,\n\t}\n\tfor _, option := range options {\n\t\toption(store)\n\t}\n\tif !store.disableCache {\n\t\tstore.runner = store._runner\n\t} else {\n\t\tstore.runner = sq.NewStmtCacher(store._runner)\n\t}\n\treturn store\n}", "func newDBStore(db *couchdb.CouchDatabase, dbName string) *dbstore {\n\treturn &dbstore{dbName, db}\n}", "func NewStore(database string) (ClientStore, error) {\n\tdb, err := newClientDatabase(database)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn db, err\n}", "func NewDatastore(options *Options) (*Datastore, error) {\n\tif options == nil {\n\t\treturn nil, fmt.Errorf(\"options cannot be nil\")\n\t}\n\n\tif options.JobCount < 1 {\n\t\toptions.JobCount = runtime.NumCPU()\n\t}\n\n\t// create our badger-backed metastor database\n\tmetaDB, err := badger.New(options.MetaPath, options.MetaPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// create the metastor client,\n\t// with encryption enabled and our created badger DB backend\n\tmetaClient, err := metastor.NewClientFromConfig([]byte(options.Config.Namespace), metastor.Config{\n\t\tDatabase: metaDB,\n\t})\n\n\t// create a datastor cluster, using our predefined addresses and namespace,\n\t// which will be used to store the actual data\n\tdatastorCluster, err := zerodb.NewCluster(options.DataStor.Shards, options.Password, options.Namespace, nil, datastor.SpreadingTypeRandom)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// create our pipeline which will be used to process or data prior to storage,\n\t// and process it once again upon reading it back from storage\n\tpipeline, err := pipeline.NewPipeline(options.DataStor.Pipeline, datastorCluster, -1)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// create our custom client\n\tds := &Datastore{\n\t\tdata: 
client.NewClient(metaClient, pipeline),\n\t\tmeta: metaClient,\n\t}\n\n\treturn ds, nil\n}", "func NewStore(host, notice, dbPath string) *Store {\n\ts := &Store{\n\t\thost: host,\n\t\tdbPath: dbPath,\n\t\tuser: newUserDB(dbPath),\n\t\tmessage: newMessageDB(dbPath),\n\t\tgroup: newGroupDB(dbPath),\n\t}\n\n\ts.friend = newFriendDB(s.user)\n\ts.postman = newPostman(notice, s.user, s.friend, s.group)\n\n\treturn s\n}", "func NewStore() *Store {\n\treturn &Store{\n\t\tstore: make(map[packet.ID]*Future),\n\t}\n}", "func NewStore() *Store {\n\treturn &Store{}\n}", "func NewStore() *Store {\n\treturn &Store{}\n}", "func NewStore() *Store {\n\treturn &Store{}\n}", "func NewStore(init StoreInit) (s *Store, err error) {\r\n\tvar initialCapacity int\r\n\tif utils.IsSet(init.InitCapacity) {\r\n\t\tinitialCapacity = init.InitCapacity\r\n\t} else {\r\n\t\tinitialCapacity = len(init.Args)\r\n\t}\r\n\r\n\tlocalStore := Store{\r\n\t\tstore: make(map[string]string, initialCapacity),\r\n\t\tparent: init.Parent,\r\n\t}\r\n\r\n\tfor _, arg := range init.Args {\r\n\t\tkey, value, err := splitArgument(arg)\r\n\t\tif err != nil {\r\n\t\t\treturn nil, err\r\n\t\t}\r\n\r\n\t\tif value, err = encode.ConvertStringToUtf8(value); err != nil {\r\n\t\t\treturn nil, fmt.Errorf(\"Error converting value for key '%v' to UTF-8: %v\", key, err)\r\n\t\t}\r\n\r\n\t\tif !localStore.hasKey(key) {\r\n\t\t\tlocalStore.Set(key, value)\r\n\t\t} else {\r\n\t\t\treturn nil, fmt.Errorf(\"Duplicate key '%v' found\", key)\r\n\t\t}\r\n\t}\r\n\r\n\treturn &localStore, nil\r\n}", "func NewStore(d *db.DB) *Store {\n\treturn &Store{\n\t\tdb: d,\n\t}\n}", "func NewStore(storePath string) *Store {\n\tif storePath == \"\" {\n\t\tlog.Panic(\"need the path of where the data will be stored\")\n\t}\n\treturn &Store{logger: log.New(os.Stdout, \"fantail:\", log.Lshortfile), path: storePath}\n}", "func (s *service) NewStore(ctx context.Context, req *pb.NewStoreRequest) (*pb.NewStoreReply, error) {\n\tlog.Debugf(\"received new 
store request\")\n\n\tid, _, err := s.manager.NewStore()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &pb.NewStoreReply{\n\t\tID: id.String(),\n\t}, nil\n}", "func NewExactMatchDataStore()(*ExactMatchDataStore) {\n m := &ExactMatchDataStore{\n ExactMatchDataStoreBase: *NewExactMatchDataStoreBase(),\n }\n return m\n}", "func New(channelID string, transientStore transientStore, collDataStore storeapi.Store) *pvtdatastore.Store {\n\treturn pvtdatastore.New(channelID, transientStore, collDataStore)\n}", "func NewStore(cfg *embed.Config) (Store, error) {\n\tc, err := initEtcdClient(cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ts := new(pdStore)\n\ts.client = c\n\treturn s, nil\n}", "func NewStore() *Store {\n\treturn &Store{\n\t\tstore: make(map[workloadmeta.Kind]map[string]workloadmeta.Entity),\n\t}\n}", "func New() *Store {\n\treturn &Store{\n\t\tmu: sync.Mutex{},\n\t\tsess: make(map[string]*entities.Session),\n\t}\n}", "func New(provider storage.Provider) (*Store, error) {\n\tstore, err := provider.OpenStore(nameSpace)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to open did anchor store: %w\", err)\n\t}\n\n\treturn &Store{\n\t\tstore: store,\n\t}, nil\n}", "func NewStore(ctx context.Context, onAfterStore ...func(name string, value interface{})) *Store {\n\tlogger := logging.FromContext(ctx)\n\n\tstore := &Store{\n\t\tUntypedStore: configmap.NewUntypedStore(\n\t\t\t\"route\",\n\t\t\tlogger,\n\t\t\tconfigmap.Constructors{\n\t\t\t\tDomainConfigName: NewDomainFromConfigMap,\n\t\t\t\tgc.ConfigName: gc.NewConfigFromConfigMapFunc(ctx),\n\t\t\t\tnetcfg.ConfigMapName: network.NewConfigFromConfigMap,\n\t\t\t\tcfgmap.FeaturesConfigName: cfgmap.NewFeaturesConfigFromConfigMap,\n\t\t\t},\n\t\t\tonAfterStore...,\n\t\t),\n\t}\n\n\treturn store\n}", "func NewStore() Store {\n\treturn &store{\n\t\thosts: make(map[string][]*v1alpha3.ServiceEntry_Endpoint),\n\t\tm: &sync.RWMutex{},\n\t}\n}", "func newStore(c *Config) (*Store, error) {\n\tif c == nil 
{\n\t\tc = defaultConfig()\n\t}\n\tmutex := &sync.RWMutex{}\n\tstore := new(Store)\n\tstartTime := time.Now().UTC()\n\tfileWatcher, err := newWatcher(\".\")\n\tif err != nil {\n\t\tlog.Info(fmt.Sprintf(\"unable to init file watcher: %v\", err))\n\t}\n\tif c.Monitoring {\n\t\tmonitoring.Init()\n\t}\n\tstore.fileWatcher = fileWatcher\n\tstore.store = makeStorage(\"\")\n\tstore.keys = []string{}\n\tstore.compression = c.Compression\n\tstore.dbs = make(map[string]*DB)\n\tstore.lock = mutex\n\tstore.stat = new(stats.Statistics)\n\tstore.stat.Start = startTime\n\tstore.indexes = make(map[string]*index)\n\tc.setMissedValues()\n\tstore.config = c\n\tif c.LoadPath != \"\" {\n\t\terrLoad := loadData(store, c.LoadPath)\n\t\tif errLoad != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to load data: %v\", errLoad)\n\t\t}\n\t}\n\tstore.writer, err = newWriter(c.LoadPath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to create writer: %v\", err)\n\t}\n\treturn store, nil\n}", "func (b *PersistentDataStoreBuilder) CreateDataStore(\n\tcontext interfaces.ClientContext,\n\tdataStoreUpdates interfaces.DataStoreUpdates,\n) (interfaces.DataStore, error) {\n\tcore, err := b.persistentDataStoreFactory.CreatePersistentDataStore(context)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn datastore.NewPersistentDataStoreWrapper(core, dataStoreUpdates, b.cacheTTL,\n\t\tcontext.GetLogging().GetLoggers()), nil\n}", "func New() *Store {\n\treturn &Store{}\n}", "func New() *Store {\n\treturn &Store{}\n}", "func NewStore(robot *Robot) (Store, error) {\n\tname := Config.StoreName\n\tif _, ok := Stores[name]; !ok {\n\t\treturn nil, fmt.Errorf(\"%s is not a registered store\", Config.StoreName)\n\t}\n\n\tstore, err := Stores[name].newFunc(robot)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn store, nil\n}", "func NewStore() (s Store) {\n\ts = make(Store, 0)\n\treturn s\n}", "func newStore(config *Config) *store {\n\treturn &store{\n\t\thashMap: 
hash.NewUnsafeHash(config.Capacity),\n\t\texpireHeap: newTimeHeap(config.Capacity),\n\t\texpireTimer: new(refreshTimer),\n\t}\n}", "func NewStore(db *sql.DB) *Store {\n\treturn &Store{\n\t\tdb: db,\n\t\tQueries: New(db), // New creates and returns a queries object\n\t}\n}", "func NewStore() *Store {\n\treturn &Store{\n\t\topaStore: inmem.New(),\n\t}\n}", "func NewStore() *Store {\n\treturn &Store{commands: make(map[string]*Config, 0)}\n}", "func New(addr, password string) *Store {\n\treturn &Store{\n\t\tpool: newPool(addr, password),\n\t}\n}", "func NewStore(c *Config) *Store {\n\t// create a new store\n\tstore := Store{}\n\tstore.config = c\n\tstore.DBname = c.DBname\n\t// check if the file exists\n\tvar build bool\n\t_, err := os.Stat(c.DBname)\n\tif err != nil {\n\t\tlogger.Critical(\"error on stat , %s\", err)\n\t\tbuild = true\n\t}\n\t// if it is a new file build some tables\n\tif build {\n\t\tstore.Build(c)\n\t}\n\tstore.leases = Load(c.DBname)\n\treturn &store\n}", "func NewDatastore(ctx context.Context, cfg *config.Config, dsClient datastoreInterface) (Database, error) {\n\treturn &Datastore{\n\t\tClient: dsClient,\n\t\tctx: ctx,\n\t\tnamespace: cfg.GoogleCloudDatastore.Namespace,\n\t}, nil\n}", "func NewStore(filename string) (*Store, error) {\n\tf, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, newStoreDefaultPerms)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewStoreWithBackend(f)\n}", "func (mdsf *MemoryDataStoreFactory) Create(conf map[string]string) (DataStore, error) {\n\treturn &MemoryDataStore{\n\t\tUsers: map[int64]string{\n\t\t\t1: \"mnbbrown\",\n\t\t\t0: \"root\",\n\t\t},\n\t\tRWMutex: sync.RWMutex{},\n\t}, nil\n}", "func NewStore(p string) (Store, error) {\n\tp = path.Join(p, SQLiteDBName)\n\tdb, err := sql.Open(\"sqlite3\", fmt.Sprintf(\"file:%s?mode=ro\", p))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tstmt, err := db.Prepare(\"select value from entries where key = ?\")\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\n\tcache, err := lru.New(DirCacheSize)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\taclCache, err := lru.New(AccessCacheSize)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &sqlStore{\n\t\tdb: db,\n\t\tstmt: stmt,\n\t\tcache: cache,\n\t\tacl: aclCache,\n\n\t\tusers: make(map[string]int),\n\t\tgroups: make(map[string]int),\n\t}, nil\n}", "func NewStore(db *cockroach.DB, logger *zap.Logger) *Store {\n\treturn &Store{\n\t\tdb: db,\n\t\tlogger: logger,\n\t\tclock: DefaultClock,\n\t}\n}", "func New(ctx context.Context, log logger.Logger, db *db.Store, cache *cache.Cache) (*Store, error) { // nolint:gocognit\n\ts := &Store{\n\t\tlog: log,\n\t\tcache: cache,\n\t}\n\n\t// Set configuration\n\ts.setConfig()\n\n\tvar err error\n\n\tswitch s.typeStore {\n\tcase \"postgres\":\n\t\ts.store, err = postgres.New(ctx, db)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\tcase \"mongo\":\n\t\ts.store, err = mongo.New(ctx, db)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\tcase \"redis\":\n\t\ts.store, err = redis.New(ctx, db)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\tcase \"dgraph\":\n\t\ts.store, err = dgraph.New(ctx, db, log)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\tcase \"leveldb\":\n\t\ts.store, err = leveldb.New(ctx, db)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\tcase \"badger\":\n\t\ts.store, err = badger.New(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\tcase \"ram\":\n\t\tfallthrough\n\tdefault:\n\t\ts.store, err = ram.New(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tlog.Info(\"init linkStore\", field.Fields{\n\t\t\"db\": s.typeStore,\n\t})\n\n\treturn s, nil\n}", "func New(ctx context.Context, local bool) (*store, error) {\n\tts, err := google.DefaultTokenSource(ctx, auth.ScopeFullControl)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Problem setting up client OAuth: %s\", err)\n\t}\n\tclient := 
httputils.DefaultClientConfig().WithTokenSource(ts).With2xxOnly().Client()\n\tstorageClient, err := storage.NewClient(context.Background(), option.WithHTTPClient(client))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Problem creating storage client: %s\", err)\n\t}\n\tcache, err := lru.New(LRU_CACHE_SIZE)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed creating cache: %s\", err)\n\t}\n\treturn &store{\n\t\tbucket: storageClient.Bucket(FIDDLE_STORAGE_BUCKET),\n\t\tcache: cache,\n\t}, nil\n}", "func NewStore(path string, issues []*Issue) *Store {\n\treturn &Store{\n\t\tIssues: issues,\n\t\tPath: path,\n\t}\n}", "func New(db *db.DB) core.StepStore {\n\treturn &stepStore{db}\n}", "func NewEventStore(ds DataStore) (*EventStore, error) {\n\treturn &EventStore{\n\t\tds: ds,\n\t\ttopicMutex: &sync.RWMutex{},\n\t\tdcMutex: &sync.RWMutex{},\n\t\tindexMutex: &sync.RWMutex{},\n\t\ttopicNameToID: make(map[string]string),\n\t\ttopicIDToName: make(map[string]string),\n\t\ttopicSchemaMap: make(map[string]*gojsonschema.Schema),\n\t\ttopicSchemaPropertiesMap: make(map[string](map[string]interface{})),\n\t\tdcNameToID: make(map[string]string),\n\t\tdcIDToName: make(map[string]string),\n\t}, nil\n}", "func NewStore(ctx context.Context, l log.Logger, db *sqlx.DB, beaconName string) (*Store, error) {\n\tp := Store{\n\t\tlog: l,\n\t\tdb: db,\n\n\t\trequiresPrevious: chain.PreviousRequiredFromContext(ctx),\n\t}\n\n\tid, err := p.AddBeaconID(ctx, beaconName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tp.beaconID = id\n\n\treturn &p, nil\n}", "func NewStore(db *sql.DB) *Store {\n\n\treturn &Store{\n\t\tdb: db,\n\t\tQueries: New(db), //defined in db.go by sqlc\n\t}\n}", "func NewStore(c Config) *Store {\n\treturn &Store{\n\t\tpath: c.Dir,\n\t\thost: c.Hostname,\n\t\taddr: c.BindAddress,\n\t\tdata: &Data{},\n\t\tHeartbeatTimeout: time.Duration(c.HeartbeatTimeout),\n\t\tElectionTimeout: time.Duration(c.ElectionTimeout),\n\t\tLeaderLeaseTimeout: 
time.Duration(c.LeaderLeaseTimeout),\n\t\tCommitTimeout: time.Duration(c.CommitTimeout),\n\t\tLogger: log.New(os.Stderr, \"\", log.LstdFlags),\n\t}\n}", "func NewStore(db database.Client) Store {\n\treturn &store{\n\t\tdb: db,\n\t}\n}", "func NewDatastore(ctx context.Context, cfg config.MongoConfig) (m *Mongo, err error) {\n\tm = &Mongo{MongoDriverConfig: cfg}\n\n\tm.connection, err = mongodriver.Open(&m.MongoDriverConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdatabaseCollectionBuilder := map[mongohealth.Database][]mongohealth.Collection{\n\t\tmongohealth.Database(m.Database): {\n\t\t\tmongohealth.Collection(m.ActualCollectionName(config.ImportsCollection)),\n\t\t\tmongohealth.Collection(m.ActualCollectionName(config.ImportsLockCollection)),\n\t\t},\n\t}\n\tm.healthClient = mongohealth.NewClientWithCollections(m.connection, databaseCollectionBuilder)\n\tm.lockClient = mongolock.New(ctx, m.connection, config.ImportsCollection)\n\n\treturn m, nil\n}", "func New(c *sqlstore.Config) Datastore {\n\treturn sqlstore.New(c)\n}", "func newFsStore(root string) Backend {\n\treturn &fsStore{\n\t\troot: root,\n\t\tkinds: map[string]bool{},\n\t\tcheckDuration: defaultDuration,\n\t\tdata: map[Key]*resource{},\n\t\tdonec: make(chan struct{}),\n\t\tProbe: probe.NewProbe(),\n\t}\n}", "func New(dburl string) *Store {\n\treturn &Store{\n\t\tDatabaseURL: dburl,\n\t}\n}", "func NewStore(db *database.DB, collection string) Store {\n\treturn &store{db, collection}\n}", "func NewStore(cfgFilepath string) (*Store, error) {\n\tcfg, err := getConfig(cfgFilepath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdb, err := CreateDB(*cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Store{db}, nil\n}" ]
[ "0.82330394", "0.809883", "0.7739795", "0.7739755", "0.755764", "0.7512227", "0.7293508", "0.7257735", "0.71911937", "0.71226996", "0.69651943", "0.6802696", "0.6616057", "0.65748113", "0.649016", "0.64899117", "0.6447895", "0.64251417", "0.64111", "0.6393936", "0.6374082", "0.63455284", "0.63343805", "0.62962407", "0.6281608", "0.62398154", "0.61913866", "0.6183574", "0.6171013", "0.61413276", "0.61178786", "0.6104983", "0.6101235", "0.6099253", "0.60899013", "0.60694754", "0.6039269", "0.60153073", "0.599654", "0.59950614", "0.59949327", "0.5986385", "0.5978422", "0.5972552", "0.59606194", "0.596019", "0.5953002", "0.59502554", "0.59434026", "0.5936894", "0.5933192", "0.5925512", "0.59241414", "0.5919269", "0.5919269", "0.5919269", "0.5918944", "0.59158003", "0.5913258", "0.59093624", "0.59066856", "0.5906476", "0.5905855", "0.5900643", "0.5900263", "0.5896215", "0.5885439", "0.5868495", "0.58472514", "0.5846243", "0.5837531", "0.5837531", "0.58155864", "0.58148575", "0.58120555", "0.5811942", "0.58034575", "0.57971865", "0.5794019", "0.5793417", "0.5772645", "0.57591707", "0.57586163", "0.5744263", "0.57358146", "0.5733974", "0.57330906", "0.5727746", "0.572433", "0.5721453", "0.5720843", "0.57086533", "0.56831884", "0.56829756", "0.5679719", "0.56774783", "0.56705475", "0.56700003", "0.56663114", "0.5665136" ]
0.7506629
6
InitializeDB creates a DB connection from the provided configuration
func InitDB(pgConf *config.PostgresConf) (Datastore, error) { dbDSN := fmt.Sprintf("postgres://%s@%s:%d/%s?sslmode=disable", pgConf.DBUser, pgConf.DBServer, pgConf.DBPort, pgConf.DBName) db, err := gorm.Open(postgres.New(postgres.Config{DSN: dbDSN}), &gorm.Config{NamingStrategy: schema.NamingStrategy{SingularTable: true}}) if err != nil { sentry.CaptureException(err) return nil, err } log.Info("Database connection successful") return &Database{db}, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func InitializeDatabase(config string, log debug.Logger) {\n\tdatabaseAccess = databaseConnection{dbconfig: config, log: log}\n\tdatabaseAccess.initialize()\n}", "func InitializeDB() error {\n\tdsn := fmt.Sprintf(\"user=%s password=%s dbname=%s host=%s port=%s sslmode=disable\",\n\t\tconfig.C.Database.Postgres.PostgresUsername,\n\t\tconfig.C.Database.Postgres.PostgresPassword,\n\t\tconfig.C.Database.Postgres.PostgresDatabaseName,\n\t\tconfig.C.Database.Postgres.PostgresHost,\n\t\tconfig.C.Database.Postgres.PostgresConnectionPort,\n\t)\n\n\tvar err error\n\tDB, err = gorm.Open(postgres.Open(dsn), &gorm.Config{})\n\n\tdoAutoMigrations()\n\n\treturn err\n}", "func InitDB(dbDriver, dbConnectionString string, maxConnectionCount int) (DB, error) {\n\tvar err error\n\tvar db DB\n\n\tif dbDriver == \"pgx\" {\n\t\tdb, err = NewPgxDB(dbConnectionString, maxConnectionCount)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Error opening postgresql database with pgx driver: %s\", err)\n\t\t}\n\t} else if dbDriver == \"mysql\" {\n\t\tdb, err = NewMySQLDB(dbConnectionString, maxConnectionCount)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Error opening mysql database: %s\", err)\n\t\t}\n\t} else if dbDriver == \"mgo\" {\n\t\tdb, err = NewMongoDB(dbConnectionString, maxConnectionCount)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Error opening mongo database with mgo driver: %s\", err)\n\t\t}\n\t\t// } else if dbDriver == \"pq\" {\n\t\t// \tdb, err = NewPqDB(\n\t\t// \t\tdbConnectionString, maxConnectionCount)\n\t\t// \tif err != nil {\n\t\t// \t\treturn nil, fmt.Errorf(\"Error opening postgresql database with pq driver: %s\", err)\n\t\t// \t}\n\t} else if dbDriver == \"none\" {\n\t\tdb = nil\n\t} else {\n\t\treturn nil, errors.New(\"Can't recognize DB driver type\")\n\t}\n\n\treturn db, nil\n}", "func Initialize(conf *config.DatabaseConfig) error {\n\tlock.Lock()\n\tdefer lock.Unlock()\n\n\tif database != nil {\n\t\treturn fmt.Errorf(\"Database already 
initialized\")\n\t}\n\n\tlog.Log.Debug().Msg(\"initializing database connection\")\n\n\tdsn := fmt.Sprintf(\"host=%s user=%s password=%s dbname=%s port=%s sslmode=disable TimeZone=%s\",\n\t\tconf.Host, conf.Username, conf.Password, conf.DatabaseName, conf.Port, conf.Timezone)\n\tdb, err := connect(dsn)\n\n\t// TODO: better handling to wait for DB up\n\tif err != nil && isConnectionError(err) && conf.ConnectRetries > 0 {\n\t\tdb, err = connectRetry(dsn, conf.ConnectRetries, conf.ConnectBackoff)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tdatabase = db\n\tlog.Log.Debug().Msg(\"database initialized\")\n\n\treturn nil\n}", "func InitDB(driver, connectionstring string) error {\n\tdb, err := gorm.Open(driver, connectionstring)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsetDB(db)\n\treturn nil\n}", "func InitDBConnection() *Database {\n\thost := viper.GetString(\"db.host\")\n\tuser := viper.GetString(\"db.user\")\n\tdbname := viper.GetString(\"db.dbname\")\n\tdbConfig := fmt.Sprintf(\"host=%s user=%s dbname=%s sslmode=disable\", host, user, dbname)\n\tdb, err := gorm.Open(\"postgres\", dbConfig)\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"Failed to initiate a connection to the database: %s\", err))\n\t}\n\n\tfmt.Println(\"Migrating database\")\n\tdb.AutoMigrate(&User{}, &Organization{}, &Restaurant{}, &Menu{}, &Activity{}, &OrderItem{})\n\n\treturn &Database{db}\n}", "func InitializeDb() {\n\tdbPort, err := strconv.Atoi(os.Getenv(\"DB_PORT\"))\n\tif err != nil {\n\t\tlog.Fatal(\"Database port is not valid\")\n\t}\n\n\tdbConnString := fmt.Sprintf(\n\t\t\"host=%s port=%d user=%s password=%s dbname=%s sslmode=disable\",\n\t\tos.Getenv(\"DB_HOST\"),\n\t\tdbPort,\n\t\tos.Getenv(\"DB_USER\"),\n\t\tos.Getenv(\"DB_PASS\"),\n\t\tos.Getenv(\"DB_NAME\"),\n\t)\n\n\tDB, err = sql.Open(\"postgres\", dbConnString)\n\tif err != nil {\n\t\tlog.Fatal(\"Could not connect to db- \", err)\n\t}\n}", "func (server *Server) Initialize(Dbdriver, DbUser, DbPassword, DbPort, DbHost, 
DbName string) *gorm.DB {\n\tvar err error\n\n\tif Dbdriver == \"postgres\" {\n\t\tDBURL := fmt.Sprintf(\"host=%s port=%s user=%s dbname=%s sslmode=disable password=%s\", DbHost, DbPort, DbUser, DbName, DbPassword)\n\t\tserver.DB, err = gorm.Open(Dbdriver, DBURL)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Cannot connect to %s database\", err)\n\n\t\t} else {\n\t\t\tlog.Printf(\"We are connected to the %s database\", Dbdriver)\n\t\t}\n\t}\n\n\treturn server.DB\n}", "func InitDb(conf config.Config, reset bool) error {\n\tif !IsOpen() {\n\t\tif err := openAdapter(conf); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn adp.CreateDb(reset)\n}", "func (a *App) Initialize(config *Configuration) {\n\tif !config.Debug {\n\t\tif config.Postgres == nil {\n\t\t\tlog.Panic(\"No postgres configuration is given, but debug mode is disabled\")\n\t\t}\n\t\tvar PgDbURL = config.Postgres.DbURL\n\t\tdbURLSliced := strings.Split(PgDbURL, \":\")\n\t\thost := dbURLSliced[0]\n\t\tport, err := strconv.Atoi(dbURLSliced[1])\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tcreateErr := CreatePostgreDBIfNotExist(config.Postgres.Database, host, port, config.Postgres.Username, config.Postgres.Password)\n\t\tif createErr != nil {\n\t\t\tlog.Fatalf(\"Error during db creation: %v\", createErr)\n\t\t}\n\t\tconnectionString := fmt.Sprintf(\"host=%s port=%d user=%s password=%s dbname=%s sslmode=disable\",\n\t\t\thost, port, config.Postgres.Username, config.Postgres.Password, config.Postgres.Database)\n\t\ta.DB, err = sql.Open(\"postgres\", connectionString)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tCreateTable(a.DB)\n\t} else {\n\t\ta.DB = nil\n\t}\n\ta.DataGenerationWg.Add(1)\n\tgo generateRandomInitData(a.DB, config, &a.DataGenerationWg)\n\ta.Router = mux.NewRouter()\n\ta.InitializeRoutes()\n}", "func InitializeDatabase(c *Config) (*gorm.DB, error) {\n\tvar err error\n\tcon, err := gorm.Open(\"mysql\", c.GetDatasource())\n\n\tif err != nil {\n\t\tfmt.Println(\"[ERROR] Failed to 
connect to MySQL. Config= \" + Configuration.MySQL.Host)\n\t\treturn nil, err\n\t}\n\n\tcon.LogMode(LogMode)\n\tcon.SingularTable(true)\n\n\tfmt.Println(\"[INFO] Connected to MySQL. Config => \" + Configuration.MySQL.Host + \", LogMode => \" + fmt.Sprintf(\"%v\", LogMode))\n\treturn con, nil\n}", "func InitializeDB() *Database {\n\tconfig := new(dbConfig)\n\tconfigFile, err := ioutil.ReadFile(\"config.yaml\")\n\terr = yaml.Unmarshal(configFile, &config)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tcredentials := fmt.Sprintf(\"%s:%s@/%s?charset=utf8&parseTime=True&loc=Local\", config.Database.DatabaseUser, config.Database.DatabasePassword, config.Database.Database)\n\tdialect := config.Database.DatabaseType\n\n\tdb, err := gorm.Open(dialect, credentials)\n\tif err != nil {\n\t\tlog.Fatal().Msgf(\"Failed to connect to Database. Reason: %v\\n\", err)\n\t}\n\tlog.Info().Msg(\"Successfully connected to qBot Database.\")\n\n\tdb.DB().SetConnMaxLifetime(time.Second * 100)\n\tdb.DB().SetMaxIdleConns(50)\n\tdb.DB().SetMaxOpenConns(200)\n\n\t//db.DropTableIfExists(models.User{}, models.Question{}, models.Answer{}) // Temp\n\t//db.DropTable(\"user_questions\", \"question_answers\", \"user_answers\") // Temp\n\tif err := db.AutoMigrate(models.User{}, models.Question{}, models.Answer{}).Error; err != nil {\n\t\tlog.Fatal().Msgf(\"Unable to migrate database. 
\\nReason: %v\", err)\n\t}\n\tlog.Info().Msg(\"Database migration successful.\")\n\treturn &Database{db}\n}", "func InitDatabase(dsn string) error {\n\tfmt.Println(\"Init db connection\")\n\t// config := mysql.NewConfig()\n\t// config.User = username\n\t// config.Passwd = password\n\t// config.Net = protocol\n\t// config.Addr = host\n\t// config.DBName = database\n\t// config.Params = map[string]string{\n\t// \t\"charset\": charset,\n\t// \t\"parseTime\": \"True\",\n\t// }\n\tdb, err := gorm.Open(\"sqlite3\", dsn)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn err\n\t}\n\tDbConn = db\n\treturn nil\n}", "func DBInit(conStr string) {\n\tif db == nil {\n\t\tvar err error\n\t\tdbConnection, err := gorm.Open(\"mysql\", conStr+\"?charset=utf8&parseTime=True&loc=Local\")\n\t\t// db connection will be closed if there's no request for a while\n\t\t// which would cause 500 error when a new request comes.\n\t\t// diable pool here to avoid this problem.\n\t\tdbConnection.DB().SetMaxIdleConns(0)\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"error\": err.Error(),\n\t\t\t}).Fatal(\"Faile to create db connection pool\")\n\t\t} else {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"message\": dbConnection.GetErrors(),\n\t\t\t\t\"db\": conStr,\n\t\t\t}).Info(\"connected to mysql\")\n\t\t}\n\t\tdb = &DB{dbConnection}\n\t}\n\tdb.dbConnect.SetLogger(log.StandardLogger())\n\t// db.Debug message will be logged be logrug with Info level\n\tdb.dbConnect.Debug().AutoMigrate(&Todo{})\n}", "func InitDB() (*gorm.DB, error) {\n\t// attempt to open a new connection to the db\n\tglog.Info(\"Opening a new connection to the db...\")\n\tconnStr := fmt.Sprintf(\n\t\t\"%s:%s@(%s)/%s?charset=utf8&parseTime=True&loc=Local\", \n\t\tconfig.DbUsername, config.DbPassword, config.DbHostName, config.DbName,\n\t)\n\tdb, err := gorm.Open(config.DbDriver, connStr);\n\tif err != nil {\n\t\treturn db, err\n\t}\n\treturn db, err\n}", "func InitDB(setting *domain.GlobalConfig) {\n\tsource := 
\"\"\n\tswitch setting.DBType {\n\tcase domain.SQLITE3:\n\t\tlogrus.Info(\"InitDB has done when new client, skip.\")\n\t\treturn\n\tcase domain.MYSQL:\n\t\tsource = fmt.Sprintf(\"%s:%s@tcp(%s:%s)/\",\n\t\t\tsetting.DBUser, setting.DBPassword, setting.DBHost, setting.DBPort)\n\tdefault:\n\t\tsource = fmt.Sprintf(\"%s:%s@tcp(%s:%s)/\",\n\t\t\tsetting.DBUser, setting.DBPassword, setting.DBHost, setting.DBPort)\n\t}\n\n\tdb, err := sql.Open(setting.DBType, source)\n\tif err != nil {\n\t\tlogrus.Fatalf(\"connection to db error: %s\", err)\n\t}\n\tdefer db.Close()\n\n\tsql := \"CREATE DATABASE IF NOT EXISTS \" + setting.DBName + \";\"\n\t_, err = db.Exec(sql)\n\tif err != nil {\n\t\tlogrus.Fatalf(\"create db %s error: %v\", setting.DBName, err)\n\t}\n}", "func InitDB() *gorm.DB {\n\tdbConf := GetPostgresConfig()\n\tdbSpec := fmt.Sprintf(\n\t\t\"host=%s port=%s dbname=%s user=%s password=%s sslmode=%s\",\n\t\tdbConf.Host, dbConf.Port, dbConf.DB, dbConf.User, dbConf.Password, dbConf.SSLMode,\n\t)\n\tconn, err := gorm.Open(\"postgres\", dbSpec)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Failed to connect to database with config: %s, err: %s\", dbSpec, err))\n\t}\n\tdb = conn\n\tmigrate() // apply database migrations\n\tif os.Getenv(\"DEBUG\") == \"true\" {\n\t\tdb.LogMode(true)\n\t}\n\treturn db\n}", "func (a *App) Initialize(dbType, dbString string) {\n\tvar err error\n\ta.DB, err = gorm.Open(sqlite.Open(dbString))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\ta.Engine = gin.Default()\n\ta.initializeRoutes()\n}", "func InitDBConfig(driverName, dbName, dataSource string) {\n\tonce.Do(func() {\n\t\tif err := orm.RegisterDriver(driverName, orm.DRSqlite); err != nil {\n\t\t\tklog.Exitf(\"Failed to register driver: %v\", err)\n\t\t}\n\t\tif err := orm.RegisterDataBase(\n\t\t\tdbName,\n\t\t\tdriverName,\n\t\t\tdataSource); err != nil {\n\t\t\tklog.Exitf(\"Failed to register db: %v\", err)\n\t\t}\n\t\t// sync database schema\n\t\tif err := orm.RunSyncdb(dbName, false, true); 
err != nil {\n\t\t\tklog.Errorf(\"run sync db error %v\", err)\n\t\t}\n\t\t// create orm\n\t\tDBAccess = orm.NewOrm()\n\t\tif err := DBAccess.Using(dbName); err != nil {\n\t\t\tklog.Errorf(\"Using db access error %v\", err)\n\t\t}\n\t})\n}", "func InitDb(appConfig *AppConfig) {\n\tlog.Info(\"Initialize database connection\")\n\tDbs = fmt.Sprintf(\"host=%s port=%s user=%s password=%s dbname=%s sslmode=%s\",\n\t\tappConfig.Db.Host,\n\t\tappConfig.Db.Port,\n\t\tappConfig.Db.User,\n\t\tappConfig.Db.Password,\n\t\tappConfig.Db.DbName,\n\t\tappConfig.Db.SSLMode,\n\t)\n\tlog.Info(\"Successfully initialize database connection\")\n\tdb := GetDB()\n\tlog.Info(\"Start table migrations\")\n\tdb.AutoMigrate(\n\t\t&Session{},\n\t)\n\tlog.Info(\"Table migrations achieved\")\n}", "func InitDatabase() *Database {\n\t// eg. \"postgres://postgres:postgres@localhost/postgres?sslmode=disable\"\n\t// TODO: enable SSL on DB\n\tconn, err := sql.Open(\"postgres\", os.Getenv(\"PG_CONNECTION_STRING\"))\n\tif err != nil {\n\t\tlog.Fatal(err) // kill server if we can't use DB on startup\n\t}\n\treturn &Database{\n\t\tconn: conn,\n\t}\n}", "func initializeDB() *gorm.DB {\n\t// load Env Variables\n\tHOST := os.Getenv(\"HOST\")\n\tDB_PORT := os.Getenv(\"DB_PORT\")\n\tUSER := os.Getenv(\"USER\")\n\tNAME := os.Getenv(\"NAME\")\n\tPASSWORD := os.Getenv(\"PASSWORD\")\n\n\t// Data connection string\n\tDB_URI := fmt.Sprintf(\"host=%s user=%s dbname=%s sslmode=disable password=%s port=%s\", HOST, USER, NAME, PASSWORD, DB_PORT)\n\t\n\t// Open DB\n\tdb, err := gorm.Open(postgres.Open(DB_URI), &gorm.Config{})\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t} else {\n\t\tfmt.Println(\"DB Connected successfully\")\n\t}\n\n\tdb.AutoMigrate(&models.Person{})\n\tdb.AutoMigrate(&models.Book{})\n\n\treturn db\n}", "func Init(CDB config.DatabaseConfig) (db *DataBase, err error) {\n\n\t// for local launch\n\tif os.Getenv(CDB.URL) == \"\" {\n\t\tos.Setenv(CDB.URL, \"user=db_forum_user password=db_forum_password 
dbname=db_forum sslmode=disable\")\n\t}\n\n\tvar database *sql.DB\n\tif database, err = sql.Open(CDB.DriverName, os.Getenv(CDB.URL)); err != nil {\n\t\tutils.PrintDebug(\"database/Init cant open:\" + err.Error())\n\t\treturn\n\t}\n\n\tdb = &DataBase{\n\t\tDb: database,\n\t}\n\tdb.Db.SetMaxOpenConns(CDB.MaxOpenConns)\n\n\tif err = db.Db.Ping(); err != nil {\n\t\tutils.PrintDebug(\"database/Init cant access:\" + err.Error())\n\t\treturn\n\t}\n\tutils.PrintDebug(\"database/Init open\")\n\tif err = db.CreateTables(); err != nil {\n\t\treturn\n\t}\n\treturn\n}", "func (p *Oracle) InitDB(connectionString string) error {\n\tdb, err := InitDatabase(\"goracle\", connectionString)\n\tif err != nil {\n\t\treturn err\n\t}\n\tp.db = db\n\treturn nil\n}", "func Initialize() {\n\tdatabaseHost := os.Getenv(\"DB_HOST\")\n\tdatabasePort := os.Getenv(\"DB_PORT\")\n\tdatabaseUser := os.Getenv(\"DB_USER\")\n\tdatabasePass := os.Getenv(\"DB_PASS\")\n\tdatabaseName := os.Getenv(\"DB_NAME\")\n\n\tpostgresConnectionURL := fmt.Sprintf(\"postgres://%s:%s@%s:%s/%s?sslmode=disable\", databaseUser, databasePass, databaseHost, databasePort, databaseName)\n\n\tvar err error\n\tdb, err = sql.Open(\"postgres\", postgresConnectionURL)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t//defer db.Close()\n\n\terr = db.Ping()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tmaxOpenConn, err := strconv.Atoi(os.Getenv(\"DB_MAX_OPEN_CONN\"))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tmaxIdleConn, err := strconv.Atoi(os.Getenv(\"DB_MAX_IDLE_CONN\"))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdb.SetMaxOpenConns(maxOpenConn)\n\tdb.SetMaxIdleConns(maxIdleConn)\n\n\tfmt.Println(\"Database connected!\")\n\n}", "func Init(driver string, config *Config) (*Database, error) {\n\timpl, err := initDriver(driver)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\textDb := &Database{\n\t\tdriver: driver,\n\t\ti: impl,\n\t\tdsn: impl.dsn(config),\n\t\taccess: &sync.RWMutex{},\n\t\tWarnChan: make(chan []error),\n\t\tconfig: 
config,\n\t}\n\tif err = extDb.Reconnect(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn extDb, nil\n}", "func (c *Client) InitDb(db *rds.DBInstance, password, dbname string) error {\n\tport := strconv.FormatInt(*db.Endpoint.Port, 10)\n\thost := *db.Endpoint.Address\n\trdsEngine := *db.Engine\n\tuser := *db.MasterUsername\n\n\tvar engine string\n\n\tif v, ok := engineType[rdsEngine]; ok {\n\t\tengine = v\n\t}\n\n\tvar args string\n\n\tif e, ok := engineConnection[rdsEngine]; ok {\n\t\targs = fmt.Sprintf(e,\n\t\t\thost, port, user, password, dbname)\n\t}\n\n\tvar err error\n\n\tc.DB, err = sql.Open(engine, args)\n\tif err != nil {\n\t\tlog.WithError(err).Error(\"Couldn't open connection to database\")\n\t\treturn err\n\t}\n\n\terr = c.DB.Ping()\n\tif err != nil {\n\t\tlog.WithError(err).Error(\"Couldn't ping database\")\n\t\treturn err\n\t}\n\treturn nil\n}", "func InitializeDB(models ...interface{}) *gorm.DB {\n\tdb, err := ConnectDB()\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\n\tdb.DB()\n\tfor _, model := range models {\n\t\tdb.AutoMigrate(model)\n\t}\n\n\treturn db\n}", "func InitDb(dbConfig Config) (*sql.DB, error) {\n\tconnectionURL := newSQLServerConnectionURL(\n\t\tdbConfig.User, dbConfig.Pass, dbConfig.Host, dbConfig.Name, dbConfig.Port)\n\tcreateTableQuery := sqlServerTableCreationQuery\n\n\tlog.Debugf(\"Establishing connection with '%s'. Connection string: '%q'\", dbDriverName,\n\t\tstrings.Replace(connectionURL.String(), connectionURL.User.String() + \"@\", \"***:***@\", 1))\n\n\tdb, err := sql.Open(dbDriverName, connectionURL.String())\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"while establishing connection to '%s'\", dbDriverName)\n\t}\n\n\tlog.Debug(\"Testing connection\")\n\tif err := db.Ping(); err != nil {\n\t\treturn nil, errors.Wrap(err, \"while testing DB connection\")\n\t}\n\n\tq := strings.Replace(createTableQuery, \"{name}\", SanitizeSQLArg(dbConfig.DbOrdersTableName), -1)\n\tlog.Debugf(\"Ensuring table exists. 
Running query: '%q'.\", q)\n\tif _, err := db.Exec(q); err != nil {\n\t\treturn nil, errors.Wrap(err, \"while initiating DB table\")\n\t}\n\n\treturn db, nil\n}", "func InitDB(cfg abcconfig.DBConfig) error {\n\t// No username provided is a signal to skip database usage\n\tif len(cfg.User) == 0 {\n\t\treturn nil\n\t}\n\n\tconnStr, err := abcdatabase.GetConnStr(cfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tDB, err = sql.Open(cfg.DB, connStr)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tp := DB.Ping()\n\tif p != nil {\n\t\treturn p\n\t}\n\n\treturn nil\n}", "func DbInit() (*sql.DB, error) {\n\tvar db *sql.DB\n\tvar err error\n\tenv := EnvVars()\n\tparams := fmt.Sprintf(\"postgres://%s@%s/%s?sslmode=disable\",\n\t\tenv.DbUser, env.DbHost, env.Db)\n\tdb, err = sql.Open(\"postgres\", params)\n\treturn db, err\n}", "func InitializeDB() *gorm.DB {\n\tdb, err := gorm.Open(\"mysql\", \"root:root@tcp(127.0.0.1:3306)/referee?parseTime=true&readTimeout=1s&writeTimeout=1s&timeout=1s\")\n\tCheck(err)\n\n\treturn db\n}", "func InitDatabase(dbConf *config.SectionDatabase) (*sqlx.DB, error) {\n\tdb, err := sqlx.Open(\"mysql\", fmt.Sprintf(\"%s:%s@tcp(%s:%d)/%s?charset=utf8mb4,utf8\", dbConf.User, dbConf.Pass, dbConf.Host, dbConf.Port, dbConf.Name))\n\n\tif err != nil {\n\t\treturn db, err\n\t}\n\n\terr = db.Ping()\n\tif err != nil {\n\t\treturn db, err\n\t}\n\n\tdb.SetMaxIdleConns(dbConf.MaxIdleConns)\n\t// for db invalid connection after EOF\n\tdb.SetConnMaxLifetime(time.Second)\n\n\t// connect success\n\treturn db, nil\n}", "func InitializeDB(dbfile string) error {\n\tCloseDB()\n\tdb, err := prepareDB(dbfile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tadb = &agendaDB{\n\t\tdb: db,\n\t\tfile: dbfile,\n\t\tqueue: startExecQueue(db),\n\t}\n\treturn nil\n}", "func Init(config *util.Config) (*DAO, error) {\n\tconnStr := fmt.Sprintf(\"user=%s dbname=%s host=%s sslmode=%s\", config.User, config.DBName, config.Host, config.SSLMode)\n\tdb, err := sql.Open(\"postgres\", connStr)\n\tif 
err != nil {\n\t\treturn nil, err\n\t}\n\treturn &DAO{db}, nil\n}", "func Init(config *util.Config) (*DAO, error) {\n\tconnStr := fmt.Sprintf(\"user=%s dbname=%s host=%s sslmode=%s\", config.User, config.DBName, config.Host, config.SSLMode)\n\tdb, err := sql.Open(\"postgres\", connStr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &DAO{db}, nil\n}", "func InitTestDB() *sql.DB {\n\t// TODO: Read test info from config file\n\tvar err error\n\n\tvar conf = map[string]string{\n\t\t\"db_user\": \"taquilla\",\n\t\t\"db_passwd\": \"secret\",\n\t\t\"db_name\": \"taquillatest\",\n\t\t\"db_host\": \"localhost\",\n\t\t\"listen\": \"127.0.0.1:8080\",\n\t\t\"secret\": \"secretkeyVerySecret\",\n\t}\n\n\tdbConf := fmt.Sprintf(\n\t\t\"user=%s dbname=%s password=%s\",\n\t\tconf[\"db_user\"], conf[\"db_name\"], conf[\"db_passwd\"],\n\t)\n\n\tConn, err = sql.Open(\"postgres\", dbConf)\n\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Error connecting to the database\", err)\n\t\treturn nil\n\t}\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\treturn Conn\n}", "func (s *Storage) Init(connectionString string) (*sql.DB, error) {\n\tdbase, err := sql.Open(\"postgres\", connectionString)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ts.Conn = dbase\n\n\t// Creates all the DAOs of this storage.\n\terr = s.createDAOs()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn dbase, s.Conn.Ping()\n}", "func InitDatabase() (err error) {\n\tvar pgo *pg.Options\n\n\tif pgo, err = pg.ParseURL(options.PgSQLDSN); err != nil {\n\t\treturn\n\t}\n\tlog.Debugf(\"Try to connect to postgrsql server...\")\n\tdb = pg.Connect(pgo)\n\treturn\n}", "func InitDB(connString string) *gorm.DB {\n\tdb, err := gorm.Open(postgres.Open(connString), &gorm.Config{\n\t\tLogger: getLogger(),\n\t})\n\tif err != nil {\n\t\tlog.Fatal(\"Failed to connect to the database\")\n\t}\n\tDB = db\n\treturn DB\n}", "func (sql *SqlConnection) InitDB() error {\n\n\tvar err error\n\n\t// open a db connection //\n\tsql.Db, 
err = gorm.Open(\"sqlite3\", \"/var/tmp/tennis.db\")\n\tif err != nil {\n\t\tfmt.Println(\"Failed to connect database : \", err.Error())\n\t}\n\tsql.Db.LogMode(true)\n\n\treturn err\n}", "func (a *App) Initialize(user, dbname string) {\n\tconnectionString := fmt.Sprintf(\"user=%s dbname=%s sslmode=disable\", user, dbname)\n\t// connectionString := fmt.Sprintf(\"user=john dbname=subscription_test sslmode=disable\")\n\n\tvar err error\n\ta.DB, err = sql.Open(\"postgres\", connectionString)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\ta.Router = mux.NewRouter()\n\ta.initializeRoutes()\n}", "func InitDatabase(driverName, dataSourceName string, numRetries int, sleepDuration time.Duration) *sql.DB {\n\tvar err error\n\tvar db *sql.DB\n\tfor i := 0; i < numRetries; i++ {\n\t\tdb, err = sql.Open(driverName, dataSourceName)\n\t\tif err == nil {\n\t\t\tlog.Println(\"DB connection initialized...\")\n\t\t\tbreak\n\t\t}\n\t\tlog.Println(\"DB connection failed to initialize... Sleeping...\")\n\t\ttime.Sleep(sleepDuration)\n\t}\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t} else {\n\t\tlog.Printf(\"Successfully connected to DB: %s\\n\", dataSourceName)\n\t}\n\treturn db\n}", "func (app *App) Initialize(user, password, dbname string) {\n\tconnectionString := fmt.Sprintf(\"user=%s password=%s dbname=%s\", user, password, dbname)\n\n\tvar err error\n\tapp.DB, err = sql.Open(\"postgres\", connectionString)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tapp.Router = mux.NewRouter()\n}", "func InitDatabase() *sql.DB {\n\tlog.Println(\"connecting database.\")\n\n\tquery := url.Values{}\n\tquery.Add(\"database\", \"Company\")\n\n\tu := &url.URL{\n\t\tScheme: \"sqlserver\",\n\t\tUser: url.UserPassword(\"sa\", \"1234\"),\n\t\tHost: fmt.Sprintf(\"%s:%d\", \"localhost\", 1433),\n\t\t// Path: instance, // if connecting to an instance instead of a port\n\t\tRawQuery: query.Encode(),\n\t}\n\n\tlog.Println(u.String())\n\n\tcondb, err := sql.Open(\"sqlserver\", u.String())\n\tif err != nil 
{\n\t\tpanic(err)\n\t}\n\tlog.Println(\"test ping database.\")\n\tif err = condb.Ping(); err != nil {\n\t\tpanic(err)\n\t}\n\treturn condb\n}", "func InitDatabase(cfg *config.HorizonConfig) (AgbotDatabase, error) {\n\n\tif cfg.IsBoltDBConfigured() {\n\t\tdbObj := DatabaseProviders[\"bolt\"]\n\t\treturn dbObj, dbObj.Initialize(cfg)\n\n\t} else if cfg.IsPostgresqlConfigured() {\n\t\tdbObj := DatabaseProviders[\"postgresql\"]\n\t\treturn dbObj, dbObj.Initialize(cfg)\n\n\t}\n\treturn nil, errors.New(fmt.Sprintf(\"neither bolt DB nor Postgresql DB is configured correctly.\"))\n\n}", "func InitDatabase() *Server {\n\tvar err error\n\n\tconnString := getConnString()\n\n\tlog.Printf(\"Setting connection to db with configuration: %s \\n\", connString)\n\n\tserver := &Server{}\n\tserver.db, err = sql.Open(\"sqlserver\", connString)\n\tif err != nil {\n\t\tlog.Fatal(\"Error opening connection: \", err.Error())\n\t}\n\n\tserver.db.SetConnMaxLifetime(time.Minute * 4)\n\n\treturn server\n}", "func InitDb(host string, user string, port int, sslmode string, dbName string, password string) (interfaces.Database, error) {\n\tconnStr := fmt.Sprintf(\n\t\t\"host=%s user=%s port=%d sslmode=%s dbname=%s\",\n\t\thost, user, port, sslmode, dbName,\n\t)\n\tif password != \"\" {\n\t\tconnStr += fmt.Sprintf(\" password=%s\", password)\n\t}\n\tdb, err := sql.Open(\"postgres\", connStr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdb.SetMaxIdleConns(5)\n\tdb.SetMaxOpenConns(10)\n\n\tdbmap := &gorp.DbMap{\n\t\tDb: db,\n\t\tDialect: gorp.PostgresDialect{},\n\t\tTypeConverter: util.TypeConverter{},\n\t}\n\n\tdbmap.AddTableWithName(Game{}, \"games\").SetKeys(true, \"ID\")\n\tdbmap.AddTableWithName(Player{}, \"players\").SetKeys(true, \"ID\")\n\tdbmap.AddTableWithName(EncryptedPlayer{}, \"encrypted_players\")\n\tdbmap.AddTableWithName(Clan{}, \"clans\").SetKeys(true, \"ID\")\n\tdbmap.AddTableWithName(Membership{}, \"memberships\").SetKeys(true, \"ID\")\n\tdbmap.AddTableWithName(Hook{}, 
\"hooks\").SetKeys(true, \"ID\")\n\n\t// dbmap.TraceOn(\"[gorp]\", log.New(os.Stdout, \"KHAN:\", log.Lmicroseconds))\n\treturn egorp.New(dbmap, dbName), nil\n}", "func InitDB(dataSourceName string) {\n\n\t\tvar err error\n\t\tDB, err = sql.Open(\"postgres\", dataSourceName)\n\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t}\n\n\t\tif err = DB.Ping(); err != nil {\n\t\t\t\tlog.Println(err)\n\t\t} else {\n\t\t\t\tlog.Println(\"Connection to db established!\")\n\t\t}\n\n}", "func initDb(username, password, endpoint, port, database string) (*sql.DB, error) {\n\t// Create url for connection\n\turl := fmt.Sprintf(\"%s:%s@tcp(%s:%s)/%s?parseTime=true\", username, password, endpoint, port, database)\n\n\t// Open connection to SQL DB\n\tdb, err := sql.Open(\"mysql\", url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Test database connection\n\terr = db.Ping()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn db, err\n}", "func initDB(options Options) (*mgo.Session, error) {\n\tdialInfo := &mgo.DialInfo{\n\t\tAddrs: strings.Split(options.DBHost, \",\"),\n\t\tDatabase: options.DBName,\n\t\tUsername: options.DBUser,\n\t\tPassword: options.DBPassword,\n\t\tDialServer: func(addr *mgo.ServerAddr) (net.Conn, error) {\n\t\t\treturn tls.Dial(\"tcp\", addr.String(), &tls.Config{InsecureSkipVerify: true})\n\t\t},\n\t\tReplicaSetName: \"rs0\",\n\t\tTimeout: time.Second * 10,\n\t}\n\n\tif !options.SSL {\n\t\tdialInfo.ReplicaSetName = \"\"\n\t\tdialInfo.DialServer = nil\n\t}\n\t// connect to the database\n\tsession, err := mgo.DialWithInfo(dialInfo)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn session, err\n}", "func (driver *SQLDriver) Initialize() error {\n\t// Parse the DSN and create a database object\n\tdb, err := sql.Open(env.Get(\"STORAGE_SQL_DRIVER\", \"sqlite3\"), env.Get(\"STORAGE_SQL_DSN\", \"./db\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Ping the database\n\terr = db.Ping()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Migrate the 
database\n\ttable := env.Get(\"STORAGE_SQL_TABLE\", \"pasty\")\n\t_, err = db.Exec(`\n\t\tCREATE TABLE IF NOT EXISTS ? (\n\t\t\tid varchar NOT NULL PRIMARY KEY,\n\t\t\tcontent varchar NOT NULL,\n\t\t\tsuggestedSyntaxType varchar NOT NULL,\n\t\t\tdeletionToken varchar NOT NULL,\n\t\t\tcreated bigint NOT NULL,\n\t\t\tautoDelete bool NOT NULL\n\t\t);\n `, table)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Set the database object and table name of the SQL driver\n\tdriver.database = db\n\tdriver.table = table\n\treturn nil\n}", "func (c *PostgresClient) InitDB(models []interface{}) (*gorm.DB, error) {\n\tc.LogConfig()\n\terr := c.Connect()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc.DB.LogMode(c.LogMode)\n\tc.CreateDBExtensions()\n\tc.Migrate(models)\n\treturn c.DB, nil\n}", "func init() {\n\tlog.Info(\"Initializing database\")\n\tpsqlInfo := fmt.Sprintf(\"host=%s port=%s user=%s \"+\n\t\t\"password=%s dbname=%s sslmode=disable\",\n\t\tconfig.Config().GetString(\"database.host\"),\n\t\tconfig.Config().GetString(\"database.port\"),\n\t\tconfig.Config().GetString(\"database.user\"),\n\t\tconfig.Config().GetString(\"database.password\"),\n\t\tconfig.Config().GetString(\"database.name\"))\n\tdb, err := sql.Open(\"postgres\", psqlInfo)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\terr = db.Ping()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tlog.Info(\"Successfully connected to database!\")\n}", "func DBInit() *gorm.DB {\n\te := godotenv.Load() //Load .env file\n\tif e != nil {\n\t\tfmt.Print(e)\n\t}\n\n\thost := os.Getenv(\"DB_HOST\")\n\tport := os.Getenv(\"DB_PORT\")\n\tpassword := os.Getenv(\"DB_PASSWORD\")\n\tdbUser := os.Getenv(\"DB_USER\")\n\tdbName := os.Getenv(\"DB_NAME\")\n\tdbURI := fmt.Sprintf(\"%s:%s@tcp(%s:%s)/%s?charset=utf8&parseTime=True&loc=%s\", dbUser, password, host, port, dbName, \"Asia%2FJakarta\")\n\n\tdb, err := gorm.Open(\"mysql\", dbURI)\n\tif err != nil {\n\t\tlog.Panicf(\"failed to connect to database with err : %s \", 
err)\n\t}\n\tdb.DB().SetConnMaxLifetime(time.Minute * 5)\n\tdb.DB().SetMaxIdleConns(0)\n\tdb.DB().SetMaxOpenConns(5)\n\n\tdb.LogMode(true)\n\n\tdB = db\n\tdb.AutoMigrate(\n\t\t&domain.Transaction{},\n\t\t&domain.TransactionDetail{},\n\t\t&domain.Cart{},\n\t\t&domain.CartDetail{},\n\t\t&domain.Product{},\n\t\t&domain.StatusCode{},\n\t)\n\treturn dB\n}", "func Initialise(models ...interface{}) (*gorm.DB, error) {\n\tDB, err := Connect(\n\t\tos.Getenv(\"DATABASE_URL\"),\n\t\tos.Getenv(\"DB_HOST\"),\n\t\tos.Getenv(\"DB_PORT\"),\n\t\tos.Getenv(\"DB_USER\"),\n\t\tos.Getenv(\"DB_PASSWORD\"),\n\t\tos.Getenv(\"DB_NAME\"),\n\t)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tDB = DB.AutoMigrate(models...)\n\n\tDB.LogMode(true)\n\treturn DB, nil\n}", "func Init() (*DB, error) {\n\tlogrus.Info(\"Initializing datasource ...\")\n\tvar err error\n\tcnx := os.Getenv(\"UNAME\") + \":\" + os.Getenv(\"PASS\")\n\tschema, exist := os.LookupEnv(\"SCHEMA\")\n\tif !exist {\n\t\tschema = \"stacke\"\n\t}\n\torm := &DB{DB: nil}\n\tdataSourceName := cnx + \"@tcp(\" + os.Getenv(\"SERVER\") + \":3306)/\" + schema + \"?parseTime=true\"\n\n\t// TODO cnx settings\n\tdb, err = gorm.Open(mysql.Open(dataSourceName), &gorm.Config{\n\t\tLogger: gormlog.New(\n\t\t\tlog.New(os.Stdout, \"\\r\\n\", log.LstdFlags),\n\t\t\tlogger.Config{\n\t\t\t\tSlowThreshold: time.Second, // Slow SQL threshold\n\t\t\t\tLogLevel: gormlog.Info,\n\t\t\t\tColorful: true,\n\t\t\t},\n\t\t)})\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tlogrus.Fatal(\"Failed to connect database\")\n\t}\n\torm.DB = db\n\treturn orm, err\n}", "func (src *DataSrc) InitDB(conf *viper.Viper) error {\n\tdbEngine := conf.GetString(\"db.engine\")\n\tdbHost := conf.GetString(\"db.host\")\n\tdbPort := conf.GetString(\"db.port\")\n\tdbAddr := dbHost + \":\" + dbPort\n\tdbName := conf.GetString(\"db.name\")\n\tdbUser := conf.GetString(\"db.user\")\n\tdbPassword := conf.GetString(\"db.password\")\n\n\tvar errdb error\n\tvar db *sqlx.DB\n\tif dbEngine == 
\"postgres\" {\n\t\tdb, errdb = sqlx.Connect(\"postgres\", \"host=\"+dbHost+\" port=\"+dbPort+\" user=\"+dbUser+\" password=\"+dbPassword+\" dbname=\"+dbName+\" sslmode=disable\")\n\t\tif errdb != nil {\n\t\t\tfmt.Println(\"Error connectiing to DB \", errdb)\n\t\t\treturn errdb\n\t\t}\n\t\tsrc.DB = db\n\n\t} else if dbEngine == \"mysql\" {\n\t\tdb, errdb = sqlx.Connect(\"mysql\", dbUser+\":\"+dbPassword+\"@\"+dbAddr+\"/\"+dbName+\"?charset=utf8&parseTime=True&loc=Local\")\n\t\tif errdb != nil {\n\t\t\tfmt.Println(\"Error connectiing to DB \", errdb)\n\t\t\treturn errdb\n\t\t}\n\t\tsrc.DB = db\n\n\t} else if dbEngine == \"sqlite\" {\n\t\tdb, errdb = sqlx.Connect(\"sqlite3\", dbName)\n\t\tif errdb != nil {\n\t\t\tfmt.Println(\"Error connecting to DB \", errdb)\n\t\t\treturn errdb\n\t\t}\n\t\tsrc.DB = db\n\t}\n\treturn nil\n\n}", "func InitDB(dataSourceName string) (*sqlx.DB, error) {\n\tvar err error\n\n\tdb, err = sqlx.Open(\"sqlite3\", dataSourceName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttableSchema := `CREATE TABLE IF NOT EXISTS agents (\n\t\tagent_id TEXT PRIMARY KEY UNIQUE,\n\t\tstarted_at datetime NOT NULL,\n\t\tterminated_at datetime,\n\t\tcurrent_status TEXT NOT NULL,\n\t\teffective_config TEXT NOT NULL\n\t);`\n\n\t_, err = db.Exec(tableSchema)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error in creating agents table: %s\", err.Error())\n\t}\n\n\tAllAgents = Agents{\n\t\tagentsById: make(map[string]*Agent),\n\t\tconnections: make(map[types.Connection]map[string]bool),\n\t\tmux: sync.RWMutex{},\n\t}\n\treturn db, nil\n}", "func Init() {\n\tdbType := config.LoadEnv(\"DB_CONNECTION\")\n\tvar connectionString string\n\n\tif dbType == \"mysql\" {\n\t\tconnectionString = fmt.Sprintf(\n\t\t\t\"%s:%s@(%s)/%s?charset=utf8mb4&parseTime=True&loc=Local\",\n\t\t\tconfig.LoadEnv(\"DB_USERNAME\"), config.LoadEnv(\"DB_PASSWORD\"), config.LoadEnv(\"DB_HOST\"), config.LoadEnv(\"DB_NAME\"))\n\t} else if dbType == \"postgres\" {\n\t\tconnectionString = 
fmt.Sprintf(\n\t\t\t\"host=%s port=%s user=%s dbname=%s sslmode=disable password=%s\",\n\t\t\tconfig.LoadEnv(\"DB_HOST\"), config.LoadEnv(\"DB_PORT\"), config.LoadEnv(\"DB_USERNAME\"), config.LoadEnv(\"DB_NAME\"), config.LoadEnv(\"DB_PASSWORD\"))\n\t}\n\n\tdb, err = gorm.Open(dbType, connectionString)\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t}\n}", "func (db *Database) init() (*gorm.DB, error) {\n\tvar connection, err = gorm.Open(db.Driver, db.getURI())\n\tif err != nil {\n\t\tfmt.Printf(\"✖ Cannot connect to %s database\\n\", db.Driver)\n\t\tlog.Fatal(\"This is the error:\", err)\n\t} else {\n\t\tfmt.Printf(\"⚡ We are connected to the %s database\\n\", db.Driver)\n\t}\n\treturn connection, err\n}", "func Init() *gorm.DB {\n\tdb, err := gorm.Open(\"sqlite3\", \"test.db\")\n\tif err != nil {\n\t\tfmt.Println(\"db err: \", err)\n\t}\n\tdb.DB().SetMaxIdleConns(10)\n\tdb.LogMode(true)\n\tDB = db\n\treturn DB\n}", "func InitDb(cfg Config) (*PostgresDb, error) {\n\tdbConn, err := sqlx.Connect(\"postgres\", fmt.Sprintf(\"%v\", cfg))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tp := &PostgresDb{\n\t\tdbConn: dbConn,\n\t}\n\terr = p.dbConn.Ping()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = p.createTable()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn p, nil\n}", "func (Server *Server) Initialize(Dbdriver, DbUser, DbPassword, DbPort, DbHost, DbName string) {\n\n\tvar err error\n\tDBURL := fmt.Sprintf(\"%s:%s@tcp(%s:%s)/%s?charset=utf8&parseTime=True&loc=Local\", DbUser, DbPassword, DbHost, DbPort, DbName)\n\tServer.DBServer, err = gorm.Open(Dbdriver, DBURL)\n\tif err != nil {\n\t\tfmt.Printf(\"Cannot connect to %s database\", Dbdriver)\n\t\tlog.Fatal(\"This is the error:\", err)\n\t} else {\n\t\tfmt.Printf(\"We are connected to the %s database\", Dbdriver)\n\t}\n\n\tServer.DBServer.Debug().AutoMigrate(&Data{})\n}", "func (c *ConsulDB) InitializeDatabase() error {\n\tif os.Getenv(\"DATABASE_IP\") == \"\" {\n\t\treturn 
pkgerrors.New(\"DATABASE_IP environment variable not set.\")\n\t}\n\tconfig := consulapi.DefaultConfig()\n\tconfig.Address = os.Getenv(\"DATABASE_IP\") + \":8500\"\n\n\tclient, err := consulapi.NewClient(config)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.consulClient = client\n\treturn nil\n}", "func InitDB(dataSource string) {\n\tvar err error\n\tDB, err = sqlx.Connect(\"mysql\", dataSource)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\tif err = DB.Ping(); err != nil {\n\t\tlog.Panic(err)\n\t}\n\tDB.SetMaxOpenConns(1000)\n}", "func InitDB() *gorm.DB {\n\tconf := envloader.LoadConfig()\n\tdb, err := gorm.Open(\"sqlite3\", \"db/\"+conf.ENV+\".db\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn db\n}", "func Initialize(o *ConnectOptions) (*mongo.Database, error) {\n\tif client != nil {\n\t\treturn selectDatabase(client, o.Database)\n\t}\n\n\tdatabaseURL := o.DatabaseURL\n\tconnectTimeout := o.ConnectTimeout\n\n\tif connectTimeout == 0 {\n\t\tconnectTimeout = time.Duration(10 * time.Second)\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), connectTimeout)\n\tdefer cancel()\n\n\tif databaseURL == \"\" {\n\t\treturn nil, fmt.Errorf(\"missing database url\")\n\t}\n\n\tc, err := mongo.Connect(ctx, options.Client().ApplyURI(o.DatabaseURL))\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = testClient(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tclient = c\n\n\tdefer func() {\n\t\tif err = client.Disconnect(ctx); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}()\n\n\treturn selectDatabase(client, o.Database)\n}", "func GormInit(conf *config.Config) (*gorm.DB, error) {\n\tdb, openErr := gorm.Open(conf.DBType, conf.DBParams)\n\tif openErr != nil {\n\t\tlog.CheckError(openErr)\n\t\treturn nil, openErr\n\t}\n\n\tconnectionErr := db.DB().Ping()\n\tif connectionErr != nil {\n\t\tlog.CheckError(connectionErr)\n\t\treturn nil, connectionErr\n\t}\n\tdb.DB().SetMaxIdleConns(10)\n\tdb.DB().SetMaxOpenConns(100)\n\n\tif config.Environment == 
\"DEVELOPMENT\" {\n\t\tdb.LogMode(true)\n\t}\n\n\tdb.AutoMigrate(&model.User{}, &model.UserFollows{}, &model.UserUploadsOld{})\n\tdb.AutoMigrate(&model.Torrent{}, &model.TorrentReport{})\n\tdb.AutoMigrate(&model.Comment{}, &model.OldComment{})\n\n\treturn db, nil\n}", "func InitDB(dsn string) {\n\tvar err error\n\tuserDB, err = gorm.Open(postgres.New(postgres.Config{DSN: dsn}))\n\tif err != nil {\n\t\tlog.WithField(\"InitDB\", \"OpenGorm\").Error(err)\n\t}\n}", "func Init(cfg *settings.Config) *gorm.DB {\n\tdsn := fmt.Sprintf(\"%s:%s@%s(%s:%s)/%s?parseTime=True\",\n\t\tcfg.Database.User,\n\t\tcfg.Database.Pass,\n\t\tcfg.Database.Protocol,\n\t\tcfg.Database.Host,\n\t\tcfg.Database.Port,\n\t\tcfg.Database.DataSource)\n\n\tdb, err := gorm.Open(\"mysql\", dsn)\n\tif err != nil {\n\t\tfmt.Println(\"db err: (Init) \", err)\n\t}\n\n\tdb.AutoMigrate(&User{})\n\n\tDB = db\n\n\treturn db\n}", "func InitDB(host string, dbname string, user string, password string) (*sql.DB, error) {\n\tdb, err := sql.Open(\n\t\t\"postgres\", fmt.Sprintf(\"host=%s user=%s password=%s dbname=%s sslmode=disable\", host, user, password, dbname))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err = db.Ping(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn db, nil\n}", "func DBInit() *gorm.DB {\n\tvar errEnv error\n\terrEnv = godotenv.Load()\n\tif errEnv != nil {\n\t\tlog.Fatalf(\"Error getting env, not comming through %v\", errEnv)\n\t} else {\n\t\tfmt.Println(\"We are getting the env values\")\n\t}\n\t// fmt.Println(\"host=\" + os.Getenv(\"DB_HOST\") + \" port=\" + os.Getenv(\"DB_PORT\") + \" user=\" + os.Getenv(\"DB_USER\") + \" dbname=\" + os.Getenv(\"DB_NAME\") + \" password=\" + os.Getenv(\"DB_PASSWORD\"))\n\tdb, err := gorm.Open(\"postgres\", \"host=\"+os.Getenv(\"DB_HOST\")+\" sslmode=disable port=\"+os.Getenv(\"DB_PORT\")+\" user=\"+os.Getenv(\"DB_USER\")+\" dbname=\"+os.Getenv(\"DB_NAME\")+\" password=\"+os.Getenv(\"DB_PASSWORD\"))\n\tdb.LogMode(true)\n\tif err != nil 
{\n\t\tpanic(err)\n\t}\n\n\tdb.AutoMigrate(structs.User{}, structs.UserSession{}, structs.Diary{})\n\n\treturn db\n}", "func InitDatabase(config *Config) (*Database, error) {\n\treturn Init(\"mysql\", config)\n}", "func InitDB() (*gorm.DB, error) {\n\tdb, err := gorm.Open(\"sqlite3\", \"./url.db\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdb.LogMode(true)\n\tmodels.Migrate(db)\n\treturn db, err\n}", "func InitDB(driverName, dataSourceName string) (*sql.DB) {\n conn, err := sql.Open(driverName, dataSourceName)\n \n log.Println(\"open main db conn\")\n \n if err != nil {\n log.Fatal(\"DB is not connected\")\n }\n \n if err = conn.Ping(); err != nil {\n log.Fatal(\"DB is not responded\")\n }\n \n return conn\n}", "func NewDb(c *config.Config) (*sql.DB, error) {\n\tonceDb.Do(func() { //singleton\n\t\tlogUtil.DebugPrint(\"db first time init\\n\")\n\t\tdsn := c.Database.Username + \":\" + c.Database.Password + \"@tcp(\" + c.Database.Host + \":\" + strconv.Itoa(c.Database.Port) + \")/\" + c.Database.Name + \"?parseTime=true\"\n\t\tlogUtil.DebugPrint(dsn)\n\t\tvar err error\n\t\tdb, err = sql.Open(\"mysql\", dsn)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t\tdbConfigErr = true\n\t\t}\n\t})\n\tif !dbConfigErr {\n\t\treturn db, nil\n\t} else {\n\t\treturn nil, errors.New(\"db config error\")\n\t}\n}", "func Init() {\n config := common.ConfigObject.DB\n var (\n dbName = config.Name\n dbUser = config.User\n dbPass = config.Password\n )\n\tvar err error\n\tdb, err = sql.Open(\"mysql\", fmt.Sprintf(\"%v:%v@/%v\", dbUser, dbPass, dbName))\n\tpe(err)\n}", "func Init(o *pg.Options) *pg.DB {\n\tlog.Println(\"Initializing the DB with options... 
| \", o)\n\tif o.Addr == \"\" {\n\t\to.Addr = \"localhost:5432\"\n\t}\n\treturn pg.Connect(o)\n}", "func InitDb() (*App, error) {\n\ta := App{}\n\ta.Env = os.Getenv(\"ENV\")\n\tconnectionString := fmt.Sprintf(\"%s\", a.GetDNS())\n\tvar err error\n\n\tdb, err = sql.Open(\"mysql\", connectionString)\n\tif err != nil {\n\t\tlog.Printf(\"[db/init] - Erro ao tentar abrir conexão (%s). Erro: %s\", a.Env, err.Error())\n\t}\n\n\tdb.SetConnMaxLifetime(time.Minute * 5)\n\tdb.SetMaxIdleConns(9)\n\tdb.SetMaxOpenConns(25)\n\tdb.SetConnMaxLifetime(time.Hour)\n\n\tif err != nil {\n\t\tlog.Printf(\"[db/init] - Erro ao tentar abrir conexão (%s). Erro: %s\", a.Env, err.Error())\n\t}\n\n\treturn &App{DB: db}, nil\n}", "func InitiateDb(dbDir string) (*scribble.Driver, error) {\n\tdb, err := scribble.New(dbDir, nil)\n\tif err != nil {\n\t\tfmt.Println(\"Error\", err)\n\t\treturn nil, err\n\t}\n\treturn db, nil\n}", "func Init(conf DBConfig) *gorm.DB {\n\tdb, err := gorm.Open(\"mysql\", fmt.Sprintf(\n\t\t\"%s:%s@tcp(db:3306)/%s?charset=%s&collation=%s&parseTime=True&loc=UTC\",\n\t\tconf.User,\n\t\tconf.Password,\n\t\tconf.DatabaseName,\n\t\tcharset,\n\t\tcollation,\n\t))\n\tif err != nil {\n\t\tfmt.Println(\"db err: \", err)\n\t}\n\n\tdb.DB().SetMaxIdleConns(10)\n\tdb = db.Set(\"gorm:table_options\", \"ENGINE=InnoDB CHARSET=utf8mb4 COLLATE=utf8mb4_bin\")\n\tdb.SingularTable(true)\n\tdb.LogMode(true)\n\n\treturn db\n}", "func New(ctx context.Context, config *Configuration) (*Database, error) {\n\tdbConfig := fmt.Sprintf(\n\t\t\"host=%s port=%d user=%s password='%s' dbname=%s search_path=%s sslmode=require\",\n\t\tconfig.Host,\n\t\tconfig.Port,\n\t\tconfig.User,\n\t\tconfig.Password,\n\t\tconfig.Name,\n\t\tconfig.Schema,\n\t)\n\n\tdb, err := sql.Open(config.Driver, dbConfig)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn nil, err\n\t}\n\n\treturn &Database{\n\t\tContext: ctx,\n\t\tDatabase: db,\n\t}, nil\n}", "func InitDB() *gorm.DB {\n\tdsn := \"host=\" + DBConfig.Host + \" user=\" 
+ DBConfig.User + \" password=\" + DBConfig.Password + \" dbname=\" +\n\t\tDBConfig.Name + \" port=\" + DBConfig.Port + \" sslmode=disable TimeZone=Asia/Shanghai\"\n\tdb, err := gorm.Open(postgres.Open(dsn), &gorm.Config{\n\t\tDisableForeignKeyConstraintWhenMigrating: true,\n\t\tLogger: logger.Default.LogMode(logger.Info),\n\t})\n\tif err != nil {\n\t\tfmt.Println(\"db err: \", err)\n\t}\n\tsqlDB, err := db.DB()\n\t// SetMaxIdleConns 设置空闲连接池中连接的最大数量\n\tsqlDB.SetMaxIdleConns(10)\n\n\t// SetMaxOpenConns 设置打开数据库连接的最大数量。\n\tsqlDB.SetMaxOpenConns(100)\n\n\t// SetConnMaxLifeti\t1qme 设置了连接可复用的最大时间。\n\tsqlDB.SetConnMaxLifetime(time.Hour)\n\t//db.LogMode(true)\n\tD = db\n\tSqlDB = sqlDB\n\treturn D\n}", "func Initialize() {\n\tfmt.Println(\"inited DB\")\n\tvar err error\n\tclient, err = mongo.Connect(bg(), options.Client().ApplyURI(config.DatabaseURL))\n\tif err != nil {\n\t\tlog.Fatalf(\"%+v\\n\", err)\n\t}\n\n\tdb = client.Database(config.DatabaseName)\n}", "func (a *App) Initialize(databaseURL string) {\n\t// commenting out for Heroku Postgres service\n\t// connectionString :=\n\t// \tfmt.Sprintf(\"user=%s password=%s dbname=%s sslmode=disable\", user, password, dbname)\n\n\tvar err error\n\ta.DB, err = sql.Open(\"postgres\", databaseURL)\n\tif err != nil {\n\t\tlog.Fatalf(\"Could not connect to postgres: %+v\", err)\n\t}\n\n\ta.Router = mux.NewRouter()\n\ta.initializeRoutes()\n}", "func InitDB(dsn string) (*MysqlDB, error) {\n\tdb, err := sql.Open(\"mysql\", dsn)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdb.SetMaxOpenConns(10)\n\treturn &MysqlDB{Conn: db}, nil\n}", "func InitDBConnection() *sql.DB {\n\tdb, err := sql.Open(\"postgres\", os.Getenv(\"DATABASE_URL\"))\n\t// if there is an error opening the connection, handle it\n\tif err != nil {\n\t\tfmt.Println(\"Cannot open SQL connection\")\n\t\tpanic(err.Error())\n\t}\n\n\treturn db\n}", "func InitDb() {\n\tconfig := dbConfig()\n\tvar err error\n\tpsqlInfo := fmt.Sprintf(\"host=%s port=%s user=%s 
\"+\n\t\t\"password=%s dbname=%s sslmode=disable\",\n\t\tconfig[host], config[port],\n\t\tconfig[user], config[password], config[dbname])\n\n\tDB, err = sqlx.Connect(\"postgres\", psqlInfo)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\terr = DB.Ping()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfmt.Println(\"Successfully connect to database!\")\n\n}", "func DBInit() {\n\t// Mode = \"PRODUCTION\"\n\t// if Mode == \"PRODUCTION\" {\n\t// \tDatabaseURL = \"test.sqlite3\"\n\t// \tDatabaseName = \"sqlite3\"\n\t// } else if Mode == \"DEPLOY\" {\n\tDatabaseURL = os.Getenv(\"DATABASE_URL\")\n\tDatabaseName = \"postgres\"\n\t// }\n\n\tdb, err := gorm.Open(DatabaseName, DatabaseURL)\n\tif err != nil {\n\t\tpanic(\"We can't open database!(dbInit)\")\n\t}\n\t//残りのモデルはまだ入れてない。\n\tdb.AutoMigrate(&model.Post{})\n\tdb.AutoMigrate(&model.User{})\n\tdb.AutoMigrate(&model.Room{})\n\tdefer db.Close()\n}", "func InitDB(dataSourceName string) {\n\tvar err error\n\tstart := time.Now()\n\tticker := time.NewTicker(time.Millisecond * 500)\n\tfor t := range ticker.C {\n\t\tDB, err = sql.Open(\"postgres\", dataSourceName)\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\t\tif (t.Second() - start.Second()) > 5 {\n\t\t\tlog.Fatal(\"Can't connect to db\")\n\t\t\tbreak\n\t\t}\n\t}\n\tif err = DB.Ping(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func GormInit(conf *config.Config, logger Logger) (*gorm.DB, error) {\n\n\tdb, openErr := gorm.Open(conf.DBType, conf.DBParams)\n\tif openErr != nil {\n\t\tlog.CheckError(openErr)\n\t\treturn nil, openErr\n\t}\n\n\tIsSqlite = conf.DBType == SqliteType\n\n\tconnectionErr := db.DB().Ping()\n\tif connectionErr != nil {\n\t\tlog.CheckError(connectionErr)\n\t\treturn nil, connectionErr\n\t}\n\n\t// Negative MaxIdleConns means don't retain any idle connection\n\tmaxIdleConns := -1\n\tif IsSqlite {\n\t\t// sqlite doesn't like having a negative maxIdleConns\n\t\tmaxIdleConns = 10\n\t}\n\n\tdb.DB().SetMaxIdleConns(maxIdleConns)\n\tdb.DB().SetMaxOpenConns(400)\n\n\tif 
config.Environment == \"DEVELOPMENT\" {\n\t\tdb.LogMode(true)\n\t}\n\n\tswitch conf.DBLogMode {\n\tcase \"detailed\":\n\t\tdb.LogMode(true)\n\tcase \"silent\":\n\t\tdb.LogMode(false)\n\t}\n\n\tif logger != nil {\n\t\tdb.SetLogger(logger)\n\t}\n\n\tdb.AutoMigrate(&model.User{}, &model.UserFollows{}, &model.UserUploadsOld{}, &model.Notification{})\n\tif db.Error != nil {\n\t\treturn db, db.Error\n\t}\n\tdb.AutoMigrate(&model.Torrent{}, &model.TorrentReport{})\n\tif db.Error != nil {\n\t\treturn db, db.Error\n\t}\n\tdb.AutoMigrate(&model.File{})\n\tif db.Error != nil {\n\t\treturn db, db.Error\n\t}\n\tdb.AutoMigrate(&model.Comment{}, &model.OldComment{})\n\tif db.Error != nil {\n\t\treturn db, db.Error\n\t}\n\n\treturn db, nil\n}", "func NewDB(conf *Config) (db *DB, err error) {\n\tdb = new(DB)\n\tdb.DB, err = sql.Open(\"postgres\", conf.OpenDBURL())\n\t// TODO (kostyarin): configure db: max idle, max open, lifetime, etc\n\t// using hardcoded values, or keeping the values in\n\t// the Config\n\treturn\n}", "func InitDb() {\n db, err := sqlx.Open(\"postgres\", config.DbURI)\n if err != nil {\n log.Fatalln(\"Database source URI error: \" + err.Error())\n }\n err = db.Ping()\n if err != nil {\n log.Fatalln(\"Database connect error: \" + err.Error())\n }\n Pool = db\n log.Println(\"Database connected\")\n}", "func InitTestDB() (*gorm.DB, error) {\n\tif err := LoadTestEnv(); err != nil {\n\t\tpanic(err)\n\t}\n\n\tconfig := configs.New()\n\tdb := database.New(&config.Database, false)\n\tdatabase.AutoMigrate(db)\n\n\tif conn, ok := db.DB(); ok != nil {\n\t\tdefer conn.Close()\n\t}\n\n\treturn db, nil\n}", "func InitDatabase(c *config.ConfigToml) *Test {\n\ttest := Test{}\n\ttest.cfg = c\n\treturn &test\n}", "func TestDBInit() *Sqlite {\n\ttestdb := &Sqlite{}\n\ttestdb.OpenDB(\"./../gorm_test.db\")\n\ttestdb.setMaxIdleConns(3)\n\ttestdb.logMode(true)\n\treturn testdb\n}", "func DBInit(file string) *PSQLService {\n\tcfg := config.NewConfig(file)\n\ts := cfg.Service\n\tpsql := 
fmt.Sprintf(psqlInfo, s.Host, s.Port, s.User, s.Password, s.Name)\n\tdb, err := sql.Open(driverName, psql)\n\tif err != nil {\n\t\tlog.Printf(\"Error opening SQL db: %s\", err.Error())\n\t}\n\terr = db.Ping()\n\tif err != nil {\n\t\tlog.Printf(\"Error pingng SQL db: %s\", err.Error())\n\t}\n\treturn &PSQLService{\n\t\tDB: db,\n\t}\n}", "func initAppDB() {\n\n\t// Init config data\n\tdbConf := GetDBConfig()\n\tdbConf.IsAppDB = true\n\n\tdbPoolApp, err := initSocketConnectionPool(dbConf)\n\tif err != nil {\n\t\tlog.Println(\"initial dbConnApp fail : \", err.Error())\n\t} else {\n\t\tlog.Println(\"initial dbConnApp successful\")\n\t\tdbConf.Conn = dbPoolApp\n\t\tdbConf.InitSuccess = true\n\t}\n\n\tdbConf.Err = err\n\n\t// Keep instance\n\tdbAppConf = dbConf\n}", "func initDBConnection(dbUser, dbPass, dbURL, dbNAME string) (err error) {\n\t/*\n\t\tVariables defined here\n\t*/\n\tvar user, pass, url, name string\n\n\t/*\n\t\tverify that all variables exists\n\t*/\n\tif len(dbUser) == 0 || len(dbURL) == 0 || len(dbPass) == 0 || len(dbNAME) == 0 {\n\t\terr = errors.New(\"Missing DB Credentails. Please Check\")\n\t\treturn\n\t}\n\n\t/*\n\t\tverify the varibles and set values after remove spaces\n\t*/\n\tif len(dbUser) > 0 && len(dbPass) > 0 && len(dbURL) > 0 && len(dbNAME) > 0 {\n\t\tuser = strings.TrimSpace(dbUser)\n\t\tpass = strings.TrimSpace(dbPass)\n\t\turl = strings.TrimSpace(dbURL)\n\t\tname = strings.TrimSpace(dbNAME)\n\t}\n\n\t/*\n\t\tPrepares the connection string\n\t*/\n\tconnString := fmt.Sprintf(\"postgres://%s:%s@%s/%s?sslmode=require\", user, pass, url, name)\n\tfmt.Printf(\"connecting to database: %s\\n\", url)\n\n\t/*\n\t\tconnects the database with the provided values, in case of any issue error will be raise\n\t*/\n\tdb, err = sql.Open(\"postgres\", connString)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Database refused connection: %s\", err.Error())\n\t\treturn\n\t}\n\n\treturn\n}" ]
[ "0.7442992", "0.73547006", "0.72342485", "0.719802", "0.7148712", "0.7128819", "0.7119498", "0.71033305", "0.70790875", "0.7067338", "0.7016216", "0.69800913", "0.68921584", "0.6888181", "0.6864967", "0.6860298", "0.6841143", "0.682902", "0.6818211", "0.6807455", "0.6797582", "0.67912203", "0.6790501", "0.6787582", "0.67864555", "0.67796123", "0.67512935", "0.6740454", "0.67298365", "0.6726898", "0.67182624", "0.6717765", "0.6702112", "0.67018026", "0.6701317", "0.6701317", "0.6671027", "0.66707087", "0.6668085", "0.6667356", "0.6666139", "0.6658376", "0.66470104", "0.6639364", "0.6638092", "0.66376364", "0.66328776", "0.6617987", "0.660999", "0.6605031", "0.6589575", "0.6576762", "0.65748614", "0.65696305", "0.6553481", "0.6546559", "0.65351605", "0.6518831", "0.6499885", "0.6486087", "0.6477491", "0.647554", "0.6469567", "0.64644694", "0.64538413", "0.6437456", "0.64364976", "0.6430181", "0.6411214", "0.6406375", "0.64008915", "0.63991946", "0.63976943", "0.639274", "0.6381887", "0.6380298", "0.63705343", "0.63607067", "0.6359511", "0.63592064", "0.6344394", "0.63329357", "0.6329841", "0.6326434", "0.63210684", "0.6313695", "0.63052404", "0.6304063", "0.62986", "0.62887806", "0.6284468", "0.62833315", "0.627461", "0.62712294", "0.6269257", "0.62674606", "0.62647736", "0.62410504", "0.6233314", "0.62325686" ]
0.6857958
16
Init Create DB tables
func (db *Database) CreateDBTable() error { if err := db.DbConn.AutoMigrate(&models.Holiday{}); err != nil { return err } if err := db.DbConn.AutoMigrate(&models.BSE_BHAV{}); err != nil { return err } if err := db.DbConn.AutoMigrate(&models.Order{}); err != nil { return err } return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (db database) Init() error {\n\tscript := `CREATE TABLE IF NOT EXISTS txs (\n\t\thash VARCHAR NOT NULL PRIMARY KEY,\n\t\tstatus SMALLINT,\n\t\tcreated_time BIGINT,\n\t\tselector VARCHAR(255),\n\t\ttxid VARCHAR,\n\t\ttxindex BIGINT,\n\t\tamount VARCHAR(100),\n\t\tpayload VARCHAR,\n\t\tphash VARCHAR,\n\t\tto_address VARCHAR,\n\t\tnonce VARCHAR,\n\t\tnhash VARCHAR,\n\t\tgpubkey VARCHAR,\n\t\tghash VARCHAR,\n\t\tversion VARCHAR\n\t);\nCREATE TABLE IF NOT EXISTS gateways (\n\t\tgateway_address VARCHAR NOT NULL PRIMARY KEY,\n\t\tstatus SMALLINT,\n\t\tcreated_time BIGINT,\n\t\tselector VARCHAR(255),\n\t\tpayload VARCHAR,\n\t\tphash VARCHAR,\n\t\tto_address VARCHAR,\n\t\tnonce VARCHAR,\n\t\tnhash VARCHAR,\n\t\tgpubkey VARCHAR,\n\t\tghash VARCHAR,\n\t\tversion VARCHAR\n);\n`\n\t_, err := db.db.Exec(script)\n\treturn err\n}", "func DbInit() {\n\tsqlStatement := `\n\t\tCREATE TABLE benutzer (\n\t\t\tfdNummer VARCHAR(256) PRIMARY KEY, \n\t\t\tVorname VARCHAR(256) NOT NULL, \n\t\t\tNachname VARCHAR(256) NULL,\n\t\t\tAge TINYINT NULL,\n\t\t\tStudiengang VARCHAR(256) NULL,\n\t\t\tSemester TINYINT NULL\n\t\t\t);\n\n\t\tCREATE TABLE nachrichten (\n\t\t\tNachrichtID INTEGER PRIMARY KEY AUTOINCREMENT,\n\t\t\tfdNummer VARCHAR(256) NOT NULL,\n\t\t\tGroupID INTEGER NOT NULL,\n\t\t\tmessage VARCHAR(256) NOT NULL,\n\t\t\tgesendeteUhrzeit DEFAULT CURRENT_TIMESTAMP\n\t\t\t);\n\n\t\tCREATE TABLE chatgroup (\n\t\t\tGroupID INTEGER PRIMARY KEY,\n\t\t\tGroupName VARCHAR(256) NOT NULL\n\t\t\t);\n\t`\n\n\tCreate(sqlStatement)\n}", "func initDb(db *sql.DB) error {\n\n\t_, err := db.Exec(`CREATE TABLE IF NOT EXISTS resource_metadata (\n\t\t\tid TEXT NOT NULL,\n\t\t\ttype TEXT NOT NULL,\n\t\t\tcreated_at TIMESTAMP NOT NULL,\n\t\t\tupdated_at TIMESTAMP NOT NULL,\n\t\t\tdeleted_at TIMESTAMP,\n\t\t\tparams JSONB NOT NULL,\n\t\t\tdata JSONB NOT NULL,\n\t\t\tPRIMARY KEY (id)\n\t)`)\n\tif err != nil {\n\t\tlog.Println(\"Unable to create resource_metadata table.\")\n\t\treturn fmt.Errorf(\"create 
resource_metadata table: %w\", err)\n\t}\n\n\treturn nil\n}", "func (d *database) Initialize() error {\n\terr := d.createTable(sqlCreateGeneralLedgerTable)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = d.createTable(sqlCreateJournalEntriesTable)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func InitTable(rebuildFlag bool) {\n\t// create a database connection\n\tengine, err := ConnectDB()\n\tif err != nil {\n\t\tlog.Errorf(\"database connect error: %s\", err)\n\t\treturn\n\t}\n\n\tif rebuildFlag {\n\t\t// recreating the tables.\n\t\tfor _, m := range db_models.TableLists {\n\t\t\t// table drop\n\t\t\terr := engine.DropTables(m)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"droptable error: %s\", err)\n\t\t\t}\n\t\t\t// table create\n\t\t\terr = engine.CreateTables(m)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"createtable error: %s\", err)\n\t\t\t}\n\t\t\terr = engine.CreateIndexes(m)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"createindex error: %s\", err)\n\t\t\t}\n\t\t}\n\t} else {\n\t\t// create new tables, if they don't exist on the DB.\n\t\tfor _, m := range db_models.TableLists {\n\t\t\t// If the table has not been created, create it\n\t\t\tif exists, _ := engine.IsTableExist(m); !exists {\n\t\t\t\terr := engine.CreateTables(m)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Errorf(\"createtable error: %s\", err)\n\t\t\t\t}\n\t\t\t\terr = engine.CreateIndexes(m)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Errorf(\"createindex error: %s\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (db *DB) Init(ctx *Context) (err error) {\n\tconst createTable = `CREATE TABLE IF NOT EXISTS ` + tableName + ` (\n\t\tid SERIAL PRIMARY KEY,\n\t\theader VARCHAR(255),\n\t\tdata TEXT\n\t)`\n\t_, err = db.DB.ExecContext(ctx.Ctx, createTable)\n\treturn\n}", "func tablesInit() {\n\ttablesInitorders()\n\ttablesInitcontracts()\n\ttablesInitskills()\n\ttablesInitsovereignty()\n\ttablesInitcorpMembers()\n\ttablesInitprices()\n\ttablesInitassets()\n\tfor it := range c.Tables 
{\n\t\tsafeExec(c.Tables[it].create())\n\t\tlogf(\"Initialized table %s\", it)\n\t}\n}", "func (db *Postgres) initDB() error {\n\t// Create the schema\n\t// @afiune Can we rename this library?\n\tif err := migrator.Migrate(db.URI, db.SchemaPath); err != nil {\n\t\treturn errors.Wrapf(err, \"Unable to create database schema. [path:%s]\", db.SchemaPath)\n\t}\n\n\t// Add the tables to the database mappings\n\tdb.AddTableWithName(deployment{}, \"deployment\").SetKeys(true, \"id\")\n\tdb.AddTableWithName(supervisor{}, \"supervisor\").SetKeys(true, \"id\")\n\tdb.AddTableWithName(serviceGroup{}, \"service_group\").SetKeys(true, \"id\")\n\tdb.AddTableWithName(service{}, \"service\").SetKeys(true, \"id\")\n\n\t//return db.CreateTablesIfNotExists() // I don't think we can ensure the foreign keys\n\treturn nil\n}", "func Initialize(dbDriver *sql.DB) {\n\tstatement, driverError := dbDriver.Prepare(trainTable)\n\tif driverError != nil {\n\t\tlog.Println(driverError)\n\t}\n\tstatement.Exec()\n\tstatement, _ = dbDriver.Prepare(stationTable)\n\tstatement.Exec()\n\tstatement, _ = dbDriver.Prepare(scheduleTable)\n\tstatement.Exec()\n\tlog.Println(\"All tables created/initialized successfully!\")\n}", "func (db *EdDb) initDbSchema() (err error) {\n\n\t// First, the persistent parts of the database (main.), then the\n\t// ephemeral parts (mem.)\n\t_, err = db.dbConn.Exec(`\n\n CREATE TABLE IF NOT EXISTS main.person (\n id INTEGER PRIMARY KEY,\n name TEXT NOT NULL,\n numUpdates INTEGER DEFAULT 0\n );\n\n CREATE TABLE mem.sessionActivity (\n id INTEGER PRIMARY KEY,\n personId INTEGER NOT NULL,\n dateTime DATETIME DEFAULT CURRENT_TIMESTAMP\n );\n `)\n\treturn\n}", "func Init() {\n\tcreateDB(\"backendtest\")\n\tuseDB(\"backendtest\")\n\tCreateUserTable()\n\tCreateEventTable()\n\tCreateAddFriendTable()\n}", "func createTable() {\n\tvar (\n\t\tdbName = config.GetConfig().DB_NAME\n\t\tdb *sql.DB\n\t\terr error\n\t)\n\t// Connect to database\n\tdb, err = 
dal.ConnectToDatabase(dbName)\n\tcheckError(\"Connect to database:\"+dbName+\" at createTable of cmd/database/buildDB.go\", err)\n\t// Create table in database\n\terr = dal.CreateTableEmployee(db)\n\tcheckError(\"Create table Employee at createTable of cmd/database/buildDB.go\", err)\n\terr = dal.CreateTableTeam(db)\n\tcheckError(\"Create table Team at createTable of cmd/database/buildDB.go\", err)\n\terr = dal.CreateTableTeamDetail(db)\n\tcheckError(\"Create table TeamDetail at createTable of cmd/database/buildDB.go\", err)\n\tdb.Close()\n}", "func InitDB(db *sql.DB, w http.ResponseWriter, r *http.Request) {\n\n\tvar err error\n\tvar msg string\n\n\t_, err = db.Exec(`CREATE TABLE IF NOT EXISTS users (\n\t\t\t\tid serial PRIMARY KEY,\n\t\t\t\temail text NOT NULL UNIQUE,\n\t\t\t\tpassword bytea NOT NULL,\n\t\t\t\tfirst_name text,\n\t\t\t\tlast_name text,\n\t\t\t\ttype text,\n\t\t\t\tuserpic text,\n\t\t\t\tdeleted_at timestamp with time zone)`)\n\n\tif err != nil {\n\t\tmsg = fmt.Sprintln(\"creating users table\\n\", err.Error())\n\t\thttp.Error(w, msg, http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t_, err = db.Exec(`CREATE TABLE IF NOT EXISTS sessions (\n\t\t\t\tid serial PRIMARY KEY,\n\t\t\t\tuuid text,\n\t\t\t\tuser_id int references users(id),\n\t\t\t\tstarted_at timestamp with time zone,\n\t\t\t\tlast_activity timestamp with time zone,\n\t\t\t\tip text,\n\t\t\t\tuser_agent text)`)\n\n\tif err != nil {\n\t\tmsg = fmt.Sprintln(\"creating sessions table\\n\", err.Error())\n\t\thttp.Error(w, msg, http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t_, err = db.Exec(`CREATE TABLE IF NOT EXISTS levels (\n\t\t\t\tid serial PRIMARY KEY,\n\t\t\t\tname text NOT NULL UNIQUE,\n\t\t\t\tscore text)`)\n\n\tif err != nil {\n\t\tmsg = fmt.Sprintln(\"creating levels table\\n\", err.Error())\n\t\thttp.Error(w, msg, http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t_, err = db.Exec(`CREATE TABLE IF NOT EXISTS teachers (\n\t\t\t\tid serial PRIMARY 
KEY,\t\t\t\t\t\t\t\n\t\t\t\tuser_id int references users(id) NOT NULL,\n\t\t\t\tlevel_id int references levels(id) NOT NULL,\n\t\t\t\tdeleted_at timestamp with time zone)`)\n\n\tif err != nil {\n\t\tmsg = fmt.Sprintln(\"creating teachers table\\n\", err.Error())\n\t\thttp.Error(w, msg, http.StatusInternalServerError)\n\t\treturn\n\t}\n\t_, err = db.Exec(`CREATE TABLE IF NOT EXISTS students (\n\t\t\t\tid serial PRIMARY KEY,\t\t\t\t\n\t\t\t\tuser_id int references users(id) NOT NULL,\n\t\t\t\tteacher_id int references teachers(id) NOT NULL,\n\t\t\t\tlevel_id int references levels(id) NOT NULL,\n\t\t\t\tdeleted_at timestamp with time zone)`)\n\n\tif err != nil {\n\t\tmsg = fmt.Sprintln(\"creating students table\\n\", err.Error())\n\t\thttp.Error(w, msg, http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t_, err = db.Exec(`CREATE TABLE IF NOT EXISTS parents (\n\t\t\t\tid serial PRIMARY KEY,\n\t\t\t\tuser_id int references users(id),\n\t\t\t\tstudent_id int references students(id),\n\t\t\t\tdeleted_at timestamp with time zone)`)\n\n\tif err != nil {\n\t\tmsg = fmt.Sprintln(\"creating parents table\\n\", err.Error())\n\t\thttp.Error(w, msg, http.StatusInternalServerError)\n\t\treturn\n\t}\n\t_, err = db.Exec(`CREATE TABLE IF NOT EXISTS questions (\n\t\t\t\tid serial PRIMARY KEY,\n\t\t\t\tquestion text,\n\t\t\t\ttype text,\n\t\t\t\tscore real,\n\t\t\t\tcreated_at timestamp with time zone,\n\t\t\t\tdeleted_at timestamp with time zone,\n\t \t\t\tlevel_id int references levels(id),\n\t\t\t\tteacher_id int references teachers(id))`)\n\n\tif err != nil {\n\t\tmsg = fmt.Sprintln(\"creating questions table\\n\", err.Error())\n\t\thttp.Error(w, msg, http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t_, err = db.Exec(`CREATE TABLE IF NOT EXISTS answers (\n\t\t\t\tid serial PRIMARY KEY,\n\t\t\t\tname text,\n\t\t\t\tcorrect boolean,\n\t\t\t\tcreated_at timestamp with time zone,\n\t\t\t\tdeleted_at timestamp with time zone,\n\t\t\t\tquestion_id int references 
questions(id))`)\n\n\tif err != nil {\n\t\tmsg = fmt.Sprintln(\"creating answers table\\n\", err.Error())\n\t\thttp.Error(w, msg, http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t_, err = db.Exec(`CREATE TABLE IF NOT EXISTS homeworks (\n\t\t\t\tid serial PRIMARY KEY,\n\t\t\t\tscore real,\n\t\t\t\tstarted_at timestamp with time zone,\n\t\t\t\tcompleted_at timestamp with time zone,\n\t\t\t\tlevel_id int references levels(id),\n\t\t\t\tstudent_id int references students(id),\n\t \t\t\tteacher_id int references teachers(id))`)\n\n\tif err != nil {\n\t\tmsg = fmt.Sprintln(\"creating homeworks table\\n\", err.Error())\n\t\thttp.Error(w, msg, http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t_, err = db.Exec(`CREATE TABLE IF NOT EXISTS homework_specs (\n\t\t\t\tid serial PRIMARY KEY,\n\t\t\t\tanswer text,\t\t\t\t\n\t\t\t\tdate timestamp with time zone,\n\t\t\t\tquestion_id int references questions(id),\n\t\t\t\thometask_id int references homeworks(id),\t\t\t\t\n\t \t\t\tteacher_id int references teachers(id))`)\n\n\tif err != nil {\n\t\tmsg = fmt.Sprintln(\"creating homework_specs table\\n\", err.Error())\n\t\thttp.Error(w, msg, http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t_, err = db.Exec(`CREATE TABLE IF NOT EXISTS logs (\n\t\t\t\tid serial PRIMARY KEY,\n\t\t\t\tdate timestamp with time zone,\n\t\t\t\tis_error boolean,\n\t\t\t\tsys_msg text,\n\t\t\t\tmsg text,\t\t\t\t\n\t\t\t\tip text,\n\t\t\t\tuser_agent text,\n\t\t\t\tuser_id int references users(id))`)\n\n\tif err != nil {\n\t\tmsg = fmt.Sprintln(\"creating logs table\\n\", err.Error())\n\t\thttp.Error(w, msg, http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\trows, err := db.Query(SelectUserByEmail(), \"[email protected]\")\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tdefer rows.Close()\n\n\tif !rows.Next() {\n\t\tencryptedPassword, _ := bcrypt.GenerateFromPassword([]byte(\"password\"), bcrypt.MinCost)\n\t\t_, err = db.Query(InsertUser(), 
\"[email protected]\", encryptedPassword, \"Root\", \"User\", models.UserTypeAdmin, nil)\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n}", "func (db *Database) createTables(tempDb bool) error {\n\tfor _, model := range []interface{}{(*events.NewBlock)(nil),\n\t\t(*events.ValidBlock)(nil),\n\t\t(*events.NewTx)(nil),\n\t\t(*events.ValidTx)(nil),\n\t\t(*events.NewAtx)(nil),\n\t\t(*events.ValidAtx)(nil),\n\t\t(*events.RewardReceived)(nil)} {\n\t\terr := db.inst.CreateTable(model, &orm.CreateTableOptions{\n\t\t\tIfNotExists: true,\n\t\t\tTemp: tempDb,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func initDB(db *sql.DB) error {\n\t_, err := db.Exec(`CREATE TABLE devices (uid varchar(1000), timestamp timestamp, version varchar(1000), data varchar(5000), PRIMARY KEY (uid, timestamp));`)\n\treturn err\n}", "func (db *DbCtxt) InitDatabase() error {\n\tvar models []interface{}\n\tmodels = append(models,\n\t\t&Hotel{},\n\t\t&Room{},\n\t\t&RatePlan{},\n\t)\n\tfor _, model := range models {\n\t\terr := db.client.AutoMigrate(model)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (this *DBHandler) Init() {\n\tvar derr error\n\tthis.db, derr = sql.Open(\"sqlite3\", DB_FILE_NAME)\n\tif derr != nil {\n\t\tfmt.Println(derr)\n\t}\n\tthis.createNewTable(TABLE_WPA)\n\tthis.createNewTable(TABLE_WORDLISTS)\n\tthis.createNewTable(TABLE_RUNS)\n}", "func InitDb() {\n\tdbConnection.MustExec(schema)\n}", "func (d *DB) Init(c *Controller, dbFile string) error {\n\td.c = c\n\tdb, err := sql.Open(\"sqlite3\", dbFile)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\td.db = db\n\t//defer d.db.Close()\n\n\t_, err = d.db.Exec(`\n create table if not exists articles(\n\t\t\tid integer not null primary key,\n\t\t\tfeed text,\n\t\t\ttitle text,\n\t\t\tcontent text,\n\t\t\tlink text,\n\t\t\tread bool,\n\t\t\tdisplay_name string,\n\t\t\tdeleted bool,\n\t\t\tpublished DATETIME\n\t\t);`)\n\tif err != nil 
{\n\t\tlog.Println(err)\n\t\treturn err\n\t}\n\treturn nil\n}", "func InitDb(drop bool) {\n\tdb, err := dbOpen()\n\tdefer db.Close()\n\tif drop {\n\t\tstatement, err := db.Prepare(\"DROP TABLE IF EXISTS mail\")\n\t\tdefer statement.Close()\n\t\tcheckError(err)\n\t\tstatement.Exec()\n\t}\n\tstatement, err := db.Prepare(\n\t\t\"CREATE TABLE IF NOT EXISTS mail (id INTEGER PRIMARY KEY, sender TEXT, receiver TEXT, subject TEXT, text TEXT, html TEXT)\",\n\t)\n\tdefer statement.Close()\n\tcheckError(err)\n\tstatement.Exec()\n}", "func DBInit() {\n\t// Mode = \"PRODUCTION\"\n\t// if Mode == \"PRODUCTION\" {\n\t// \tDatabaseURL = \"test.sqlite3\"\n\t// \tDatabaseName = \"sqlite3\"\n\t// } else if Mode == \"DEPLOY\" {\n\tDatabaseURL = os.Getenv(\"DATABASE_URL\")\n\tDatabaseName = \"postgres\"\n\t// }\n\n\tdb, err := gorm.Open(DatabaseName, DatabaseURL)\n\tif err != nil {\n\t\tpanic(\"We can't open database!(dbInit)\")\n\t}\n\t//残りのモデルはまだ入れてない。\n\tdb.AutoMigrate(&model.Post{})\n\tdb.AutoMigrate(&model.User{})\n\tdb.AutoMigrate(&model.Room{})\n\tdefer db.Close()\n}", "func CreateTables(db *gorm.DB) {\n\terrs := db.CreateTable(&entity.Session{}, &entity.Address{}, &entity.Application{}, &entity.Category{}, &entity.Job{}, &entity.Company{}, &entity.Jobseeker{}, &entity.Role{}, &entity.User{}).GetErrors()\n\tif len(errs) > 0 {\n\t\tfmt.Println(errs[0])\n\t\treturn\n\t}\n}", "func (d *DAO) CreateTables() error {\n\treturn d.dbSession.Migrator().AutoMigrate(ds.Room{})\n}", "func CreateTables() {\n\tCreateAccountTable()\n}", "func Init() {\n\tc := config.GetConfig()\n\n\tdb, err = gorm.Open(\"postgres\", \"host=\"+c.GetString(\"db.host\")+\" user=\"+c.GetString(\"db.user\")+\" dbname=\"+c.GetString(\"db.dbname\")+\" sslmode=disable password=\"+c.GetString(\"db.password\"))\n\tif err != nil {\n\t\tpanic(\"failed to connect database : \" + err.Error())\n\t}\n\n\tdb.Exec(\"CREATE EXTENSION IF NOT EXISTS \\\"uuid-ossp\\\";\")\n\n\tdb.AutoMigrate(&models.SendTemplate{}, 
&models.SmsTemplate{}, &models.User{})\n\n\tdb.Model(&models.SmsTemplate{}).AddForeignKey(\"user_id\", \"users(id)\", \"RESTRICT\", \"RESTRICT\")\n\tdb.Model(&models.SendTemplate{}).AddForeignKey(\"sms_template_id\", \"sms_templates(id)\", \"RESTRICT\", \"RESTRICT\")\n}", "func init() {\n\tif !DB.HasTable(TaskTableName) {\n\t\tif err := DB.Set(\"gorm:table_options\", \"ENGINE=InnoDB DEFAULT CHARSET=utf8mb4\").CreateTable(&Task{}).Error; err != nil {\n\t\t\tlog.ErrorLog.Println(err)\n\t\t}\n\t}\n}", "func InitTable(){\n\tusr,err := CreateNormalUser(\"rux\",\"[email protected]\",\"000114\")\n\tswitch err{\n\tcase ERR_EMAIL_HAVE_BEEN_REGISTED:\n\t\terr = nil\n\t\tbreak\n\tcase nil:\n\t\tbreak\n\tdefault:\n\t\tpanic(err)\n\t}\n\tusr.UserInfo.Comment = []Comment{{\n\t\tID: 1,\n\t\tCurl: \"/null\",\n\t\tUserInfoID: usr.UserInfo.ID,\n\t\tStatus: state_block,\n\t},\n\t}\n\tusr.UserInfo.Post = []Post{{\n\t\tID: 1,\n\t\tUserInfoID: usr.UserInfo.ID,\n\t\tTUrl: \"/null\",\n\t\tStatus: state_block,\n\t\tComment: usr.UserInfo.Comment,\n\t},\n\t}\n\terr = db.Save(usr).Error\n\treturn\n}", "func (db *Database) InitializeDatabase() error {\n\ttables := [...]string{global.DefaultDynamoDbThreadTableName, global.DefaultDynamoDbCommentTableName}\n\ttableModelMap := map[string]interface{}{\n\t\tglobal.DefaultDynamoDbThreadTableName: dynamoModel.Thread{},\n\t\tglobal.DefaultDynamoDbCommentTableName: dynamoModel.Comment{},\n\t}\n\ttableUnitsMap := map[string][2]int64{\n\t\tglobal.DefaultDynamoDbThreadTableName: [...]int64{*db.Config.DynamoDBThreadReadUnits, *db.Config.DynamoDBThreadWriteUnits},\n\t\tglobal.DefaultDynamoDbCommentTableName: [...]int64{*db.Config.DynamoDBCommentReadUnits, *db.Config.DynamoDBCommentWriteUnits},\n\t}\n\tprefix := \"\"\n\tif db.Config.TablePrefix != nil {\n\t\tprefix = *db.Config.TablePrefix\n\t}\n\tdb.TablePrefix = prefix\n\tfor i := range tables {\n\n\t\ttables[i] = prefix + tables[i]\n\t}\n\n\tdynamoTables, err := db.DB.ListTables().All()\n\tif err != nil 
{\n\t\treturn err\n\t}\n\tfor _, t := range tables {\n\t\tfound := false\n\t\tfor _, v := range dynamoTables {\n\t\t\tif v == t {\n\t\t\t\tfound = true\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tlog.Printf(\"Creating table %v\\n\", t)\n\t\t\tnoPrefix := strings.Replace(t, prefix, \"\", 1)\n\t\t\treadUnits := tableUnitsMap[noPrefix][0]\n\t\t\twriteUnits := tableUnitsMap[noPrefix][1]\n\t\t\tprovision := db.DB.CreateTable(t, tableModelMap[noPrefix]).Provision(readUnits, writeUnits)\n\t\t\tif t == global.DefaultDynamoDbCommentTableName {\n\t\t\t\tprovision.ProvisionIndex(\"ThreadId_index\", *db.Config.DynamoDBIndexReadUnits, *db.Config.DynamoDBIndexWriteUnits)\n\t\t\t}\n\t\t\terr := provision.Run()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tlog.Printf(\"Tables created, waiting for them to be ready. Timeout - 1 minute\\n\")\n\tfor i := 0; i < 60; i++ {\n\t\tdt, err := db.DB.ListTables().All()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\trunning := 0\n\t\tfor _, v := range dt {\n\t\t\tfor _, t := range tables {\n\t\t\t\tif t == v {\n\t\t\t\t\tdesc, err := db.DB.Table(v).Describe().Run()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tif desc.Status == dynamo.ActiveStatus {\n\t\t\t\t\t\trunning++\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif running == len(tables) {\n\t\t\tlog.Printf(\"Tables created, continuing...\\n\")\n\t\t\tbreak\n\t\t} else {\n\t\t\tlog.Printf(\"Waiting for tables...\\n\")\n\t\t\ttime.Sleep(time.Second)\n\t\t}\n\t}\n\n\treturn nil\n}", "func InitSchema(tx *sql.Tx) error {\n\tfor _, obj := range allSQL {\n\t\tif _, err := tx.Exec(obj.CreateSQL()); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func createTable() {\n\t// Create platform, pipeline and job table\n\tpipeline := []string{\"packet_pipeline_v11\", \"packet_pipeline_v12\", \"packet_pipeline_v13\"}\n\tpipelineJobs := []string{\"packet_jobs_v11\", \"packet_jobs_v12\", \"packet_jobs_v13\"}\n\t// Create pipeline table in 
database\n\tfor i := range pipeline {\n\t\tquery := fmt.Sprintf(\"CREATE TABLE IF NOT EXISTS %s(build_pipelineid INT PRIMARY KEY, id INT, sha VARCHAR, ref VARCHAR, status VARCHAR, web_url VARCHAR, kibana_url VARCHAR);\", pipeline[i])\n\t\tvalue, err := Db.Query(query)\n\t\tif err != nil {\n\t\t\tglog.Error(err)\n\t\t}\n\t\tdefer value.Close()\n\t}\n\t// Create pipeline jobs table in database\n\tfor i := range pipelineJobs {\n\t\tquery := fmt.Sprintf(\"CREATE TABLE IF NOT EXISTS %s(pipelineid INT, id INT PRIMARY KEY,status VARCHAR, stage VARCHAR, name VARCHAR, ref VARCHAR, created_at VARCHAR, started_at VARCHAR, finished_at VARCHAR);\", pipelineJobs[i])\n\t\tvalue, err := Db.Query(query)\n\t\tif err != nil {\n\t\t\tglog.Error(err)\n\t\t}\n\t\tdefer value.Close()\n\t}\n\t// create build pipelines table for build related r/w operation\n\tquery := fmt.Sprintf(\"CREATE TABLE IF NOT EXISTS build_pipeline(project VARCHAR, id INT PRIMARY KEY, sha VARCHAR, ref VARCHAR, status VARCHAR, web_url VARCHAR, packet_v11_pid VARCHAR, packet_v12_pid VARCHAR, packet_v13_pid VARCHAR);\")\n\tvalue, err := Db.Query(query)\n\tif err != nil {\n\t\tglog.Error(err)\n\t}\n\tdefer value.Close()\n\t// create build pipeline jobs table in database\n\tquery = fmt.Sprintf(\"CREATE TABLE IF NOT EXISTS build_jobs(pipelineid INT, id INT PRIMARY KEY,status VARCHAR, stage VARCHAR, name VARCHAR, ref VARCHAR, created_at VARCHAR, started_at VARCHAR, finished_at VARCHAR, message VARCHAR, author_name VARCHAR);\")\n\tvalue, err = Db.Query(query)\n\tif err != nil {\n\t\tglog.Error(err)\n\t}\n\tdefer value.Close()\n}", "func dbInit(dbc co.DbConnectionRequest) {\n\tdb, err := sql.Open(\"mysql\", dbc.User+\":\"+dbc.Pwd+\"@tcp(\"+dbc.Server+\":\"+dbc.Port+\")/\")\n\tif err != nil {\n\t\tpanic(err.Error()) // proper error handling instead of panic in your app\n\t}\n\tfor _, stmt := range organizationsSchema {\n\t\tfmt.Println(stmt)\n\t\t_, err := db.Exec(stmt)\n\t\tif err != nil {\n\t\t\tpanic(err.Error()) // 
proper error handling instead of panic in your app\n\t\t}\n\t}\n\tdb.Close()\n\treturn\n}", "func Initialize(db *sql.DB, Schema schema.DBSchema) {\n\tfor table, schema := range Schema {\n\t\t// fmt.Println(db.QueryRow(`DROP TABLE pages cascade;`))\n\t\tfor _, scheme := range schema {\n\t\t\tif debugging {\n\t\t\t\tfmt.Printf(\"\\ndb.QueryRow: %s:\\n%s\\n\", table, scheme)\n\t\t\t}\n\t\t\t_, err := db.Exec(scheme)\n\t\t\tif err != nil {\n\t\t\t\tpanic(fmt.Sprintf(\"%v\", err))\n\t\t\t}\n\t\t}\n\t}\n}", "func InitTables(db *sql.DB, dropTables []string, initQueries []string) error {\n\tfor _, v := range dropTables {\n\t\t_, err := db.Exec(\"DROP TABLE IF EXISTS \" + v)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfor _, v := range initQueries {\n\t\t_, err := db.Exec(v)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func Create(t *contract.SQLTable) error {\n\tsqlstr := t.GetCreateSQL()\n\tif t.Database == \"\" {\n\t\tt.Database = \"default\"\n\t}\n\tdb, err := dblist.Get(t.Database)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = db.Exec(sqlstr)\n\treturn err\n}", "func InitDB(init bool) {\n\tif init {\n\t\tdb.Exec(`CREATE TABLE IF NOT EXISTS kv (\n\t\t\tk TEXT PRIMARY KEY,\n\t\t\tv TEXT\n\t\t)`)\n\t\tdb.Exec(`CREATE TABLE IF NOT EXISTS datasets (\n\t\t\tid INTEGER PRIMARY KEY ASC,\n\t\t\tname TEXT,\n\t\t\t-- 'data' or 'computed'\n\t\t\ttype TEXT,\n\t\t\tdata_type TEXT,\n\t\t\tmetadata TEXT DEFAULT '',\n\t\t\t-- only set if computed\n\t\t\thash TEXT,\n\t\t\tdone INTEGER DEFAULT 1\n\t\t)`)\n\t\tdb.Exec(`CREATE TABLE IF NOT EXISTS annotate_datasets (\n\t\t\tid INTEGER PRIMARY KEY ASC,\n\t\t\tdataset_id INTEGER REFERENCES datasets(id),\n\t\t\tinputs TEXT,\n\t\t\ttool TEXT,\n\t\t\tparams TEXT\n\t\t)`)\n\t\tdb.Exec(`CREATE TABLE IF NOT EXISTS pytorch_archs (\n\t\t\tid TEXT PRIMARY KEY,\n\t\t\tparams TEXT\n\t\t)`)\n\t\tdb.Exec(`CREATE TABLE IF NOT EXISTS pytorch_components (\n\t\t\tid TEXT PRIMARY KEY,\n\t\t\tparams 
TEXT\n\t\t)`)\n\t\tdb.Exec(`CREATE TABLE IF NOT EXISTS exec_nodes (\n\t\t\tid INTEGER PRIMARY KEY ASC,\n\t\t\tname TEXT,\n\t\t\top TEXT,\n\t\t\tparams TEXT,\n\t\t\tparents TEXT,\n\t\t\tworkspace TEXT\n\t\t)`)\n\t\tdb.Exec(`CREATE TABLE IF NOT EXISTS exec_ds_refs (\n\t\t\tnode_id INTEGER,\n\t\t\tdataset_id INTEGER,\n\t\t\tUNIQUE(node_id, dataset_id)\n\t\t)`)\n\t\tdb.Exec(`CREATE TABLE IF NOT EXISTS workspaces (\n\t\t\tname TEXT PRIMARY KEY\n\t\t)`)\n\t\tdb.Exec(`CREATE TABLE IF NOT EXISTS ws_datasets (\n\t\t\tdataset_id INTEGER,\n\t\t\tworkspace TEXT,\n\t\t\tUNIQUE(dataset_id, workspace)\n\t\t)`)\n\t\tdb.Exec(`CREATE TABLE IF NOT EXISTS jobs (\n\t\t\tid INTEGER PRIMARY KEY ASC,\n\t\t\tname TEXT,\n\t\t\t-- e.g. 'execnode'\n\t\t\ttype TEXT,\n\t\t\t-- how to process the job output and render the job\n\t\t\top TEXT,\n\t\t\tmetadata TEXT,\n\t\t\tstart_time TIMESTAMP,\n\t\t\tstate TEXT DEFAULT '',\n\t\t\tdone INTEGER DEFAULT 0,\n\t\t\terror TEXT DEFAULT ''\n\t\t)`)\n\n\t\t// add missing pytorch components\n\t\tcomponentPath := \"python/skyhook/pytorch/components/\"\n\t\tfiles, err := ioutil.ReadDir(componentPath)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tfor _, fi := range files {\n\t\t\tif !strings.HasSuffix(fi.Name(), \".json\") {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tid := strings.Split(fi.Name(), \".json\")[0]\n\t\t\tbytes, err := ioutil.ReadFile(filepath.Join(componentPath, fi.Name()))\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tdb.Exec(\"INSERT OR REPLACE INTO pytorch_components (id, params) VALUES (?, ?)\", id, string(bytes))\n\t\t}\n\n\t\t// add missing pytorch archs\n\t\tarchPath := \"exec_ops/pytorch/archs/\"\n\t\tfiles, err = ioutil.ReadDir(archPath)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tfor _, fi := range files {\n\t\t\tif !strings.HasSuffix(fi.Name(), \".json\") {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tid := strings.Split(fi.Name(), \".json\")[0]\n\t\t\tbytes, err := ioutil.ReadFile(filepath.Join(archPath, fi.Name()))\n\t\t\tif err 
!= nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tdb.Exec(\"INSERT OR REPLACE INTO pytorch_archs (id, params) VALUES (?, ?)\", id, string(bytes))\n\t\t}\n\n\t\t// add default workspace if it doesn't exist\n\t\tvar count int\n\t\tdb.QueryRow(\"SELECT COUNT(*) FROM workspaces WHERE name = ?\", \"default\").Scan(&count)\n\t\tif count == 0 {\n\t\t\tdb.Exec(\"INSERT INTO workspaces (name) VALUES (?)\", \"default\")\n\t\t}\n\t}\n\n\t// now run some database cleanup steps\n\n\t// mark jobs that are still running as error\n\tdb.Exec(\"UPDATE jobs SET error = 'terminated', done = 1 WHERE done = 0\")\n\n\t// delete temporary datasetsTODO\n}", "func (d *Database) Init() error {\n\tif _, err := d.db.Exec(\"CREATE TABLE IF NOT EXISTS leaves (id INTEGER PRIMARY KEY, data BLOB)\"); err != nil {\n\t\treturn err\n\t}\n\tif _, err := d.db.Exec(\"CREATE TABLE IF NOT EXISTS tiles (height INTEGER, level INTEGER, offset INTEGER, hashes BLOB, PRIMARY KEY (height, level, offset))\"); err != nil {\n\t\treturn err\n\t}\n\t_, err := d.db.Exec(\"CREATE TABLE IF NOT EXISTS leafMetadata (id INTEGER PRIMARY KEY, module TEXT, version TEXT, fileshash TEXT, modhash TEXT)\")\n\treturn err\n}", "func InitDatabase(db *sql.DB) {\n\tcreateLinksTableSQL := `CREATE TABLE IF NOT EXISTS links (\n\t\t\"id\" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,\n\t\t\"url\" TEXT,\n\t\t\"created_at\" TEXT\n\t);`\n\n\tstatement, err := db.Prepare(createLinksTableSQL)\n\tif err != nil {\n\t\tlog.Printf(\"Error creating links table: %v\\n\", err)\n\t}\n\tstatement.Exec()\n}", "func createTable() {\n\tstatement1 := \"create table IF NOT EXISTS Router_Logs (log_id text primary key not null unique, source_address text not null, destination_address text not null, source_port integer not null, destination_port integer not null, protocol integer not null, packets interger not null, bytes interger not null, flags text not null, start_time text not null, duration real not null, end_time text not null, sensor text not null, hash text 
not null unique);\"\n\t_, err := db.Exec(statement1)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tstatement2 := \"create table IF NOT EXISTS Linux_Logs (log_id text primary key not null unique, log_type text not null, date_time text not null, data text not null, hash text not null unique);\"\n\t_, err = db.Exec(statement2)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tstatement3 := \"create table IF NOT EXISTS Windows_Logs (log_id text primary key not null unique, keywords text not null, date_time text not null, source text not null, event_id integer not null, task_category text not null, task_description text not null, hash text not null unique);\"\n\t_, err = db.Exec(statement3)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tstatement4 := \"create table IF NOT EXISTS Log_Records (log_record_id integer primary key autoincrement, log_type text not null, windows_log_id text null, linux_log_id text null, router_log_id text null, foreign key(windows_log_id) references Windows_Logs(log_id), foreign key(linux_log_id) references Linux_Logs(log_id), foreign key(router_log_id) references Router_Logs(log_id));\"\n\t_, err = db.Exec(statement4)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func (mg *Schema) Create(tableName string, cb CB) {\n\ttable := &TableManager{}\n\tmg.TableName = tableName\n\tmg.CreateNew = true\n\tif mg.DB == nil {\n\t\tdb := InitDatabase()\n\t\tmg.DB = db\n\t}\n\ttable.Schema = mg\n\n\tqueryGenerator := &QueryGenerator{}\n\tqueryGenerator.Table = table\n\tqueryGenerator.Database = mg.DB\n\tcb(table)\n\n\t//Prepare SQL Statement\n\tqueryGenerator.ProcessMigration()\n}", "func (w *Workloader) createTables(ctx context.Context) error {\n\tquery := `\nCREATE TABLE IF NOT EXISTS nation (\n N_NATIONKEY BIGINT NOT NULL,\n N_NAME CHAR(25) NOT NULL,\n N_REGIONKEY BIGINT NOT NULL,\n N_COMMENT VARCHAR(152),\n PRIMARY KEY (N_NATIONKEY)\n)`\n\n\tif err := w.createTableDDL(ctx, query, \"nation\", \"creating\"); err != nil {\n\t\treturn err\n\t}\n\n\tquery = `\nCREATE TABLE 
IF NOT EXISTS region (\n R_REGIONKEY BIGINT NOT NULL,\n R_NAME CHAR(25) NOT NULL,\n R_COMMENT VARCHAR(152),\n PRIMARY KEY (R_REGIONKEY)\n)`\n\tif err := w.createTableDDL(ctx, query, \"region\", \"creating\"); err != nil {\n\t\treturn err\n\t}\n\n\tquery = `\n\tCREATE TABLE IF NOT EXISTS part (\n\t P_PARTKEY BIGINT NOT NULL,\n\t P_NAME VARCHAR(55) NOT NULL,\n\t P_MFGR CHAR(25) NOT NULL,\n\t P_BRAND CHAR(10) NOT NULL,\n\t P_TYPE VARCHAR(25) NOT NULL,\n\t P_SIZE BIGINT NOT NULL,\n\t P_CONTAINER CHAR(10) NOT NULL,\n\t P_RETAILPRICE DECIMAL(15, 2) NOT NULL,\n\t P_COMMENT VARCHAR(23) NOT NULL,\n\t PRIMARY KEY (P_PARTKEY)\n\t)`\n\tif err := w.createTableDDL(ctx, query, \"part\", \"creating\"); err != nil {\n\t\treturn err\n\t}\n\n\tquery = `\nCREATE TABLE IF NOT EXISTS supplier (\n S_SUPPKEY BIGINT NOT NULL,\n S_NAME CHAR(25) NOT NULL,\n S_ADDRESS VARCHAR(40) NOT NULL,\n S_NATIONKEY BIGINT NOT NULL,\n S_PHONE CHAR(15) NOT NULL,\n S_ACCTBAL DECIMAL(15, 2) NOT NULL,\n S_COMMENT VARCHAR(101) NOT NULL,\n PRIMARY KEY (S_SUPPKEY)\n)`\n\tif err := w.createTableDDL(ctx, query, \"supplier\", \"creating\"); err != nil {\n\t\treturn err\n\t}\n\n\tquery = `\n\tCREATE TABLE IF NOT EXISTS partsupp (\n\t PS_PARTKEY BIGINT NOT NULL,\n\t PS_SUPPKEY BIGINT NOT NULL,\n\t PS_AVAILQTY BIGINT NOT NULL,\n\t PS_SUPPLYCOST DECIMAL(15, 2) NOT NULL,\n\t PS_COMMENT VARCHAR(199) NOT NULL,\n\t PRIMARY KEY (PS_PARTKEY, PS_SUPPKEY)\n\t)`\n\tif err := w.createTableDDL(ctx, query, \"partsupp\", \"creating\"); err != nil {\n\t\treturn err\n\t}\n\n\tquery = `\n\tCREATE TABLE IF NOT EXISTS customer (\n\t C_CUSTKEY BIGINT NOT NULL,\n\t C_NAME VARCHAR(25) NOT NULL,\n\t C_ADDRESS VARCHAR(40) NOT NULL,\n\t C_NATIONKEY BIGINT NOT NULL,\n\t C_PHONE CHAR(15) NOT NULL,\n\t C_ACCTBAL DECIMAL(15, 2) NOT NULL,\n\t C_MKTSEGMENT CHAR(10) NOT NULL,\n\t C_COMMENT VARCHAR(117) NOT NULL,\n\t PRIMARY KEY (C_CUSTKEY)\n\t)`\n\tif err := w.createTableDDL(ctx, query, \"customer\", \"creating\"); err != nil {\n\t\treturn 
err\n\t}\n\n\tquery = `\n\tCREATE TABLE IF NOT EXISTS orders (\n\t O_ORDERKEY BIGINT NOT NULL,\n\t O_CUSTKEY BIGINT NOT NULL,\n\t O_ORDERSTATUS CHAR(1) NOT NULL,\n\t O_TOTALPRICE DECIMAL(15, 2) NOT NULL,\n\t O_ORDERDATE DATE NOT NULL,\n\t O_ORDERPRIORITY CHAR(15) NOT NULL,\n\t O_CLERK CHAR(15) NOT NULL,\n\t O_SHIPPRIORITY BIGINT NOT NULL,\n\t O_COMMENT VARCHAR(79) NOT NULL,\n\t PRIMARY KEY (O_ORDERKEY)\n\t)`\n\tif err := w.createTableDDL(ctx, query, \"orders\", \"creating\"); err != nil {\n\t\treturn err\n\t}\n\n\tquery = `\n\tCREATE TABLE IF NOT EXISTS lineitem (\n\t L_ORDERKEY BIGINT NOT NULL,\n\t L_PARTKEY BIGINT NOT NULL,\n\t L_SUPPKEY BIGINT NOT NULL,\n\t L_LINENUMBER BIGINT NOT NULL,\n\t L_QUANTITY DECIMAL(15, 2) NOT NULL,\n\t L_EXTENDEDPRICE DECIMAL(15, 2) NOT NULL,\n\t L_DISCOUNT DECIMAL(15, 2) NOT NULL,\n\t L_TAX DECIMAL(15, 2) NOT NULL,\n\t L_RETURNFLAG CHAR(1) NOT NULL,\n\t L_LINESTATUS CHAR(1) NOT NULL,\n\t L_SHIPDATE DATE NOT NULL,\n\t L_COMMITDATE DATE NOT NULL,\n\t L_RECEIPTDATE DATE NOT NULL,\n\t L_SHIPINSTRUCT CHAR(25) NOT NULL,\n\t L_SHIPMODE CHAR(10) NOT NULL,\n\t L_COMMENT VARCHAR(44) NOT NULL,\n\t PRIMARY KEY (L_ORDERKEY, L_LINENUMBER)\n\t)\n\t`\n\tif err := w.createTableDDL(ctx, query, \"lineitem\", \"creating\"); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (s *Service) initDDL() error {\n\tconn := s.db.Get(context.Background())\n\tdefer s.db.Put(conn)\n\n\t// Verify if the database exist.\n\tstmt := conn.Prep(`SELECT name FROM sqlite_master WHERE type='table' AND name=$name`)\n\tstmt.SetText(\"$name\", \"history\")\n\tdefer stmt.Finalize()\n\n\tswitch hasRow, err := stmt.Step(); {\n\tcase err != nil:\n\t\treturn err\n\n\tcase !hasRow:\n\t\treturn s.createDDL(conn, \"\")\n\n\tdefault:\n\t\tname, err := s.lastEntry(conn)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn s.createDDL(conn, name)\n\t}\n}", "func createSchema(db *pg.DB) error {\n\tvar u *Execution\n\tvar s *LoadStatus\n\tmodels := 
[]interface{}{\n\t\tu,\n\t\ts,\n\t}\n\n\tfor _, model := range models {\n\t\terr := db.Model(model).CreateTable(&orm.CreateTableOptions{\n\t\t\tTemp: false,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func createTables(db *sql.DB) {\n\tvar sql = `\n\t\tcreate table if not exists ` + dbname + `.DemoTable\n\t\t(\n\t\t\tid int not null auto_increment,\n\t\t\tfirst_name varchar(50) not null,\n\t\t\tlast_name varchar(50) not null,\n\t\t\tprimary key (id)\n\t\t) default character set utf8mb4 collate utf8mb4_general_ci;\n\t`\n\tvar rows, rowErr = db.Query(sql)\n\tif rowErr != nil {\n\t\tfmt.Println(rowErr)\n\t\treturn\n\t}\n\tdefer rows.Close()\n}", "func (db *UserDatabase) Init() error {\n\tvar err error\n\ttx, err := db.Begin()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, stmt := range schemaV1 {\n\t\tlog.Println(stmt)\n\t\ttx.MustExec(stmt)\n\t}\n\tdefaultPassword := getDefaultPassword()\n\t_, err = tx.CreateUser(claudia.ApplicationAdminUsername, defaultPassword)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsessionAuthKey := securecookie.GenerateRandomKey(32)\n\tsessionCryptKey := securecookie.GenerateRandomKey(32)\n\tcrt, key := util.GenerateSelfSignedCert()\n\ttx.MustExec(\"INSERT INTO configuration (schema_version, session_auth_key, session_crypt_key, private_key, public_certificate) VALUES ($1, $2, $3, $4, $5)\",\n\t\tSchemaVersion, sessionAuthKey, sessionCryptKey, key, crt)\n\ttx.Commit()\n\ttx, err = db.Begin()\n\tif err != nil {\n\t\treturn err\n\t}\n\tconf, err := tx.GetConfiguration()\n\tif err != nil {\n\t\ttx.Rollback()\n\t\treturn err\n\t}\n\ttx.Commit()\n\tlog.Printf(\"Successfully initialized database (schema: %d)\", conf.SchemaVersion)\n\treturn nil\n}", "func (s *AccountInitializerService) CreateTables() error {\n\n\tfmt.Println(\"--> AccountInitializerService:CreateTables\")\n\n\ts.db.CreateTable(&models.Account{})\n\n\tfmt.Println(\"<-- AccountInitializerService:CreateTables\")\n\treturn nil\n}", "func Init(dbpath 
string) {\n\tdatabase.db, err = sql.Open(\"sqlite3\", dbpath+\"?loc=auto&parseTime=true\")\n\t// database.db, err = sql.Open(\"mysql\", \"Username:Password@tcp(Host:Port)/standardnotes?parseTime=true\")\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tif database.db == nil {\n\t\tlog.Fatal(\"db nil\")\n\t}\n\tdatabase.createTables()\n}", "func Init() {\n\tdb, err = gorm.Open(getDBConfig())\n\tif err != nil {\n\t\tpanic(\"failed to connect database\")\n\t}\n\n\t// スキーマのマイグレーション\n\tdb.AutoMigrate(&model.Project{})\n\tdb.AutoMigrate(model.Tag{}).AddForeignKey(\"project_id\", \"projects(id)\", \"RESTRICT\", \"RESTRICT\")\n\tdb.AutoMigrate(model.Member{}).AddForeignKey(\"project_id\", \"projects(id)\", \"RESTRICT\", \"RESTRICT\")\n\tdb.AutoMigrate(model.ShuffleLogHead{}).AddForeignKey(\"project_id\", \"projects(id)\", \"RESTRICT\", \"RESTRICT\")\n\tdb.AutoMigrate(model.ShuffleLogDetail{}).AddForeignKey(\"shuffle_log_head_id\", \"shuffle_log_heads(id)\", \"RESTRICT\", \"RESTRICT\")\n}", "func (r *DarwinTimetable) initDB() error {\n\n\tbuckets := []string{\n\t\t\"Meta\",\n\t\t\"DarwinAssoc\",\n\t\t\"DarwinJourney\"}\n\n\treturn r.db.Update(func(tx *bolt.Tx) error {\n\n\t\tfor _, n := range buckets {\n\t\t\tvar nb []byte = []byte(n)\n\t\t\tif bucket := tx.Bucket(nb); bucket == nil {\n\t\t\t\tif _, err := tx.CreateBucket(nb); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n}", "func InitDb() *gorm.DB {\n\t// Openning file\n\tdb, err := gorm.Open(\"sqlite3\", \"./data.db\")\n\t// Display SQL queries\n\tdb.LogMode(true)\n\n\t// Error\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t// Creating the table\n\tif !db.HasTable(&Users{}) {\n\t\tdb.CreateTable(&Users{})\n\t\tdb.Set(\"gorm:table_options\", \"ENGINE=InnoDB\").CreateTable(&Users{})\n\t}\n\n\treturn db\n}", "func setupDatabase(db *sql.DB) error {\n\ttx, err := db.Begin()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer tx.Rollback()\n\n\t// Creating a timescaledb extension for the 
database\n\tconst ext = `CREATE EXTENSION IF NOT EXISTS timescaledb CASCADE;`\n\tif _, err = tx.Exec(ext); err != nil {\n\t\treturn err\n\t}\n\n\t// creating schema in the database\n\tconst sch = `CREATE SCHEMA IF NOT EXISTS \"audit\"`\n\tif _, err = tx.Exec(sch); err != nil {\n\t\treturn err\n\t}\n\n\t// creating the audit log table\n\tconst tbl = `CREATE TABLE IF NOT EXISTS audit.\"Logs\" (\n\t\t\"Timestamp\" TIMESTAMPTZ NOT NULL,\n\t\t\"UserId\" text NOT NULL,\n\t\t\"Action\" text NOT NULL\n\t );`\n\tif _, err = tx.Exec(tbl); err != nil {\n\t\treturn err\n\t}\n\n\t// creating the hypertable of audit log table for timescaledb\n\tconst hptbl = `SELECT create_hypertable('audit.\"Logs\"', 'Timestamp',if_not_exists => true);`\n\tif _, err = tx.Exec(hptbl); err != nil {\n\t\treturn err\n\t}\n\treturn tx.Commit()\n}", "func Create(sqlStatement string) {\n\tif resetDB {\n\t\tfmt.Println(\"Alte Datenbank wird gelöscht!\")\n\t\tos.Remove(dbFile)\n\t}\n\n\tdb, err := sql.Open(\"sqlite3\", dbFile)\n\tcheckErr(err)\n\n\tmainDB = db\n\n\tif resetDB {\n\t\tfmt.Println(\"\\nTabelle wurde erstellt!\")\n\t\t_, err = db.Exec(sqlStatement)\n\t\tcheckErr(err)\n\t}\n}", "func (m *Mysql) CreateTables() error {\n\tfor tableName, schema := range schemas {\n\t\tquery := fmt.Sprintf(\"CREATE TABLE IF NOT EXISTS %v %v\", tableName, schema)\n\t\t_, err := m.IDB.Exec(query)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (repo *DbInitializerRepo) Init() {\n\n\t// Check database version\n\t_, err := repo.dbHandler.Execute(`CREATE TABLE IF NOT EXISTS db_info (\n\t\t\t\t\t\t\t\t\t\tVersion int(11) NOT NULL\n\t\t\t\t\t\t\t\t\t );`)\n\n\tvar version int\n\terr = repo.dbHandler.QueryRow(`SELECT * FROM db_info;`).Scan(&version)\n\tif err != nil {\n\t\tif err == ErrNoRows {\n\t\t\t// No db config yet\n\t\t\t_, err = repo.dbHandler.Execute(`INSERT INTO db_info VALUES(?);`, dbVersion)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tversion = -1\n\t\t} else 
{\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\tif version != dbVersion && checkVersion {\n\t\t// Drop all tables\n\t\t_, err = repo.dbHandler.Execute(`DROP TABLE IF EXISTS client, user, message, channel;`)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\t// Update the version\n\t\t_, err = repo.dbHandler.Execute(`UPDATE db_info SET Version = ?;`, dbVersion)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\t// Create the tables if it doesnt exists\n\t_, err = repo.dbHandler.Execute(`CREATE TABLE IF NOT EXISTS client (\n\t\t\t\t\t\t\t\t\t\tClientId int(11) NOT NULL,\n\t\t\t\t\t\t\t\t\t\tDisplayName varchar(255) NOT NULL,\n\t\t\t\t\t\t\t\t\t\tFirstName varchar(255) NOT NULL,\n\t\t\t\t\t\t\t\t\t\tLastName varchar(255) NOT NULL,\n\t\t\t\t\t\t\t\t\t\tEmail varchar(255) NOT NULL,\n\t\t\t\t\t\t\t\t\t\tPRIMARY KEY (ClientId)\n\t\t\t\t\t\t\t\t\t );`)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t_, err = repo.dbHandler.Execute(`CREATE TABLE IF NOT EXISTS user (\n\t\t\t\t\t\t\t\t \t\tUserId int(11) NOT NULL AUTO_INCREMENT,\n\t\t\t\t\t\t\t\t \t \tUsername varchar(64),\n\t\t\t\t\t\t\t\t \t\tPasswordHash varchar(64),\n\t\t\t\t\t\t\t\t \t\tGoogleId varchar(64),\n\t\t\t\t\t\t\t\t \t\tPRIMARY KEY (UserId)\n\t\t\t\t\t\t\t\t\t );`)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t_, err = repo.dbHandler.Execute(`CREATE TABLE IF NOT EXISTS channel (\n\t\t\t\t\t\t\t\t \t \tChannelId int(11) NOT NULL AUTO_INCREMENT,\n\t\t\t\t\t\t\t\t \t \tName varchar(255) NOT NULL,\n\t\t\t\t\t\t\t\t \t \tPRIMARY KEY (ChannelId)\n\t\t\t\t\t\t\t\t\t );`)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t_, err = repo.dbHandler.Execute(`CREATE TABLE IF NOT EXISTS message (\n\t\t\t\t\t\t\t\t\t\tMessageId INT NOT NULL AUTO_INCREMENT,\n\t\t\t\t\t\t\t\t\t\tBody TEXT NOT NULL,\n\t\t\t\t\t\t\t\t\t\tTime DATETIME NOT NULL,\n\t\t\t\t\t\t\t\t\t\tClientId INT NOT NULL,\n\t\t\t\t\t\t\t\t\t\tChannelId INT NOT NULL,\n\t\t\t\t\t\t\t\t\t\tPRIMARY KEY (MessageId),\n\t\t\t\t\t\t\t\t\t\tINDEX message_client_idx (ClientId 
ASC),\n\t\t\t\t\t\t\t\t\t\tINDEX message_channel_idx (ChannelId ASC),\n\t\t\t\t\t\t\t\t\t\tCONSTRAINT message_client\n\t\t\t\t\t\t\t\t\t\t FOREIGN KEY (ClientId)\n\t\t\t\t\t\t\t\t\t\t REFERENCES client (ClientId)\n\t\t\t\t\t\t\t\t\t\t ON DELETE NO ACTION\n\t\t\t\t\t\t\t\t\t\t ON UPDATE NO ACTION,\n\t\t\t\t\t\t\t\t\t\tCONSTRAINT message_channel\n\t\t\t\t\t\t\t\t\t\t FOREIGN KEY (ChannelId)\n\t\t\t\t\t\t\t\t\t\t REFERENCES channel (ChannelId)\n\t\t\t\t\t\t\t\t\t\t ON DELETE NO ACTION\n\t\t\t\t\t\t\t\t\t\t ON UPDATE NO ACTION\n\t\t\t\t\t\t\t\t\t);`)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func (st *Store) initDB() error {\n\n\tvar err error\n\n\tver, err := st.schemaVersion()\n\tif err != nil {\n\t\treturn err\n\t}\n\tswitch ver {\n\tcase 0:\n\t\t// starting from scratch\n\t\tschema := `\nCREATE TABLE url (\n\tid INTEGER PRIMARY KEY,\n\turl TEXT NOT NULL,\n\thash TEXT NOT NULL,\n\tpage_id INTEGER NOT NULL,\n\tcreated TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL,\n\tFOREIGN KEY(page_id) REFERENCES page(id)\n);\n\nCREATE TABLE page (\n\tid INTEGER PRIMARY KEY,\n\tcanonical_url TEXT NOT NULL,\n\ttitle TEXT NOT NULL,\n\tcreated TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL\n);\n\nCREATE TABLE warning (\n\tid INTEGER PRIMARY KEY,\n\tpage_id INTEGER NOT NULL,\n\tkind TEXT NOT NULL,\n\tquant INT NOT NULL,\n\tcreated TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL,\n\tFOREIGN KEY(page_id) REFERENCES page(id)\n);\n\n\nCREATE TABLE version (\n\tver INTEGER NOT NULL );\n\nINSERT INTO version (ver) VALUES (1);\n`\n\t\t//\t\t`CREATE INDEX article_tag_artid ON article_tag(article_id)`,\n\t\t//\t\t`CREATE INDEX article_url_artid ON article_url(article_id)`,\n\n\t\t_, err = st.db.Exec(schema)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tbreak\n\tcase 1: // all good. 
this is what we're expecting\n\t\tbreak\n\tdefault:\n\t\treturn fmt.Errorf(\"Bad db schema version (expected 1, got %d)\", ver)\n\t}\n\n\treturn nil\n}", "func (c *bankCase) Initialize(ctx context.Context, db *sql.DB) error {\n\tlog.Infof(\"[%s] start to init...\", c)\n\tdefer func() {\n\t\tlog.Infof(\"[%s] init end...\", c)\n\t}()\n\tfor i := 0; i < c.cfg.Tables; i++ {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn nil\n\t\tdefault:\n\t\t}\n\t\terr := c.initDB(ctx, db, i)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (d *Database) createTables() {\n\tquery := `\n\tCREATE TABLE IF NOT EXISTS counter\n\t(\n\t\tcounter_id INTEGER PRIMARY KEY,\n\t\tval INTEGER NOT NULL\n\t)`\n\n\tif _, err := d.db.Exec(d.ctx, query); err != nil {\n\t\tpanic(err)\n\t}\n}", "func (db *Database) Init() {\n\tdata, dbErr := tiedot.OpenDB(db.Location)\n\tif dbErr != nil {\n\t\tlog.Error(dbConnectionError{\n\t\t\tmsg: \"Failed to connect to the tiedot database\",\n\t\t\terr: dbErr,\n\t\t})\n\t}\n\n\t// Set up the collections - throw away the error for now.\n\tfor _, c := range db.Collections {\n\t\tdata.Create(c.Name)\n\t\tdata.Use(c.Name).Index(c.Index)\n\t}\n\n\tdb.Data = data\n}", "func (s *Server) InititalizeEmptyDatabase() error {\n\tctx := context.Background()\n\tdefer ctx.Done()\n\ttx, err := s.Database.BeginTx(ctx, &sql.TxOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar count int\n\terr = tx.QueryRow(\"SELECT COUNT(*) FROM sqlite_master WHERE type='table' AND name='users';\").Scan(&count)\n\tif err != nil || count == 1 {\n\t\ttx.Rollback()\n\t\treturn errors.New(\"Database already initialized\")\n\t}\n\t_, err = tx.Exec(`CREATE TABLE users (\n username VARCHAR(255) NOT NULL,\n\t\tpassword VARCHAR(1024) NOT NULL,\n\t\tPRIMARY KEY (username)\n\t);`)\n\tif err != nil {\n\t\ttx.Rollback()\n\t\treturn err\n\t}\n\t_, err = tx.Exec(`CREATE TABLE sessions (\n username VARCHAR(255) NOT NULL,\n\t\tsession VARCHAR(255) NOT NULL,\n\t\tPRIMARY KEY 
(username,session)\n\t);`)\n\tif err != nil {\n\t\ttx.Rollback()\n\t\treturn err\n\t}\n\t_, err = tx.Exec(`CREATE TABLE permissions (\n username VARCHAR(255) NOT NULL,\n\t\tscope VARCHAR(64) NOT NULL,\n\t\tpermission VARCHAR(64) NOT NULL,\n\t\tPRIMARY KEY (username,scope),\n\t\tCONSTRAINT FK_UserPermission FOREIGN KEY (username) REFERENCES username\n\t);`)\n\tif err != nil {\n\t\ttx.Rollback()\n\t\treturn err\n\t}\n\treturn tx.Commit()\n}", "func (s *Store) InitTable(ctx context.Context, m map[string]string) error {\n\tif _, err := s.db.ExecContext(ctx, create); err != nil {\n\t\treturn err\n\t}\n\tfor path, url := range m {\n\t\tif err := s.setURL(ctx, path, url); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func initTables(ctx context.Context, ieFactory func() ie.InternalExecutor) {\n\texec := ieFactory()\n\texec.ApplySessionOverride(ie.NewOptsBuilder().Database(MetricDBConst).Internal(true).Finish())\n\tmustExec := func(sql string) {\n\t\tif err := exec.Exec(ctx, sql, ie.NewOptsBuilder().Finish()); err != nil {\n\t\t\tpanic(fmt.Sprintf(\"[Metric] init metric tables error: %v, sql: %s\", err, sql))\n\t\t}\n\t}\n\tif metric.GetForceInit() {\n\t\tmustExec(SqlDropDBConst)\n\t}\n\tmustExec(SqlCreateDBConst)\n\tvar createCost time.Duration\n\tdefer func() {\n\t\tlogutil.Debugf(\n\t\t\t\"[Metric] init metrics tables: create cost %d ms\",\n\t\t\tcreateCost.Milliseconds())\n\t}()\n\tinstant := time.Now()\n\n\tdescChan := make(chan *prom.Desc, 10)\n\n\tgo func() {\n\t\tfor _, c := range metric.InitCollectors {\n\t\t\tc.Describe(descChan)\n\t\t}\n\t\tfor _, c := range metric.InternalCollectors {\n\t\t\tc.Describe(descChan)\n\t\t}\n\t\tclose(descChan)\n\t}()\n\n\tmustExec(SingleMetricTable.ToCreateSql(ctx, true))\n\tfor desc := range descChan {\n\t\tview := getView(ctx, desc)\n\t\tsql := view.ToCreateSql(ctx, true)\n\t\tmustExec(sql)\n\t}\n\n\tcreateCost = time.Since(instant)\n}", "func CreateTables(svc *psql.PSQL) error {\n\n\tfor _, v := range Tables 
{\n\n\t\terr := svc.CreateTable(v)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\n\tfor _, v := range DefaultInserts {\n\n\t\terr := svc.InsertInto(v)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\n\treturn nil\n\n}", "func CreateTable() error {\n\tgorm.MysqlConn().AutoMigrate(&User{},\n\t\t&AppInfo{},\n\t\t&Article{},\n\t\t&ArticlePic{},\n\t\t&Category{},\n\t\t&CategoryArticle{},\n\t\t&Comment{})\n\treturn nil\n}", "func (uts *UnapprovedTransactions) InitDB() error {\n\treturn uts.DB.CreateTable(uts.getTableName(), \"VARBINARY(100)\", \"LONGBLOB\")\n}", "func createTable() {\n sql := `CREATE TABLE IF NOT EXISTS userinfo_tab_0 (\nid INT(11) NOT NULL AUTO_INCREMENT COMMENT 'primary key',\nusername VARCHAR(64) NOT NULL COMMENT 'unique id',\nnickname VARCHAR(128) NOT NULL DEFAULT '' COMMENT 'user nickname, can be empty',\npasswd VARCHAR(32) NOT NULL COMMENT 'md5 result of real password and key',\nskey VARCHAR(16) NOT NULL COMMENT 'secure key of each user',\nheadurl VARCHAR(128) NOT NULL DEFAULT '' COMMENT 'user headurl, can be empty',\nuptime int(64) NOT NULL DEFAULT 0 COMMENT 'update time: unix timestamp',\nPRIMARY KEY(id),\nUNIQUE KEY username_unique (username)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='user info table';`\n db.Exec(sql)\n for i := 1; i < 20; i++ {\n tableName := fmt.Sprintf(\"userinfo_tab_%d\", i)\n db.Exec(fmt.Sprintf(\"create table if not exists %s like userinfo_tab_0\", tableName))\n }\n}", "func InitDb() {\n\tconfig, err := dbConfig()\n\tif err != nil {\n\t\tglog.Fatalln(err)\n\t}\n\tpsqlInfo := fmt.Sprintf(\"host=%s port=%s user=%s \"+\n\t\t\"password=%s dbname=%s sslmode=disable\",\n\t\tconfig[dbhost], config[dbport],\n\t\tconfig[dbuser], config[dbpass], config[dbname])\n\n\tDb, err = sql.Open(\"postgres\", psqlInfo)\n\tif err != nil {\n\t\tglog.Fatalln(err)\n\t}\n\terr = Db.Ping()\n\tif err != nil {\n\t\tglog.Fatalln(err)\n\t}\n\tglog.Infoln(\"Successfully connected to Database!\")\n\t// Create table in database if not 
present\n\tcreateTable()\n}", "func dbInit() {\n\t//User Input\n\tusernm := creds.UserName\n\tpass := creds.Password\n\tDBName := creds.DBName\n\tlit.Debug(\"Hit dbInit \" + DBName)\n\tlog.Println(usernm + \":\" + pass + \"@tcp(127.0.0.1:3306)/\")\n\n\tdb, err := sql.Open(\"mysql\", usernm+\":\"+pass+\"@tcp(127.0.0.1:3306)/\")\n\terr = db.Ping() //Need to ping to generate connection and trigger err\n\tif err != nil {\n\t\tlit.Error(\"Error in Init Log-in\")\n\t\tcreds = getCreds()\n\t\tfile, _ := json.MarshalIndent(creds, \"\", \"\\t\")\n\t\t_ = ioutil.WriteFile(\"configs/creds.json\", file, 0644)\n\t} else {\n\t\tlit.Debug(\"Attempt DB Creation\")\n\t\t_, err = db.Exec(\"CREATE DATABASE \" + DBName)\n\t\tif err != nil {\n\t\t\tlog.Println(err.Error())\n\t\t} else {\n\t\t\tlog.Println(\"Database Created:\", \"\\\"\"+DBName+\"\\\"\")\n\t\t}\n\t\tdb.Exec(\"USE \" + DBName)\n\t\tstmt, err := db.Prepare(\"CREATE TABLE `employee` (`id` int(6) unsigned NOT NULL AUTO_INCREMENT,`name` varchar(30) NOT NULL,`city` varchar(30) NOT NULL,PRIMARY KEY (`id`));\")\n\t\tif err != nil {\n\t\t\tlog.Println(err.Error())\n\t\t} else {\n\t\t\t_, err = stmt.Exec()\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err.Error())\n\t\t\t} else {\n\t\t\t\tlog.Println(\"Table Created\", \"\\\"\"+\"employees\"+\"\\\"\")\n\t\t\t}\n\t\t}\n\t}\n}", "func TestInitSchemaNoMigrations(t *testing.T) {\n\tforEachDatabase(t, func(db *sqlx.DB) {\n\t\tm := New(db, DefaultOptions, []*Migration{})\n\t\tm.InitSchema(func(tx *sqlx.DB) error {\n\t\t\tq := `CREATE TABLE \"animals\" (\"id\" serial,\"created_at\" timestamp with time zone,\"updated_at\" timestamp with time zone,\"deleted_at\" timestamp with time zone,\"name\" text , PRIMARY KEY (\"id\"))`\n\t\t\t_, err := tx.Exec(q)\n\t\t\treturn err\n\t\t})\n\n\t\tassert.NoError(t, m.Migrate())\n\t\tassert.True(t, m.hasTable(\"animals\"))\n\t\tassert.Equal(t, 1, tableCount(t, db, \"migrations\"))\n\t})\n}", "func CreateTables() error {\n\tfor _, currency := range 
common.Currencies {\n\t\tquery := fmt.Sprintf(\n\t\t\t\"CREATE TABLE IF NOT EXISTS %s (effective_date DATE PRIMARY KEY, %s)\",\n\t\t\tcurrency,\n\t\t\tstrings.Join(common.Currencies, \" DOUBLE PRECISION, \")+\" DOUBLE PRECISION\",\n\t\t)\n\t\t_, err := db.Exec(query)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (db *Database) Start() error {\n\treturn db.createTables(false)\n}", "func init() {\n\n\t//load in environment variables from .env\n\t//will print error message when running from docker image\n\t//because env file is passed into docker run command\n\tenvErr := godotenv.Load(\"/home/ubuntu/go/src/github.com/200106-uta-go/BAM-P2/.env\")\n\tif envErr != nil {\n\t\tif !strings.Contains(envErr.Error(), \"no such file or directory\") {\n\t\t\tlog.Println(\"Error loading .env: \", envErr)\n\t\t}\n\t}\n\n\tvar server = os.Getenv(\"DB_SERVER\")\n\tvar dbPort = os.Getenv(\"DB_PORT\")\n\tvar dbUser = os.Getenv(\"DB_USER\")\n\tvar dbPass = os.Getenv(\"DB_PASS\")\n\tvar db = os.Getenv(\"DB_NAME\")\n\n\t// Build connection string\n\tconnString := fmt.Sprintf(\"server=%s;user id=%s;password=%s;port=%s;database=%s;\", server, dbUser, dbPass, dbPort, db)\n\n\t// Create connection pool\n\tvar err error\n\tdatabase, err = sql.Open(\"sqlserver\", connString)\n\tif err != nil {\n\t\tlog.Fatal(\"Error creating connection pool: \", err.Error())\n\t}\n\tctx := context.Background()\n\terr = database.PingContext(ctx)\n\thttputil.GenericErrHandler(\"error\", err)\n\n\t//create user table if it doesn't exist\n\tstatement, err := database.Prepare(`IF NOT EXISTS (SELECT * FROM sysobjects WHERE name='user_table' and xtype='U') \n\t\tCREATE TABLE user_table (id INT NOT NULL IDENTITY(1,1) PRIMARY KEY, username VARCHAR(255), password VARCHAR(255))`)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t_, err = statement.Exec()\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n}", "func InitDatabase(dbName *string, dst ...interface{}) {\n\tlog.Info().Msgf(\"Loading 
database %v\", *dbName)\n\tvar err error\n\tdbFile = sqlite.Open(fmt.Sprintf(\"%v.db\", *dbName))\n\tdatastore, err = gorm.Open(dbFile, &gorm.Config{\n\t\tDisableForeignKeyConstraintWhenMigrating: true,\n\t})\n\tif err != nil {\n\t\tpanic(\"failed to connect database\")\n\t}\n\n\t// Migrate the schema\n\terr = datastore.AutoMigrate(dst...)\n\tif err != nil {\n\t\tlog.Fatal().Err(err).Msg(\"Migration failed! Please check the logs!\")\n\t}\n}", "func createTable() {\n\t_, err := Db.Query(`\nCREATE TABLE IF NOT EXISTS people (\nid INT PRIMARY KEY,\nName TEXT ,\nHeight TEXT ,\nMass TEXT ,\nHairColor TEXT ,\nSkinColor TEXT ,\nEyeColor TEXT ,\nBirthYear TEXT ,\nGender TEXT ,\nHomeworld TEXT ,\nURL TEXT ,\nCreated TEXT ,\nEdited TEXT \n\t );\n\t `)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfmt.Println(\"Table Created\")\n}", "func init() {\n\tvar err error\n\t//db, err = sql.Open(\"postgres\", \"postgres://wookie:[email protected]/wookie?sslmode=disable\")\n\tdb, err = sql.Open(\"postgres\", \"user=wookie dbname=wookie sslmode=disable\")\n\tif err != nil {\n\t\tERROR.Println(\"init db\", err.Error())\n\t\treturn\n\t}\n\n\t//////////////////////////////////////\n\t// drop tables\n\t// DANGER this will empty the db\n\t//\n\t//////////////////////////////////////\n\t_, err = db.Exec(`DROP TABLE classes, users, quiz, attendance CASCADE`)\n\tfmt.Println(err)\n\n\t/////////////////////////////////////////////\n\t//////creating\n\t/////////////////////////////////////////////\n\n\t_, err = db.Exec(`CREATE TABLE users (\n uid serial PRIMARY KEY,\n email text UNIQUE,\n password bytea,\n salt bytea\n )`)\n\tfmt.Println(err)\n\n\t_, err = db.Exec(`CREATE TABLE attendance (\n cid integer PRIMARY KEY,\n students json,\n date_created date\n )`)\n\tfmt.Println(err)\n\n\t_, err = db.Exec(`CREATE TABLE classes (\n cid serial PRIMARY KEY,\n name text,\n students json,\n uid integer REFERENCES users (uid),\n semester text\n )`)\n\tfmt.Println(err)\n\n\t_, err = db.Exec(`CREATE TABLE quiz 
(\n qid serial PRIMARY KEY,\n info json,\n type integer,\n cid integer REFERENCES classes (cid)\n )`)\n\tfmt.Println(err)\n}", "func Create() {\n\tif err := DB.CreateTablesIfNotExists(); err != nil {\n\t\tlog.Fatal(\"Error creating tables: \", err)\n\t}\n\tfor _, query := range createSQL {\n\t\tif _, err := DB.Exec(query); err != nil {\n\t\t\tlog.Fatalf(\"Error running query %q: %s\", query, err)\n\t\t}\n\t}\n}", "func (c *cockroachdb) createTables(tx *gorm.DB) error {\n\tlog.Tracef(\"createTables\")\n\n\tif !tx.HasTable(tableVersions) {\n\t\terr := tx.CreateTable(&Version{}).Error\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif !tx.HasTable(tableRecords) {\n\t\terr := tx.CreateTable(&Record{}).Error\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif !tx.HasTable(tableMetadataStreams) {\n\t\terr := tx.CreateTable(&MetadataStream{}).Error\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif !tx.HasTable(tableFiles) {\n\t\terr := tx.CreateTable(&File{}).Error\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tvar v Version\n\terr := tx.Where(\"id = ?\", cacheID).\n\t\tFind(&v).\n\t\tError\n\tif err == gorm.ErrRecordNotFound {\n\t\terr = tx.Create(\n\t\t\t&Version{\n\t\t\t\tID: cacheID,\n\t\t\t\tVersion: cacheVersion,\n\t\t\t\tTimestamp: time.Now().Unix(),\n\t\t\t}).Error\n\t}\n\n\treturn err\n}", "func TestTableCreation(t *testing.T) {\n\tcommon.InitConfig()\n\n\tdb := GetInstance()\n\n\t// A map of table name to creation function,\n\t// as in database.go\n\tvar tableMap = map[string]func() error{\n\t\tpaymentsTable: db.createPaymentsTable,\n\t}\n\n\t// Loop through our creation funcs, execute and test\n\tfor _, createFunc := range tableMap {\n\t\terr := createFunc()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n}", "func createTables(conn sql.DB) {\n\tvar reqErr error\n\tvar respErr error\n\n\tdropTable(conn, \"requests\")\n\tdropTable(conn, \"responses\")\n\n\tcreateRequests := \"create table requests (ID int IDENTITY(1,1) PRIMARY 
KEY, cmd varchar(max), filecontent varbinary(max))\"\n\t_, reqErr = conn.Exec(createRequests)\n\tif reqErr != nil {\n\t\tlog.Fatal(reqErr)\n\t}\n\n\tcreateResponses := \"create table responses (ID int IDENTITY(1,1) PRIMARY KEY, response varchar(max), filecontent varbinary(max), request_id int)\"\n\t_, respErr = conn.Exec(createResponses)\n\tif respErr != nil {\n\t\tlog.Fatal(respErr)\n\t}\n}", "func (con *dbAccess) prepareDatabase() {\n\tcon.createCommandListTable()\n\tcon.createAmharicWordsTable()\n\tcon.truncateCommandListTable()\n\tcon.truncateAmharicWordsTable()\n\tcon.insertCommandList()\n\tcon.insertAmharicWords()\n\n}", "func InitDB() {\n\tos.Remove(\"./threat_analyser.db\")\n\n\tvar err error\n\tdb, err = sql.Open(\"sqlite3\", \"./threat_analyser.db\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tcreateCmd := `\n\tcreate table ip (ip_address TEXT PRIMARY KEY,\n\t\t\t\t\t uuid TEXT,\n\t\t\t\t\t created_at DATETIME,\n\t\t\t\t\t updated_at DATETIME,\n\t\t\t\t\t response_code TEXT);\n\t`\n\t_, err = db.Exec(createCmd)\n\tif err != nil {\n\t\tlog.Fatal(\"Error creating DB table\", err)\n\t\treturn\n\t}\n}", "func CreateTestTables() (err error) {\n\terr = createTable(\"users\")\n\treturn\n}", "func (db *MySQLDB) CreateAllTable(ctx context.Context) error {\n\tfLog := mysqlLog.WithField(\"func\", \"CreateAllTable\").WithField(\"RequestID\", ctx.Value(constants.RequestID))\n\n\thansipDomain := config.Get(\"hansip.domain\")\n\thansipAdmin := config.Get(\"hansip.admin\")\n\n\t_, err := db.instance.ExecContext(ctx, CreateTenantMySQL)\n\tif err != nil {\n\t\tfLog.Errorf(\"db.instance.ExecContext HANSIP_TENANT Got %s. 
SQL = %s\", err.Error(), CreateTenantMySQL)\n\t\treturn &ErrDBExecuteError{\n\t\t\tWrapped: err,\n\t\t\tMessage: \"Error while trying to create table HANSIP_TENANT\",\n\t\t\tSQL: CreateTenantMySQL,\n\t\t}\n\t}\n\t_, err = db.CreateTenantRecord(ctx, \"Hansip System\", \"hansip\", \"Hansip built in tenant\")\n\tif err != nil {\n\t\tfLog.Errorf(\"db.CreateTenantRecord Got %s\", err.Error())\n\t\treturn err\n\t}\n\t_, err = db.instance.ExecContext(ctx, CreateUserMySQL)\n\tif err != nil {\n\t\tfLog.Errorf(\"db.instance.ExecContext HANSIP_USER Got %s. SQL = %s\", err.Error(), CreateUserMySQL)\n\t\treturn &ErrDBExecuteError{\n\t\t\tWrapped: err,\n\t\t\tMessage: \"Error while trying to create table HANSIP_USER\",\n\t\t\tSQL: CreateUserMySQL,\n\t\t}\n\t}\n\t_, err = db.instance.ExecContext(ctx, CreateGroupMySQL)\n\tif err != nil {\n\t\tfLog.Errorf(\"db.instance.ExecContext HANSIP_GROUP Got %s. SQL = %s\", err.Error(), CreateGroupMySQL)\n\t\treturn &ErrDBExecuteError{\n\t\t\tWrapped: err,\n\t\t\tMessage: \"Error while trying to create table HANSIP_GROUP\",\n\t\t\tSQL: CreateGroupMySQL,\n\t\t}\n\t}\n\t_, err = db.instance.ExecContext(ctx, CreateRoleMySQL)\n\tif err != nil {\n\t\tfLog.Errorf(\"db.instance.ExecContext HANSIP_ROLE Got %s. SQL = %s\", err.Error(), CreateRoleMySQL)\n\t\treturn &ErrDBExecuteError{\n\t\t\tWrapped: err,\n\t\t\tMessage: \"Error while trying to create table HANSIP_ROLE\",\n\t\t\tSQL: CreateRoleMySQL,\n\t\t}\n\t}\n\t_, err = db.instance.ExecContext(ctx, CreateUserRoleMySQL)\n\tif err != nil {\n\t\tfLog.Errorf(\"db.instance.ExecContext HANSIP_USER_ROLE Got %s. SQL = %s\", err.Error(), CreateUserRoleMySQL)\n\t\treturn &ErrDBExecuteError{\n\t\t\tWrapped: err,\n\t\t\tMessage: \"Error while trying to create table HANSIP_USER_ROLE\",\n\t\t\tSQL: CreateUserRoleMySQL,\n\t\t}\n\t}\n\t_, err = db.instance.ExecContext(ctx, CreateUserGroupMySQL)\n\tif err != nil {\n\t\tfLog.Errorf(\"db.instance.ExecContext HANSIP_USER_GROUP Got %s. 
SQL = %s\", err.Error(), CreateUserGroupMySQL)\n\t\treturn &ErrDBExecuteError{\n\t\t\tWrapped: err,\n\t\t\tMessage: \"Error while trying to create table HANSIP_USER_GROUP\",\n\t\t\tSQL: CreateUserGroupMySQL,\n\t\t}\n\t}\n\t_, err = db.instance.ExecContext(ctx, CreateGroupRoleMySQL)\n\tif err != nil {\n\t\tfLog.Errorf(\"db.instance.ExecContext HANSIP_GROUP_ROLE Got %s. SQL = %s\", err.Error(), CreateGroupRoleMySQL)\n\t\treturn &ErrDBExecuteError{\n\t\t\tWrapped: err,\n\t\t\tMessage: \"Error while trying to create table HANSIP_GROUP_ROLE\",\n\t\t\tSQL: CreateGroupRoleMySQL,\n\t\t}\n\t}\n\t_, err = db.instance.ExecContext(ctx, CreateTOTPRecoveryCodeMySQL)\n\tif err != nil {\n\t\tfLog.Errorf(\"db.instance.ExecContext HANSIP_TOTP_RECOVERY_CODES Got %s. SQL = %s\", err.Error(), CreateTOTPRecoveryCodeMySQL)\n\t\treturn &ErrDBExecuteError{\n\t\t\tWrapped: err,\n\t\t\tMessage: \"Error while trying to create table HANSIP_TOTP_RECOVERY_CODES\",\n\t\t\tSQL: CreateTOTPRecoveryCodeMySQL,\n\t\t}\n\t}\n\t_, err = db.instance.ExecContext(ctx, CreateRevocationMySQL)\n\tif err != nil {\n\t\tfLog.Errorf(\"db.instance.ExecContext HANSIP_REVOCATION Got %s. 
SQL = %s\", err.Error(), CreateRevocationMySQL)\n\t\treturn &ErrDBExecuteError{\n\t\t\tWrapped: err,\n\t\t\tMessage: \"Error while trying to create table HANSIP_REVOCATION\",\n\t\t\tSQL: CreateRevocationMySQL,\n\t\t}\n\t}\n\t_, err = db.CreateRole(ctx, hansipAdmin, hansipDomain, \"Administrator role\")\n\tif err != nil {\n\t\tfLog.Errorf(\"db.CreateRole Got %s\", err.Error())\n\t\treturn err\n\t}\n\treturn nil\n}", "func InitDb(appConfig *AppConfig) {\n\tlog.Info(\"Initialize database connection\")\n\tDbs = fmt.Sprintf(\"host=%s port=%s user=%s password=%s dbname=%s sslmode=%s\",\n\t\tappConfig.Db.Host,\n\t\tappConfig.Db.Port,\n\t\tappConfig.Db.User,\n\t\tappConfig.Db.Password,\n\t\tappConfig.Db.DbName,\n\t\tappConfig.Db.SSLMode,\n\t)\n\tlog.Info(\"Successfully initialize database connection\")\n\tdb := GetDB()\n\tlog.Info(\"Start table migrations\")\n\tdb.AutoMigrate(\n\t\t&Session{},\n\t)\n\tlog.Info(\"Table migrations achieved\")\n}", "func Init() {\n\n\tdb, err := Connect()\n\tif err != nil {\n\t\tlog.Error(helpers.OutputMessage(\"ErrorDBConnection\"))\n\t}\n\tdefer db.Close()\n\n\tdb.DropTableIfExists(&model.User{})\n\tlog.Info(\"User Table Dropped\")\n\n\tdb.CreateTable(&model.User{})\n\tlog.Info(\"User Table Created\")\n}", "func (mdb *MemoryDB) Init() (err error) {\n\tmdb.table = make(map[string]*TableRow)\n\treturn nil\n}", "func createDBTable(tableName string) {\n\tadapter := adapters[db.DriverName()]\n\tquery := fmt.Sprintf(`\n\tCREATE TABLE %s (\n\t\tid serial NOT NULL PRIMARY KEY\n\t)\n\t`, adapter.quoteTableName(tableName))\n\tdbExecuteNoTx(query)\n}", "func (s *Sqlite) Init() error {\n\tdb, err := s.openConnection()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer db.Close()\n\n\tif !db.HasTable(&gogios.CheckHistory{}) {\n\t\tdb.AutoMigrate(&gogios.User{}, &gogios.Check{})\n\t\tdb.AutoMigrate(&gogios.CheckHistory{}).AddForeignKey(\"check_id\", \"checks(id)\", \"RESTRICT\", \"RESTRICT\")\n\t}\n\n\treturn nil\n}", "func CreateSyncTables(db *sql.DB) 
{\n\t_, err := db.Exec(createSyncModelTablesSQL)\n\tif err != nil {\n\t\tmsg := \"Error getting results from database. Error:%s\"\n\t\tsyncutil.Error(msg, err.Error())\n\t\tpanic(err)\n\t}\n\tsyncutil.Info(\"Created sync_* tables\")\n}", "func CreateDB(config *defs.Config) error {\n\tvar db, err = sql.Open(\"postgres\", \"user=\"+config.DatabaseUserName+\n\t\t\" dbname=\"+config.DatabaseName+\" sslmode=disable\")\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = db.Ping()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar statements = [][]string{\n\t\t{\"Creating table users...\", `CREATE TABLE users (\n\t\t\tid serial PRIMARY KEY,\n\t\t\temail text NOT NULL,\n\t\t\tname text,\n\t\t\trole text NOT NULL,\n\t\t\ttoken text,\n\t\t\tcreation_date timestamp WITH TIME ZONE NOT NULL\n\t\t\t\t\t\t\t\tDEFAULT CURRENT_TIMESTAMP,\n\t\t\tlastlog timestamp WITH TIME ZONE)`},\n\t\t{\"Creating table photos...\", `CREATE TABLE photos (\n\t\t\tid \tserial PRIMARY KEY,\n\t\t\tfilename\t\ttext NOT NULL,\n\t\t\tmimetype\t\ttext NOT NULL,\n\t\t\tsize\t\t\tinteger NOT NULL,\n\t\t\tcreation_date timestamp WITH TIME ZONE NOT NULL\n\t\t\t\t\t\t\t\tDEFAULT CURRENT_TIMESTAMP,\n\t\t\tauthor_id \tinteger NOT NULL REFERENCES users(id),\n\t\t\tcaption\t\t\ttext NOT NULL DEFAULT '',\n\t\t\timage\t\t\tbytea NOT NULL,\n\t\t\tthumbnail\t\tbytea NOT NULL,\n\t\t\tbig_thumbnail\tbytea NOT NULL)`},\n\t\t{\"Creating table albums...\", `CREATE TABLE albums (\n\t\t\tname text PRIMARY KEY,\n\t\t\tcover_image_id\tinteger REFERENCES photos(id))`},\n\t\t{\"Creating table photo_albums...\", `CREATE TABLE photo_albums (\n\t\t\tphoto_id\tinteger REFERENCES photos(id) ON DELETE CASCADE NOT NULL,\n\t\t\talbum_name \ttext REFERENCES albums(name) ON DELETE CASCADE\n\t\t\t\t\t\t\tON UPDATE CASCADE NOT NULL,\n\t\t\tUNIQUE (photo_id, album_name))`},\n\t}\n\tfor _, s := range statements {\n\t\tif s[0] != \"\" {\n\t\t\tlog.Println(s[0])\n\t\t}\n\t\t_, err = db.Exec(s[1])\n\t\tif err != nil {\n\t\t\treturn 
err\n\t\t}\n\t}\n\n\t// Add the default admins listed in the config file to the users table.\n\tlog.Println(\"Inserting default admins...\")\n\tfor _, admin := range config.DefaultAdmins {\n\t\t_, err = db.Exec(`INSERT INTO users (email, role) VALUES ($1, $2)`,\n\t\t\tadmin, \"Admin\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tlog.Println(\"Database setup complete!\")\n\treturn nil\n}", "func (dbh *DBHandle) Init() error {\n\tif dbh.DB == nil {\n\t\treturn ErrMissingDBConnection\n\t}\n\terr := dbh.createSchema()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (db *Database) CreateSchema() error {\n\tdb.mux.Lock()\n\tdefer db.mux.Unlock()\n\tfor _, model := range []interface{}{\n\t\t(*models.User)(nil), // Make the users table\n\t\t(*models.Post)(nil), // make the posts table\n\t\t(*token)(nil)} { // make the tokens table\n\t\terr := db.DB.CreateTable(model, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (s *Service) setsInit() error {\n\tlog.Println(\"Initing sets table\")\n\n\t_, err := s.connection.Exec(`\n\t\tCREATE TABLE IF NOT EXISTS sets (\n\t\t\tid text not null primary key,\n\t\t\tname text\n\t\t)\n\t`)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"can't create sets table\")\n\t}\n\n\t_, err = s.connection.Exec(\"CREATE UNIQUE INDEX IF NOT EXISTS name ON sets (name)\")\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"can't create index ON sets (name)\")\n\t}\n\n\treturn nil\n}", "func (c *BlockWriterCase) Initialize(ctx context.Context, db *sql.DB) error {\n\tfor i := 0; i < c.cfg.TableNum; i++ {\n\t\tvar s string\n\t\tif i > 0 {\n\t\t\ts = fmt.Sprintf(\"%d\", i)\n\t\t}\n\t\tmustExec(db, fmt.Sprintf(\"CREATE TABLE IF NOT EXISTS block_writer%s %s\", s, `\n\t(\n block_id BIGINT NOT NULL,\n writer_id VARCHAR(64) NOT NULL,\n block_num BIGINT NOT NULL,\n raw_bytes BLOB NOT NULL,\n PRIMARY KEY (block_id, writer_id, block_num)\n)`))\n\t}\n\treturn nil\n}", "func (empHandler *EmployeeHandler) 
CreateTable() {\n\tquery := `CREATE TABLE article (\n\t\tid int,\n\t\ttitle varchar(50),\n\t\tdesc varchar(50),\n\t\tContent varchar(50)\n\t)`\n\t_, err := empHandler.DB.Query(query)\n\tif err != nil {\n\t\tfmt.Println(\"tale not created\")\n\t}\n}", "func (d *TileDB) Init() error {\n\t// TODO(mhutchinson): Consider storing the entries too:\n\t// CREATE TABLE IF NOT EXISTS entries (revision INTEGER, keyhash BLOB, key STRING, value STRING, PRIMARY KEY (revision, keyhash))\n\n\tif _, err := d.db.Exec(\"CREATE TABLE IF NOT EXISTS revisions (revision INTEGER PRIMARY KEY, datetime TIMESTAMP, logroot BLOB, count INTEGER)\"); err != nil {\n\t\treturn err\n\t}\n\tif _, err := d.db.Exec(\"CREATE TABLE IF NOT EXISTS tiles (revision INTEGER, path BLOB, tile BLOB, PRIMARY KEY (revision, path))\"); err != nil {\n\t\treturn err\n\t}\n\tif _, err := d.db.Exec(\"CREATE TABLE IF NOT EXISTS logs (module TEXT, revision INTEGER, leaves BLOB, PRIMARY KEY (module, revision))\"); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func setupDB(db *sql.DB) error {\n\tsqlScript, err := ioutil.ReadFile(\"dbSchema.sql\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstatements := strings.Split(string(sqlScript), \";\")\n\tif len(statements) > 0 {\n\t\tstatements = statements[:len(statements)-1]\n\t}\n\n\tfor _, statement := range statements {\n\t\t_, err = db.Exec(statement)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func Init() {\n\tvar err error\n\tdb, err = sql.Open(\"sqlite3\", \"db-data/app.db\")\n\tif err != nil {\n\t\tlog.Fatal(\"failed to open db: \", err)\n\t}\n\n\tif _, err := db.Exec(createStmt); err != nil {\n\t\tlog.Fatal(\"failed to initialize db: \", err)\n\t}\n}", "func Init() {\n\tdb, err = gorm.Open(\"postgres\", \"host=db port=5432 user=LikeTwitterApp-backend dbname=LikeTwitterApp-backend password=LikeTwitterApp-backend sslmode=disable\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tautoMigration()\n\tuser := models.User{\n\t\tID: 1,\n\t\tName: 
\"aoki\",\n\t\tPosts: []models.Post{{ID: 1, Content: \"tweet1\"}, {ID: 2, Content: \"tweet2\"}},\n\t}\n\tdb.Create(&user)\n}", "func (d *DB) CreateTablesFor(models ...interface{}) error {\n for _, model := range models {\n if err := d.Conn.CreateTable(\n model,\n &orm.CreateTableOptions{\n IfNotExists: true,\n },\n ); err != nil {\n return err\n }\n }\n return nil\n}", "func (c *cockroachdb) Setup() error {\n\tlog.Tracef(\"Setup tables\")\n\n\tc.Lock()\n\tdefer c.Unlock()\n\n\tif c.shutdown {\n\t\treturn cache.ErrShutdown\n\t}\n\n\ttx := c.recordsdb.Begin()\n\terr := c.createTables(tx)\n\tif err != nil {\n\t\ttx.Rollback()\n\t\treturn err\n\t}\n\n\treturn tx.Commit().Error\n}", "func InitializeDB(dynamoInitChan chan struct{}) chan struct{} {\n\tdbInitChan := make(chan struct{})\n\tgo func() {\n\n\t\t<-dynamoInitChan\n\n\t\t// Create all tables\n\t\tvar c Complex\n\t\tif err := dynahelpers.CreateTable(c); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\t// chatbot.RegisterType(c)\n\n\t\tvar pd ParkingDeck\n\t\tif err := dynahelpers.CreateTable(pd); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\t// chatbot.RegisterType(pd)\n\n\t\tvar ps ParkingSpace\n\t\tif err := dynahelpers.CreateTable(ps); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\t// chatbot.RegisterType(ps)\n\n\t\tvar r Residence\n\t\tif err := dynahelpers.CreateTable(r); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\t// chatbot.RegisterType(r)\n\n\t\tvar rsdnt Resident\n\t\tif err := dynahelpers.CreateTable(rsdnt); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\t// chatbot.RegisterType(rsdnt)\n\n\t\tdbInitChan <- struct{}{}\n\t}()\n\treturn dbInitChan\n}" ]
[ "0.7706803", "0.7667855", "0.76524407", "0.7566775", "0.7562417", "0.74877805", "0.7485864", "0.7475944", "0.73747253", "0.7373994", "0.7367066", "0.723283", "0.72112584", "0.7204502", "0.7166722", "0.712205", "0.71092635", "0.708194", "0.70771164", "0.7050126", "0.70442826", "0.7035267", "0.701372", "0.70028925", "0.7002308", "0.7001127", "0.6960139", "0.69572514", "0.69567084", "0.69457126", "0.6943869", "0.6929944", "0.69268143", "0.69168717", "0.6878932", "0.68747854", "0.6873112", "0.6840056", "0.6838317", "0.6831377", "0.680363", "0.6802744", "0.680027", "0.67997855", "0.6757237", "0.67415166", "0.67211676", "0.67096037", "0.6706154", "0.67021024", "0.6677904", "0.66772246", "0.6666078", "0.6666014", "0.66644543", "0.6658291", "0.6653725", "0.6650239", "0.6632883", "0.6631532", "0.6631045", "0.6630174", "0.6607153", "0.65979004", "0.659383", "0.6587095", "0.6581618", "0.6580327", "0.65700203", "0.6567852", "0.6565479", "0.6561814", "0.656006", "0.65520465", "0.6536138", "0.65303516", "0.65252936", "0.652356", "0.65176487", "0.65135473", "0.6508198", "0.650273", "0.65024096", "0.64988834", "0.64985657", "0.64943504", "0.6488833", "0.6470715", "0.6469159", "0.64687747", "0.64637226", "0.64515555", "0.6449068", "0.6438967", "0.64288527", "0.64137197", "0.64077866", "0.6402006", "0.64015603", "0.64013803" ]
0.6555467
73
NewAccessTokenHandlerFactory return new fake access token handler factory
func NewAccessTokenHandlerFactory(userIDFactory UserIDFactory) middleware.AccessTokenHandlerFactory { return &accessTokenHandlerFactory{ userIDFactory: userIDFactory, } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewHandler(service services.Service) AccessTokenHandler {\n\treturn &accessTokenhandler{\n\t\tservice: service,\n\t}\n\n}", "func accessTokenHandlerConfig(oasvr *osin.Server) func(w http.ResponseWriter, r *http.Request) {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tdbg.Println(\"Token start\")\n\t\tdefer dbg.Println(\"Token end\")\n\t\tresp := oasvr.NewResponse()\n\t\tdefer resp.Close()\n\t\tdbg.Println(\"Token obtain\")\n\t\tif ar := oasvr.HandleAccessRequest(resp, r); ar != nil {\n\t\t\tar.Authorized = true\n\t\t\toasvr.FinishAccessRequest(resp, r, ar)\n\t\t\tdbg.Println(\"Token generated\")\n\t\t\tosin.OutputJSON(resp, w, r)\n\t\t}\n\t}\n}", "func CreateAccessTokenPostHandler(appCtx *appctx.Context) http.Handler {\n\thandlerFunc := func(w http.ResponseWriter, r *http.Request, user *models.User) {\n\n\t\tif err := r.ParseForm(); err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tappCtx.RequestLogger().Error(r, err)\n\t\t\treturn\n\t\t}\n\n\t\tname := r.Form.Get(\"name\")\n\n\t\tif token, err := appCtx.TokensService().CreateToken(user.ID, name); err != nil {\n\t\t\tappCtx.Sessions().Flash(r, w, \"danger\", err.Error())\n\t\t} else {\n\t\t\tmsg := fmt.Sprintf(\"Your access token is %s\", token.Token)\n\t\t\tappCtx.Sessions().Flash(r, w, \"success\", msg)\n\t\t\tappCtx.Sessions().Flash(r, w, \"success\", \"This is the last time that your token will be displayed to you.\")\n\t\t}\n\n\t\thttp.Redirect(w, r, \"/profile/access-tokens\", http.StatusSeeOther)\n\t}\n\n\thandler := auth.WithUserOrRedirect(handlerFunc, appCtx)\n\n\thandler = middleware.HTMLHeaders(handler)\n\n\treturn handler\n}", "func newTokenHandler(w http.ResponseWriter, r *http.Request) {\n\t// Read the bytes from the body\n\tbodyBytes, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tresultErrorJSON(w, http.StatusInternalServerError, err.Error())\n\t}\n\n\t// Schema Validation:\n\tjsonErrors, err := validateRequestSchema(tokenRequestSchema, 
bodyBytes)\n\t// General validation error\n\tif err != nil {\n\t\tcode := http.StatusInternalServerError\n\t\tif err == errInvalidJSON {\n\t\t\tcode = http.StatusBadRequest\n\t\t}\n\t\tresultErrorJSON(w, code, err.Error())\n\t\treturn\n\t}\n\n\t// JSON Schema errors\n\tif jsonErrors != nil {\n\t\tresultSchemaErrorJSON(w, jsonErrors)\n\t\treturn\n\t}\n\n\tvar payload tokenPayload\n\terr = json.Unmarshal(bodyBytes, &payload)\n\tif err != nil {\n\t\tresultErrorJSON(w, http.StatusBadRequest, errInvalidPayload.Error())\n\t\treturn\n\t}\n\n\t// TODO: Use your own methods to log someone in and then return a new Token\n\n\tif response, err := bjwt.Generate(123456); err != nil {\n\t\tresultErrorJSON(w, http.StatusInternalServerError, err.Error())\n\t} else {\n\t\tresultResponseJSON(w, http.StatusOK, response)\n\t}\n}", "func (o *oauth) createTokenHandler(auth authable) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tuserId, err := auth.findUserId(extractCookie(r).Value)\n\t\tif err != nil {\n\t\t\t// user not found, return\n\t\t\tw.WriteHeader(http.StatusForbidden)\n\t\t\treturn\n\t\t}\n\n\t\trecords, err := o.clientStore.GetByUserID(userId)\n\t\tif err != nil && !strings.Contains(err.Error(), \"not found\") {\n\t\t\tinternalError(w, err, \"oauth\")\n\t\t\treturn\n\t\t}\n\t\tif len(records) == 0 { // nothing found, so fake one\n\t\t\trecords = append(records, &models.Client{})\n\t\t}\n\n\t\tclients := make([]*models.Client, len(records))\n\t\tfor i := range records {\n\t\t\terr = o.clientStore.DeleteByID(records[i].GetID())\n\t\t\tif err != nil && !strings.Contains(err.Error(), \"not found\") {\n\t\t\t\tinternalError(w, err, \"oauth\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tclients[i] = &models.Client{\n\t\t\t\tID: generateID()[:12],\n\t\t\t\tSecret: generateID(),\n\t\t\t\tDomain: Domain,\n\t\t\t\tUserID: userId,\n\t\t\t}\n\n\t\t\t// Write client into oauth clients db.\n\t\t\tif err := o.clientStore.Set(clients[i].GetID(), clients[i]); err != nil 
{\n\t\t\t\tinternalError(w, err, \"oauth\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t// metrics\n\t\tclientGenerations.Add(1)\n\n\t\t// render back new client info\n\t\ttype response struct {\n\t\t\tClients []*models.Client `json:\"clients\"`\n\t\t}\n\t\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\t\tif err := json.NewEncoder(w).Encode(&response{clients}); err != nil {\n\t\t\tinternalError(w, err, \"oauth\")\n\t\t\treturn\n\t\t}\n\t}\n}", "func newOAuthAccessTokenWatcher(wrappedWatcher watch.Interface, username string) *OAuthAccessTokenWatcher {\n\treturn &OAuthAccessTokenWatcher{\n\t\twrappedWatcher: wrappedWatcher,\n\t\tincoming: wrappedWatcher.ResultChan(),\n\t\toutgoing: make(chan watch.Event),\n\t\tstopCh: make(chan struct{}),\n\t\tstopped: false,\n\n\t\tusername: username,\n\t}\n}", "func NewAccessTokenServer(t *testing.T, json string) *httptest.Server {\n\treturn NewTestServerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\tassert.Equal(t, \"POST\", req.Method)\n\t\tw.Header().Set(contentType, jsonContentType)\n\t\tw.Write([]byte(json))\n\t})\n}", "func NewUserAccessTokenHandler(base baseHandler) *UserAccessTokenHandler {\n\treturn &UserAccessTokenHandler{\n\t\tbaseHandler: base,\n\t}\n}", "func oauth2FactoryToken(w http.ResponseWriter, r *http.Request) {\n\tparas, incName := r.URL.Query(), mux.Vars(r)[\"incName\"]\n\t// if error parameter exists.\n\tif _, ok := paras[\"error\"]; ok {\n\t\thttp.Redirect(w, r, \"/index\", http.StatusFound)\n\t\treturn\n\t}\n\tform := url.Values{}\n\tform.Add(\"client_id\", oauth2Infos[incName].clientId)\n\tform.Add(\"client_secret\", oauth2Infos[incName].clientSecret)\n\tform.Add(\"code\", paras[\"code\"][0])\n\tform.Add(\"redirect_uri\", oauth2Infos[incName].redirectUrl)\t// the redirectUrl should be my host index\n\tform.Add(\"state\", incName)\n\n\tres, _ := http.Post(oauth2Infos[incName].tokenUrl, \"application/x-www-form-urlencoded\", strings.NewReader(form.Encode()))\n\tbody, _ := 
ioutil.ReadAll(res.Body)\n\tres, _ = http.Get(oauth2Infos[incName].infoUrl + \"?access_token=\" + jsonProcessString(string(body))[\"access_token\"])\n\tbody, _ = ioutil.ReadAll(res.Body)\n\t// Get the access_token and put user information to mydatabase\n\tinfos := &githubUser{}\n\tjson.Unmarshal(body, &infos)\n\tif users := SelectUser(map[string]interface{}{\"username\": infos.Login, \"source\": incName}); len(users) == 0 {\n\t\tgo AddUser(User{Source: incName, UserId: uuid.Must(uuid.NewV4()).String(), Username: infos.Login, Password: infos.Login, Avatar: infos.AvatarURL, InfoURL: infos.URL, Bio: infos.Bio})\n\t}\n\n\t// Later, we will marsh a better user info cookie.\n\thttp.SetCookie(w, &http.Cookie{\n\t\tName: \"user\",\n\t\tValue: infos.Login,\t// user struct json\n\t\tPath: \"/\",\n\t\tExpires: time.Now().AddDate(0, 1, 0),\n\t\tMaxAge: 86400,\t// 100 hours' validate time\n\t})\n\thttp.Redirect(w, r, \"/index\", http.StatusFound)\t\t// redirect to the index page\n}", "func (f JwtFactory) NewAccessToken(userSID, username, host, clientID, nonce string, groups []auth.Group) (string, error) {\r\n\tt := jwt.New(jwt.GetSigningMethod(\"RS256\"))\r\n\tgroupSids := []string{}\r\n\tgroupNiceNames := []string{}\r\n\tfor _, group := range groups {\r\n\t\tgroupSids = append(groupSids, group.SID)\r\n\t\tgroupNiceNames = append(groupNiceNames, group.NiceName)\r\n\t}\r\n\r\n\tt.Claims = &struct {\r\n\t\tUserSid string `json:\"userSID\"`\r\n\t\tUsername string `json:\"username\"`\r\n\t\tGroups string `json:\"groups\"`\r\n\t\tGroupsNiceName string `json:\"groupsNiceName\"`\r\n\r\n\t\t// Purpose defines what this JWT is for, either access_token or\r\n\t\t// id_token.\r\n\t\tPurpose string `json:\"purpose\"`\r\n\r\n\t\tjwt.StandardClaims\r\n\t}{\r\n\t\tuserSID,\r\n\t\tusername,\r\n\t\tstrings.Join(groupSids, \",\"),\r\n\t\tstrings.Join(groupNiceNames, \",\"),\r\n\t\t\"access_token\",\r\n\t\tgetStandardClaims(host, username, clientID),\r\n\t}\r\n\r\n\treturn f.sign(t)\r\n}", "func 
(env *Env) GenerateAccessToken(c *gin.Context) {\n\n\ttype refreshRequest struct {\n\t\tRefreshToken string `json:\"refresh_token\"`\n\t}\n\n\ttype refreshResponse struct {\n\t\tAccessToken string `json:\"access_token\"`\n\t}\n\n\t//decode request body\n\tjsonData, err := ioutil.ReadAll(c.Request.Body)\n\tif err != nil {\n\t\tLog.WithField(\"module\", \"handler\").WithError(err)\n\t\tc.AbortWithStatusJSON(http.StatusBadRequest, errs.RQST001)\n\t\treturn\n\t}\n\n\tvar request refreshRequest\n\terr = json.Unmarshal(jsonData, &request)\n\tif err != nil {\n\t\tLog.WithField(\"module\", \"handler\").WithError(err)\n\t\tc.AbortWithStatusJSON(http.StatusBadRequest, errs.RQST001)\n\t\treturn\n\t}\n\n\tuser := mysql.User{}\n\tuser.RefreshToken = request.RefreshToken\n\n\ttoken, _ := utils.JWTAuthService(config.JWTRefreshSecret).ValidateToken(user.RefreshToken)\n\tif token == nil || !token.Valid {\n\t\tLog.WithField(\"module\", \"handler\").Error(\"Invalid JWT Token\")\n\t\tc.AbortWithStatusJSON(http.StatusUnauthorized, errs.AUTH005)\n\t\treturn\n\t}\n\tclaims := token.Claims.(jwt.MapClaims)\n\tuser.ID = int32(claims[\"userid\"].(float64))\n\tLog.Debug(claims[\"userid\"])\n\n\tvar exists int64\n\n\t//Check if Refresh Token is valid\n\tresult := env.db.Model(&user).Where(\"id = ? 
and refresh_token = ?\", user.ID, request.RefreshToken).Count(&exists)\n\tif result.Error != nil {\n\t\tLog.WithField(\"module\", \"sql\").WithError(result.Error)\n\t\tc.AbortWithStatusJSON(http.StatusInternalServerError, errs.DBSQ001)\n\t\treturn\n\t}\n\n\tif exists == 0 {\n\t\tLog.WithField(\"module\", \"handler\").Error(\"Invalid RefreshToken\")\n\t\tc.AbortWithStatusJSON(http.StatusUnauthorized, errs.AUTH005)\n\t\treturn\n\t}\n\n\tLog.WithField(\"model\", \"handler\").Debug(user)\n\n\t//Generate JWT AccessToken\n\taccessToken, err := utils.JWTAuthService(config.JWTAccessSecret).GenerateToken(user.ID, claims[\"deviceid\"].(string), time.Hour*24)\n\tif err != nil {\n\t\tLog.WithField(\"module\", \"jwt\").WithError(err)\n\t\tc.AbortWithStatusJSON(http.StatusInternalServerError, errs.AUTH002)\n\t\treturn\n\t}\n\n\t//Add AccessToken to Redis\n\terr = env.rdis.AddPair(fmt.Sprint(user.ID), accessToken, time.Hour*24)\n\tif err != nil {\n\t\tLog.WithField(\"module\", \"redis\").WithError(err).Error(\"Error adding AccessToken to Redis.\")\n\t\terr = nil\n\t}\n\n\tc.JSON(http.StatusOK, refreshResponse{AccessToken: accessToken})\n\n}", "func NewAccessHandler(logger *zap.Logger, oauth2Config oauth2.Config, verifier *oidc.IDTokenVerifier, state string) AccessHandler {\n\treturn &accessHandler{\n\t\tlogger: logger,\n\t\toauth2Config: oauth2Config,\n\t\tstate: state,\n\t\tverifier: verifier,\n\t}\n}", "func createAccessToken(user model.User) (string, error) {\n\tvar newUser = model.User{}\n\tnewUser.ID = user.ID\n\texpiresAt := time.Now().Add(time.Duration(config.Conf.JwtTokenExpiration) * time.Millisecond)\n\tclaims := UserClaim{\n\t\tnewUser,\n\t\tjwt.StandardClaims{\n\t\t\tExpiresAt: expiresAt.Unix(),\n\t\t},\n\t}\n\n\t// Generates access accessToken and refresh accessToken\n\tunSignedToken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\treturn unSignedToken.SignedString([]byte(config.Conf.JwtSecret))\n}", "func GenerateNewAccessToken(u *domain.User) (string, error) 
{\n\t// Set secret key from .env file.\n\tsecret := os.Getenv(\"JWT_SECRET_KEY\")\n\n\t// Set expires minutes count for secret key from .env file.\n\tminutesCount, _ := strconv.Atoi(os.Getenv(\"JWT_SECRET_KEY_EXPIRE_MINUTES\"))\n\n\t// Create token\n\ttoken := jwt.New(jwt.SigningMethodHS256)\n\n\t// Set claims\n\tclaims := token.Claims.(jwt.MapClaims)\n\tclaims[\"id\"] = u.ID\n\tclaims[\"email\"] = u.Email\n\tclaims[\"username\"] = u.Username\n\tclaims[\"full_name\"] = u.FullName\n\tclaims[\"exp\"] = time.Now().Add(time.Minute * time.Duration(minutesCount)).Unix()\n\n\t// Generate encoded token and send it as response.\n\tt, err := token.SignedString([]byte(secret))\n\tif err != nil {\n\t\t// Return error, it JWT token generation failed.\n\t\treturn \"\", err\n\t}\n\n\treturn t, nil\n}", "func (a AccessTokens) Create(w http.ResponseWriter, r *http.Request) error {\n\tvar req createAccessTokenRequest\n\n\tlogger, err := middleware.GetLogger(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := jsonapi.UnmarshalPayload(r.Body, &req); err != nil {\n\t\tapi.InvalidJSONError.Render(w, http.StatusBadRequest)\n\t\treturn nil\n\t}\n\n\tstate := req.State\n\n\tcallback := make(chan OAuthCallback)\n\ta.Callbacks[state] = callback\n\n\ttoken, err := waitForCallback(callback)\n\tdelete(a.Callbacks, state)\n\n\tif err != nil {\n\t\tlogger.With(\"error\", err.Error()).Info(\"oauth request failed\")\n\t\tapi.OauthError.Render(w, http.StatusBadRequest) // TODO: improve error\n\t\treturn nil\n\t}\n\n\tw.WriteHeader(http.StatusCreated)\n\terr = json.NewEncoder(w).Encode(token)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to encode access token\")\n\t}\n\treturn nil\n}", "func (ah *AppHandler) AccessTokenHandler(h http.Handler) http.Handler {\n\treturn http.HandlerFunc(\n\t\tfunc(w http.ResponseWriter, r *http.Request) {\n\t\t\tconst tokenType string = \"Bearer\"\n\n\t\t\tlogger := *hlog.FromRequest(r)\n\t\t\tvar token string\n\n\t\t\t// retrieve the context from the 
http.Request\n\t\t\tctx := r.Context()\n\n\t\t\t// Pull the token from the Authorization header\n\t\t\t// by retrieving the value from the Header map with\n\t\t\t// \"Authorization\" as the key\n\t\t\t// format: Authorization: Bearer\n\t\t\theaderValue, ok := r.Header[\"Authorization\"]\n\t\t\tif ok && len(headerValue) >= 1 {\n\t\t\t\ttoken = headerValue[0]\n\t\t\t\ttoken = strings.TrimPrefix(token, tokenType+\" \")\n\t\t\t}\n\n\t\t\t// If the token is empty...\n\t\t\tif token == \"\" {\n\t\t\t\t// For Unauthenticated and Unauthorized errors,\n\t\t\t\t// the response body should be empty. Use logger\n\t\t\t\t// to log the error and then just send\n\t\t\t\t// http.StatusUnauthorized (401) or http.StatusForbidden (403)\n\t\t\t\t// depending on the circumstances. \"In summary, a\n\t\t\t\t// 401 Unauthorized response should be used for missing or bad authentication,\n\t\t\t\t// and a 403 Forbidden response should be used afterwards, when the user is\n\t\t\t\t// authenticated but isn’t authorized to perform the requested operation on\n\t\t\t\t// the given resource.\"\n\t\t\t\terrs.HTTPErrorResponse(w, logger, errs.E(errs.Unauthenticated, errors.New(\"Unauthenticated - empty Bearer token\")))\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// add access token to context\n\t\t\tctx = auth.SetAccessToken2Context(ctx, token, tokenType)\n\n\t\t\t// call original, adding access token to request context\n\t\t\th.ServeHTTP(w, r.WithContext(ctx))\n\t\t})\n}", "func createAccessToken(userID string, refreshUUID string, expires int64) (string, error) {\n\tclaims := CustomClaimsAcessToken{\n\t\tUser_id: userID,\n\t\tRefresh_uuid: refreshUUID,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: expires,\n\t\t},\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS512, claims)\n\tsignedToken, err := token.SignedString([]byte(os.Getenv(\"TOKEN_SECRET\")))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn signedToken, nil\n}", "func GenerateNewAccessToken() (string, error) {\n\t// Set 
secret key from .env file.\n\tsecret := os.Getenv(\"JWT_SECRET_KEY\")\n\n\t// Set expires minutes count for secret key from .env file.\n\tminutesCount, _ := strconv.Atoi(os.Getenv(\"JWT_SECRET_KEY_EXPIRE_MINUTES_COUNT\"))\n\n\t// Create a new claims.\n\tclaims := jwt.MapClaims{}\n\n\t// Set public claims:\n\tclaims[\"exp\"] = time.Now().Add(time.Minute * time.Duration(minutesCount)).Unix()\n\n\t// Create a new JWT access token with claims.\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\n\t// Generate token.\n\tt, err := token.SignedString([]byte(secret))\n\tif err != nil {\n\t\t// Return error, it JWT token generation failed.\n\t\treturn \"\", err\n\t}\n\n\treturn t, nil\n}", "func TestTokenCreateHandler4(t *testing.T) {\n\tapp, trx, down, err := models.NewAppForTest(nil, t)\n\tassert.Nil(t, err)\n\tdefer down(t)\n\n\tctx, _ := gin.CreateTestContext(httptest.NewRecorder())\n\tbw := &bodyWriter{ResponseWriter: ctx.Writer, body: bytes.NewBufferString(\"\")}\n\tctx.Writer = bw\n\tctx.Set(\"db\", trx)\n\tctx.Set(\"app\", app)\n\tctx.Set(\"requestId\", rand.Int63n(100000000))\n\n\tpath := \"/wrong path//\"\n\tavailableTimes := 1\n\tctx.Set(\"inputParam\", &tokenCreateInput{\n\t\tPath: &path,\n\t\tAvailableTimes: &availableTimes,\n\t})\n\n\tTokenCreateHandler(ctx)\n\tassert.Contains(t, bw.body.String(), \"path is not a legal unix path\")\n}", "func (m *Manager) GenerateAccessToken(gt oauth2.GrantType, tgr *oauth2.TokenGenerateRequest) (accessToken oauth2.TokenInfo, err error) {\n\tif gt == oauth2.AuthorizationCode {\n\t\tti, terr := m.getAuthorizationCode(tgr.Code)\n\t\tif terr != nil {\n\t\t\terr = terr\n\t\t\treturn\n\t\t} else if ti.GetRedirectURI() != tgr.RedirectURI || ti.GetClientID() != tgr.ClientID {\n\t\t\terr = errors.ErrInvalidAuthorizeCode\n\t\t\treturn\n\t\t} else if verr := m.delAuthorizationCode(tgr.Code); verr != nil {\n\t\t\terr = verr\n\t\t\treturn\n\t\t}\n\t\ttgr.UserID = ti.GetUserID()\n\t\ttgr.Scope = ti.GetScope()\n\t\tif exp := 
ti.GetAccessExpiresIn(); exp > 0 {\n\t\t\ttgr.AccessTokenExp = exp\n\t\t}\n\t}\n\tcli, err := m.GetClient(tgr.ClientID)\n\tif err != nil {\n\t\treturn\n\t} else if tgr.ClientSecret != cli.GetSecret() {\n\t\terr = errors.ErrInvalidClient\n\t\treturn\n\t}\n\t_, ierr := m.injector.Invoke(func(ti oauth2.TokenInfo, gen oauth2.AccessGenerate, stor oauth2.TokenStore) {\n\t\tti = m.newTokenInfo(ti)\n\t\ttd := &oauth2.GenerateBasic{\n\t\t\tClient: cli,\n\t\t\tUserID: tgr.UserID,\n\t\t\tCreateAt: time.Now(),\n\t\t}\n\t\tav, rv, terr := gen.Token(td, m.gtcfg[gt].IsGenerateRefresh)\n\t\tif terr != nil {\n\t\t\terr = terr\n\t\t\treturn\n\t\t}\n\t\tti.SetClientID(tgr.ClientID)\n\t\tti.SetUserID(tgr.UserID)\n\t\tti.SetRedirectURI(tgr.RedirectURI)\n\t\tti.SetScope(tgr.Scope)\n\t\tti.SetAccessCreateAt(td.CreateAt)\n\t\tti.SetAccess(av)\n\n\t\taexp := m.gtcfg[gt].AccessTokenExp\n\t\tif exp := tgr.AccessTokenExp; exp > 0 {\n\t\t\taexp = exp\n\t\t}\n\t\tti.SetAccessExpiresIn(aexp)\n\t\tif rv != \"\" && m.gtcfg[gt].IsGenerateRefresh {\n\t\t\tti.SetRefreshCreateAt(td.CreateAt)\n\t\t\tti.SetRefreshExpiresIn(m.gtcfg[gt].RefreshTokenExp)\n\t\t\tti.SetRefresh(rv)\n\t\t}\n\n\t\terr = stor.Create(ti)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\taccessToken = ti\n\t})\n\tif ierr != nil && err == nil {\n\t\terr = ierr\n\t}\n\treturn\n}", "func newTokenInjector(meta *metadata.Client, acc string) func(*http.Request) error {\n\tif acc == \"\" {\n\t\tacc = \"default\"\n\t}\n\tacc = url.PathEscape(acc)\n\treturn func(req *http.Request) error {\n\t\taud := fmt.Sprintf(\"%s://%s\", req.URL.Scheme, req.URL.Host)\n\t\taud = url.QueryEscape(aud)\n\t\t// TODO(smut): Cache the token and reuse if not yet expired.\n\t\t// Currently the only user of this package only makes one\n\t\t// request per boot so caching isn't too important yet.\n\t\ttok, err := meta.Get(fmt.Sprintf(tokMetadata, acc, aud))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treq.Header.Set(vmtoken.Header, tok)\n\t\treturn 
nil\n\t}\n}", "func TestTokenCreateHandler2(t *testing.T) {\n\tapp, trx, down, err := models.NewAppForTest(nil, t)\n\tassert.Nil(t, err)\n\tdefer down(t)\n\ttestDBConn = trx\n\trouter := Routers()\n\n\tw := httptest.NewRecorder()\n\tapi := buildRoute(config.DefaultConfig.HTTP.APIPrefix, \"/token/create\")\n\tbody := fmt.Sprintf(\"appUid=%s&nonce=%s\", app.UID, models.RandomWithMD5(128))\n\tsign := SignStrWithSecret(body, app.Secret)\n\tbody = fmt.Sprintf(\"%s&sign=%s\", body, sign)\n\treq, _ := http.NewRequest(\"POST\", api, strings.NewReader(body))\n\treq.Header.Set(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\trouter.ServeHTTP(w, req)\n\tassert.Equal(t, http.StatusOK, w.Code)\n\n\tvar response = &Response{}\n\tjson := janitor.ConfigCompatibleWithStandardLibrary\n\tassert.Nil(t, json.Unmarshal(w.Body.Bytes(), response))\n\tassert.True(t, response.Success)\n\n\trespData := response.Data.(map[string]interface{})\n\trespAvailableTimes := respData[\"availableTimes\"].(float64)\n\tassert.Equal(t, -1, int(respAvailableTimes))\n\trespTokenValue := respData[\"token\"].(string)\n\tassert.Equal(t, 32, len(respTokenValue))\n\tassert.Nil(t, respData[\"ip\"])\n\trespReadOnly := respData[\"readOnly\"].(float64)\n\tassert.Equal(t, 0, int(respReadOnly))\n\trespReadPath := respData[\"path\"].(string)\n\tassert.Equal(t, \"/\", respReadPath)\n\tassert.Nil(t, respData[\"expiredAt\"])\n}", "func NewAccessToken(token, tokenType string) AccessToken {\n\treturn AccessToken{\n\t\tToken: token,\n\t\tTokenType: tokenType,\n\t}\n}", "func NewFakeHandler(allow bool) Handler {\n\treturn &fakeHandler{allow: allow}\n}", "func NewAccessTokenCfg(auths map[string]map[string]interface{}) *AccessTokenCfg {\n\tthis := AccessTokenCfg{}\n\tthis.Auths = auths\n\treturn &this\n}", "func NewTokenerFactory(url, audience string, doer Doer) TokenerFactory {\n\treturn func(clientID, clientSecret string) Tokener {\n\t\treturn NewTokener(url, clientID, clientSecret, audience, doer)\n\t}\n}", "func 
TestTokenCreateHandler3(t *testing.T) {\n\tapp, trx, down, err := models.NewAppForTest(nil, t)\n\tassert.Nil(t, err)\n\tdefer down(t)\n\ttestDBConn = trx\n\trouter := Routers()\n\n\tw := httptest.NewRecorder()\n\tapi := buildRoute(config.DefaultConfig.HTTP.APIPrefix, \"/token/create\")\n\texpiredAt := time.Now().Add(10 * time.Hour)\n\texpiredAtUnix := expiredAt.Unix()\n\tsecret := SignStrWithSecret(\"\", \"\")\n\tbody := fmt.Sprintf(\n\t\t\"appUid=%s&availableTimes=1000&expiredAt=%d&ip=192.168.0.1&nonce=%s&path=/test&readOnly=1&secret=%s\",\n\t\tapp.UID, expiredAtUnix, models.RandomWithMD5(128), secret,\n\t)\n\tsign := SignStrWithSecret(body, app.Secret)\n\tbody = fmt.Sprintf(\"%s&sign=%s\", body, sign)\n\treq, _ := http.NewRequest(\"POST\", api, strings.NewReader(body))\n\treq.Header.Set(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\trouter.ServeHTTP(w, req)\n\tassert.Equal(t, 200, w.Code)\n\n\tvar response = &Response{}\n\tjson := janitor.ConfigCompatibleWithStandardLibrary\n\tassert.Nil(t, json.Unmarshal(w.Body.Bytes(), response))\n\tassert.True(t, response.Success)\n\n\trespData := response.Data.(map[string]interface{})\n\trespAvailableTimes := respData[\"availableTimes\"].(float64)\n\tassert.Equal(t, 1000, int(respAvailableTimes))\n\trespTokenValue := respData[\"token\"].(string)\n\tassert.Equal(t, 32, len(respTokenValue))\n\trespIP := respData[\"ip\"].(string)\n\tassert.Equal(t, \"192.168.0.1\", respIP)\n\trespReadOnly := respData[\"readOnly\"].(float64)\n\tassert.Equal(t, 1, int(respReadOnly))\n\trespReadPath := respData[\"path\"].(string)\n\tassert.Equal(t, \"/test\", respReadPath)\n\trespExpiredAt := respData[\"expiredAt\"].(float64)\n\tassert.Equal(t, int64(respExpiredAt), expiredAtUnix)\n}", "func (h *handler) AccessToken(w http.ResponseWriter, r *http.Request) error {\n\tdefer flushAndClose(r.Body)\n\n\tvar data model.AccessRequest\n\terr := json.NewDecoder(r.Body).Decode(&data)\n\tif err != nil {\n\t\treturn err\n\t}\n\ttok, err := 
h.access(r.Context(), data.Domain, data.Role, data.ProxyForPrincipal, data.Expiry)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tw.Header().Set(\"Content-type\", \"application/json; charset=utf-8\")\n\treturn json.NewEncoder(w).Encode(tok)\n}", "func (am AppModule) NewHandler() sdk.Handler {\n\treturn NewHandler(am.keeper)\n}", "func (am AppModule) NewHandler() sdk.Handler {\n\treturn NewHandler(am.keeper)\n}", "func (am AppModule) NewHandler() sdk.Handler {\n\treturn NewHandler(am.keeper)\n}", "func (a *TokenAdapter) NewAccessToken(c *oauth.TokenContext) *oauth.TokenResponse {\n\tfor k, v := range a.CustomValues {\n\t\tvar ok bool\n\t\tvar rawVal interface{}\n\t\tif rawVal, ok = c.Values[k]; !ok {\n\t\t\treturn nil\n\t\t}\n\n\t\tvar v2 []string\n\t\tif v2, ok = rawVal.([]string); !ok {\n\t\t\treturn nil\n\t\t}\n\n\t\tif !slice.String(v2).\n\t\t\tExistsAll(v, false) {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tresp := oauth.NewTokenResponse(\n\t\ta.AccessToken,\n\t\t\"bearer\",\n\t\t3600,\n\t\t\"\",\n\t\ta.Scope,\n\t\t\"CudYQpuw\",\n\t)\n\treturn &resp\n}", "func NewAccessToken(t *AccessToken) error {\n\tuuid := uuid.NewV4()\n\tt.Sha1 = base.EncodeSha1(uuid.String())\n\t_, err := x.Insert(t)\n\treturn err\n}", "func FakeHandler() http.Handler {\n\tgin.SetMode(gin.TestMode)\n\n\te := gin.New()\n\n\te.GET(\"/api/v1/users\", getUsers)\n\te.GET(\"/api/v1/users/:user\", getUser)\n\te.POST(\"/api/v1/users\", createUser)\n\te.PUT(\"/api/v1/users/:user\", updateUser)\n\te.DELETE(\"/api/v1/users/:user\", deleteUser)\n\n\treturn e\n}", "func HttpAuthorizerFactory(ac *AuthorizerConfig) (result Authorizer, err error) {\n\n\tvar (\n\t\tconfig *HttpAuthorizerConfig\n\t)\n\n\t// get config\n\tif config, err = NewHttpAuthorizerConfig(ac); err != nil {\n\t\treturn\n\t}\n\n\tha := &HttpAuthorizer{\n\t\tconfig: config,\n\t}\n\n\tresult = ha\n\treturn\n}", "func TppHTTPServerFactory(s TppHTTPServer) http.Handler {\n\troutes := httprouter.New()\n\troutes.GET(\"/login\", 
s.Login)\n\troutes.GET(\"/\", s.AxaPay)\n\troutes.POST(\"/test/payments/embedded\", s.TestPaymentEmbedded)\n\n\treturn routes\n}", "func NewHandler(k Keeper) sdk.Handler {\n\treturn func(ctx sdk.Context, msg sdk.Msg) sdk.Result {\n\t\tswitch msg := msg.(type) {\n\t\tcase MsgFactoryIssueFiats:\n\t\t\treturn handleMsgFactoryIssueFiat(ctx, k, msg)\n\t\tcase MsgFactoryRedeemFiats:\n\t\t\treturn handleMsgFactoryRedeemFiat(ctx, k, msg)\n\t\tcase MsgFactorySendFiats:\n\t\t\treturn handleMsgFactorySendFiats(ctx, k, msg)\n\t\tcase MsgFactoryExecuteFiats:\n\t\t\treturn handleMsgFactoryExecuteFiats(ctx, k, msg)\n\t\tdefault:\n\t\t\terrMsg := \"Unrecognized fiatFactory Msg type: \" + reflect.TypeOf(msg).Name()\n\t\t\treturn sdk.ErrUnknownRequest(errMsg).Result()\n\t\t}\n\t}\n}", "func AccessToken(w http.ResponseWriter, r *http.Request) {\n\n\taccessTokenRequest := &models.AccessTokenRequestBody{}\n\tif !accessTokenRequest.Validate(w, r) {\n\t\treturn\n\t}\n\taccesstoken := accessTokenRequest.GenerateAccessToken(w)\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\n\tresponse := map[string]interface{}{\"data\": map[string]interface{}{\n\t\t\"access_token\": accesstoken.Token,\n\t\t\"expires_at\": accesstoken.ExpiresAt,\n\t}, \"status\": 1}\n\n\tjson.NewEncoder(w).Encode(response)\n}", "func NewHandler(keeper Keeper) sdk.Handler {\n\treturn func(ctx sdk.Context, msg sdk.Msg) sdk.Result {\n\t\tswitch msg := msg.(type) {\n\t\tcase MsgRegisterKey:\n\t\t\treturn handleMsgRegisterKey(ctx, keeper, msg)\n\t\tcase MsgUpdateParams:\n\t\t\treturn handleMsgUpdateParams(ctx, keeper, msg)\n\t\tdefault:\n\t\t\terrMsg := fmt.Sprintf(\"Unrecognized auth message type: %T\", msg)\n\t\t\treturn sdk.ErrUnknownRequest(errMsg).Result()\n\t\t}\n\t}\n}", "func AccessToken(w http.ResponseWriter, r *http.Request) {\n\tvar accessTokenQuery accessTokenQuery\n\n\tbody, err := in.JSON(r, &accessTokenQuery)\n\tif err != nil {\n\t\tout.JSON(w, 400, 
err)\n\t\treturn\n\t}\n\n\tresult := core.ValidateJSON(\"auth\", \"accessTokenQuery\", string(body))\n\tif !result.Valid {\n\t\tout.JSON(w, 422, result.Errors)\n\t\treturn\n\t}\n\n\tuser := userSrv.Login(accessTokenQuery.Username, accessTokenQuery.Password)\n\tif user == nil {\n\t\tout.Unauthorized(w)\n\t\treturn\n\t}\n\ttoken := authSrv.GenerateToken(user.ID, user.Roles)\n\tout.JSON(w, 200, token)\n}", "func newClient(token string) *github.Client {\n\tts := oauth2.StaticTokenSource(\n\t\t&oauth2.Token{AccessToken: token},\n\t)\n\ttc := oauth2.NewClient(context.Background(), ts)\n\n\treturn github.NewClient(tc)\n}", "func NewMockAccessToken(ctrl *gomock.Controller) *MockAccessToken {\n\tmock := &MockAccessToken{ctrl: ctrl}\n\tmock.recorder = &MockAccessTokenMockRecorder{mock}\n\treturn mock\n}", "func GenAuthTokenHandler(c *gin.Context) {\r\n\t// Create a new token object, specifying signing method and the claims\r\n\t// you would like it to contain.\r\n\r\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\r\n\t\t\"foo\": \"bar\",\r\n\t\t\"expire\": func() int64 {\r\n\t\t\tnow := time.Now()\r\n\t\t\tduration, _ := time.ParseDuration(\"14d\")\r\n\t\t\tm1 := now.Add(duration)\r\n\t\t\treturn m1.Unix()\r\n\t\t}(),\r\n\t})\r\n\r\n\t// Sign and get the complete encoded token as a string using the secret\r\n\ttokenString, err := token.SignedString([]byte(utils.AppConfig.Server.SecretKey))\r\n\r\n\tfmt.Println(tokenString, err)\r\n\tc.String(http.StatusOK, tokenString)\r\n}", "func NewFactory(config io.Reader) (admission.Interface, error) {\n\treturn New()\n}", "func GenerateAccessToken(user models.User) string {\n\tclaims := AccessTokenClaims{\n\t\tuser.ID,\n\t\tuser.Admin,\n\t\tuser.Helper,\n\t\tjwt.StandardClaims{\n\t\t\tExpiresAt: time.Now().Add(time.Minute * 15).Unix(), // 15 minutes\n\t\t\tIssuedAt: time.Now().Unix(),\n\t\t},\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS384, claims)\n\tres, _ := 
token.SignedString([]byte(os.Getenv(\"ACCESS_TOKEN_SECRET\")))\n\n\treturn res\n}", "func CreateAccessToken(clientID string, userID string, expiresIn int, scope string) *AccessToken {\n\treturn &AccessToken{\n\t\tClientID: clientID,\n\t\tUserID: userID,\n\t\tIAT: time.Now().Unix(),\n\t\tExpiresAt: time.Now().Add(time.Second * time.Duration(expiresIn)).Unix(),\n\t\tScope: scope,\n\t}\n}", "func (api42 *API42) NewToken() {\n\tvar err error\n\n\turlAuth, _ := url.Parse(cst.AuthURL)\n\tparamAuth := url.Values{}\n\tparamAuth.Add(cst.AuthVarClt, api42.keys.uid)\n\tparamAuth.Add(cst.AuthVarRedirectURI, cst.AuthValRedirectURI)\n\tparamAuth.Add(cst.AuthVarRespType, cst.AuthValRespType)\n\turlAuth.RawQuery = paramAuth.Encode()\n\n\tlog.Info().Msg(\"Need new access token\")\n\tfmt.Print(\"Please, enter the following URL in your web browser, authenticate and authorize:\\n\" + urlAuth.String() + \"\\nPaste the code generated (input hidden):\\n\")\n\n\tcode := tools.ReadAndHideData()\n\tcode = strings.TrimSpace(code)\n\n\ttokenData := tokenReqNew{\n\t\tTokenGrant: cst.TokenReqGrantAuthCode,\n\t\tTokenCltID: api42.keys.uid,\n\t\tTokenCltSecret: api42.keys.secret,\n\t\tTokenCode: code,\n\t\tTokenRedirect: cst.TokenReqRedirectURI,\n\t}\n\n\ttokenJSON, _ := json.Marshal(tokenData)\n\n\trsp, err := http.Post(cst.TokenURL, \"application/json\", bytes.NewBuffer(tokenJSON))\n\tif err != nil {\n\t\tlog.Fatal().Err(err).Msg(\"Failed to retrieve access token\")\n\t}\n\tdefer rsp.Body.Close()\n\n\tvar rspJSON tokenRsp\n\tdecoder := json.NewDecoder(rsp.Body)\n\tdecoder.DisallowUnknownFields()\n\tif err = decoder.Decode(&rspJSON); err != nil {\n\t\tlog.Fatal().Err(err).Msg(\"Failed to decode JSON values of the new access token\")\n\t}\n\n\tapi42.setNewToken(rspJSON.TokenAccess, rspJSON.TokenRefresh)\n}", "func newAccessTokenDBSession(session *gocql.Session) *accessTokenDBSession {\n\treturn &accessTokenDBSession{\n\t\tsession: session,\n\t}\n}", "func GetAccessToken(config 
provider.ProviderConfig, r *http.Request) (token Token, err error) {\n\tqueryValues, err := query.Values(accessTokenRequest{\n\t\tClientId: config.Key,\n\t\tClientSecret: config.Secret,\n\t\tCode: r.URL.Query().Get(\"code\"),\n\t\tGrantType: \"authorization_code\",\n\t\tRedirectURI: genCallbackURL(config, r),\n\t})\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\tresp, err := http.PostForm(config.Provider.AccessURL, queryValues)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvalues, err := url.ParseQuery(string(body))\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif _, ok := values[\"error\"]; ok == true {\n\t\terr = Error{\n\t\t\tCode: values[\"error\"][0],\n\t\t\tDescription: values[\"error_description\"][0],\n\t\t\tURI: values[\"error_uri\"][0],\n\t\t}\n\t\treturn\n\t}\n\n\t// everything went A-OK!\n\ttoken.AccessToken = values[\"access_token\"][0]\n\ttoken.Type = values[\"token_type\"][0]\n\n\t// TODO: maybe store the scope with the state token and set it there if not returned\n\t// by the service\n\tif scope, ok := values[\"scope\"]; ok == true {\n\t\ttoken.Scope = strings.Split(scope[0], config.Provider.ScopeDelimiter)\n\t}\n\n\treturn\n}", "func (app AppModule) NewHandler() sdk.Handler {\n\treturn NewHandler(app.msKeeper, app.poaKeeper)\n}", "func OAuth2AccessTokenMiddleware(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\ttok, tokType, err := readBearerToken(r)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"OAuth2TokenMiddleware: error in readBearerToken: %s.\", err)\n\t\t\thttp.Error(w, \"error reading access token from HTTP request\", http.StatusForbidden)\n\t\t\treturn\n\t\t}\n\n\t\tif len(tok) == 255 {\n\t\t\tlog.Println(\"WARNING: The server received an OAuth2 access token that is exactly 255 characters long. 
This may indicate that the client's version of libcurl is older than 7.33.0 and does not support longer passwords in HTTP Basic auth. Sourcegraph's access tokens may exceed 255 characters, in which case libcurl will truncate them and auth will fail. If you notice auth failing, try upgrading both the OpenSSL and GnuTLS flavours of libcurl to a version 7.33.0 or greater. If that doesn't solve the issue, please report it.\")\n\t\t}\n\n\t\tif tok != \"\" {\n\t\t\tctx := httpctx.FromRequest(r)\n\t\t\tctx = sourcegraph.WithCredentials(ctx, oauth2.StaticTokenSource(&oauth2.Token{AccessToken: tok, TokenType: tokType}))\n\t\t\thttpctx.SetForRequest(r, ctx)\n\n\t\t\t// Vary based on Authorization header if the request is\n\t\t\t// operating with any level of authorization, so that the\n\t\t\t// response can't be cached and mixed in with unauthorized\n\t\t\t// responses in an HTTP cache.\n\t\t\tw.Header().Add(\"vary\", \"Authorization\")\n\t\t}\n\t\tnext.ServeHTTP(w, r)\n\t})\n}", "func BuildTokenHandler(srv *server.Server) http.Handler {\n\tfn := func(w http.ResponseWriter, r *http.Request) error {\n\t\tif err := srv.HandleTokenRequest(w, r); err != nil {\n\t\t\treturn apperrors.Wrap(err)\n\t\t}\n\n\t\treturn nil\n\t}\n\n\treturn httpjson.HandlerFunc(fn)\n}", "func createOAuthClient(token string) *http.Client {\n\tctx := context.Background()\n\tsrc := oauth2.StaticTokenSource(&oauth2.Token{\n\t\tAccessToken: token,\n\t})\n\n\treturn oauth2.NewClient(ctx, src)\n}", "func CreateOrphanTokenHandler(resWriter http.ResponseWriter, req *http.Request) {\n\n\tcorsHandler(resWriter, req)\n\n\t// first lets get the credentials off the request\n\tvaultCredentials, err := authenticator.GetCredentials(req)\n\tif err != nil {\n\t\twriteHTTPResponse(resWriter, \"error\", \"\", \"Bad Request: auth required\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// lets get our createTokenPayload struct\n\tpayload, err := extractCreateTokenPayload(&resWriter, req)\n\tif err != nil 
{\n\t\tlog.Error(\"Invalid payload: \" + err.Error())\n\t\twriteHTTPResponse(resWriter, \"error\", \"\", err.Error(), http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\t// must have at least one policy\n\tif len(payload.Policies) == 0 {\n\t\twriteHTTPResponse(resWriter, \"error\", \"\", \"one or more vault 'policies' are required\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// auth the actual user against value and get\n\t// the client access/auth token which we can then\n\t// use to create the actual orphan token\n\tuserToken, err := authenticator.Auth(vaultCredentials)\n\tif err != nil {\n\t\tlog.Error(\"Failed to authenticated againsg vault w/ VaultCredentials: \" + err.Error())\n\t\twriteHTTPResponse(resWriter, \"error\", \"\", err.Error(), http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\ttoken, err := createOrphanToken(userToken, payload)\n\tif err != nil {\n\t\tlog.Error(\"Failed to create orphan token: \" + err.Error())\n\t\twriteHTTPResponse(resWriter, \"error\", \"\", err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\twriteHTTPResponse(resWriter, \"ok\", token,\n\t\tfmt.Sprintf(\"renewable:%v period:%v policies:%v\",\n\t\t\tpayload.Renewable,\n\t\t\tpayload.Period,\n\t\t\tpayload.Policies), http.StatusOK)\n\n}", "func (o *oauth) tokenHandler(w http.ResponseWriter, r *http.Request) {\n\tw = &rememberingWriter{ResponseWriter: w}\n\n\t// This block is copied from o.server.HandleTokenRequest\n\t// We needed to inspect what's going on a bit.\n\tgt, tgr, verr := o.server.ValidationTokenRequest(r)\n\tif verr != nil {\n\t\tencodeError(w, verr)\n\t\treturn\n\t}\n\tti, verr := o.server.GetAccessToken(gt, tgr)\n\tif verr != nil {\n\t\tencodeError(w, verr)\n\t\treturn\n\t}\n\tdata := o.server.GetTokenData(ti)\n\tbs, err := json.Marshal(data)\n\tif err != nil {\n\t\tencodeError(w, err)\n\t\treturn\n\t}\n\t// (end of copy)\n\n\t// HandleTokenRequest currently returns nil even if the token request\n\t// failed. 
That menas we can't clearly know if token generation passed or failed.\n\t//\n\t// So we need to find out if an error is written, which we can\n\t// infer by w.WriteHeader call (a 4xx or 5xx status code).\n\tif ww, ok := w.(*rememberingWriter); ok && ww.statusCode > 400 { // wrote error\n\t\ttokenGenerations.Add(1)\n\t\tw.Header().Set(\"X-User-Id\", ti.GetUserID()) // only on non-errors\n\t}\n\n\t// Write our response\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(bs)\n}", "func newClient(conf Config) (*github.Client, error) {\n\tctx := context.Background()\n\n\tvar ts oauth2.TokenSource\n\tswitch {\n\tcase conf.HasAPIToken():\n\t\tts = oauth2.StaticTokenSource(\n\t\t\t&oauth2.Token{AccessToken: conf.GetAPIToken()},\n\t\t)\n\tdefault:\n\t\treturn nil, errors.New(\"Cannot find GitHub credentials\")\n\t}\n\n\ttc := oauth2.NewClient(ctx, ts)\n\treturn github.NewClient(tc), nil\n}", "func sideTwistHandlerFactory(responseWrapper responseWrapperFunc, templatePath string, encryptFn, decryptFn encryptFunc) *SideTwistHandler {\n\treturn &SideTwistHandler{\n\t\ttemplatePath: templatePath,\n\t\tcommandNumbers: make(map[string]int),\n\t\tpendingCommandOutput: make(map[string]map[int]bool),\n\t\tpendingUploads: make(map[string]map[int]string),\n\t\tresponseWrapper: responseWrapper,\n\t\tencryptFn: encryptFn,\n\t\tdecryptFn: decryptFn,\n\t}\n}", "func newVaultAuthConfigHandler(secretName string, vaultClient vault.Client) VaultAuthConfigHandler {\n\treturn VaultAuthConfigHandler{\n\t\tsecretName: secretName,\n\t\tvaultClient: vaultClient,\n\t}\n}", "func (K *KWAPI) newToken(username, password string) (auth *KWAuth, err error) {\n\n\tpath := fmt.Sprintf(\"https://%s/oauth/token\", K.Server)\n\n\treq, err := http.NewRequest(http.MethodPost, path, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\thttp_header := make(http.Header)\n\thttp_header.Set(\"Content-Type\", 
\"application/x-www-form-urlencoded\")\n\thttp_header.Set(\"User-Agent\", K.AgentString)\n\n\treq.Header = http_header\n\n\tclient_id := K.ApplicationID\n\n\tpostform := &url.Values{\n\t\t\"client_id\": {client_id},\n\t\t\"client_secret\": {K.secrets.decrypt(K.secrets.client_secret_key)},\n\t\t\"redirect_uri\": {K.RedirectURI},\n\t}\n\n\tif password != NONE {\n\t\tpostform.Add(\"grant_type\", \"password\")\n\t\tpostform.Add(\"username\", username)\n\t\tpostform.Add(\"password\", password)\n\t} else {\n\t\tsignature := K.secrets.decrypt(K.secrets.signature_key)\n\t\trandomizer := rand.New(rand.NewSource(int64(time.Now().Unix())))\n\t\tnonce := randomizer.Int() % 999999\n\t\ttimestamp := int64(time.Now().Unix())\n\n\t\tbase_string := fmt.Sprintf(\"%s|@@|%s|@@|%d|@@|%d\", client_id, username, timestamp, nonce)\n\n\t\tmac := hmac.New(sha1.New, []byte(signature))\n\t\tmac.Write([]byte(base_string))\n\t\tsignature = hex.EncodeToString(mac.Sum(nil))\n\n\t\tauth_code := fmt.Sprintf(\"%s|@@|%s|@@|%d|@@|%d|@@|%s\",\n\t\t\tbase64.StdEncoding.EncodeToString([]byte(client_id)),\n\t\t\tbase64.StdEncoding.EncodeToString([]byte(username)),\n\t\t\ttimestamp, nonce, signature)\n\n\t\tpostform.Add(\"grant_type\", \"authorization_code\")\n\t\tpostform.Add(\"code\", auth_code)\n\n\t}\n\n\tif K.Snoop {\n\t\tStdout(\"\\n[kiteworks]: %s\\n--> ACTION: \\\"POST\\\" PATH: \\\"%s\\\"\", username, path)\n\t\tfor k, v := range *postform {\n\t\t\tif k == \"grant_type\" || k == \"redirect_uri\" || k == \"scope\" {\n\t\t\t\tStdout(\"\\\\-> POST PARAM: %s VALUE: %s\", k, v)\n\t\t\t} else {\n\t\t\t\tStdout(\"\\\\-> POST PARAM: %s VALUE: [HIDDEN]\", k)\n\t\t\t}\n\t\t}\n\t}\n\n\treq.Body = ioutil.NopCloser(bytes.NewReader([]byte(postform.Encode())))\n\n\tclient := K.Session(username).NewClient()\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := K.decodeJSON(resp, &auth); err != nil {\n\t\treturn nil, err\n\t}\n\n\tauth.Expires = auth.Expires + 
time.Now().Unix()\n\treturn\n}", "func getClient(config *oauth2.Config) *http.Client {\n // The file token.json stores the user's access and refresh tokens, and is\n // created automatically when the authorization flow completes for the first\n // time.\n tokFile := \"token.json\"\n tok, err := tokenFromFile(tokFile)\n if err != nil {\n tok = getTokenFromWeb(config)\n saveToken(tokFile, tok)\n }\n return config.Client(context.Background(), tok)\n}", "func client(accessToken string) *github.Client {\n\tif c == nil {\n\t\t// log.Println(\"create auth client\")\n\t\tctx := context.Background()\n\t\tts := oauth2.StaticTokenSource(\n\t\t\t&oauth2.Token{AccessToken: accessToken},\n\t\t)\n\t\ttc := oauth2.NewClient(ctx, ts)\n\t\tc = github.NewClient(tc)\n\t}\n\treturn c\n}", "func AccessToken(code string, w http.ResponseWriter,r *http.Request) Access {\n\tswitch os.Getenv(\"connection\") {\n\tcase \"DEV\":\n\t\tlink = \"https://auth-dev.vatsim.net/oauth/token\"\n\tcase \"LIVE\":\n\t\tlink = \"https://auth.vatsim.net/oauth/token\"\n\tdefault:\n\t\thttp.Redirect(w, r, \"/\", http.StatusBadRequest)\n\t}\n\n\n\tdata := url.Values{}\n\tdata.Set(\"grant_type\", \"authorization_code\")\n\tdata.Set(\"client_id\", os.Getenv(\"client_id\"))\n\tdata.Set(\"client_secret\", os.Getenv(\"secret\"))\n\tdata.Set(\"redirect_uri\", os.Getenv(\"redirect\"))\n\tdata.Set(\"code\", code)\n\n\trequest, requestError := http.PostForm(link, data)\n\trequest.Header.Set(\"Content-Type\", \"application/json\")\n\n\tif requestError != nil {\n\t\tlog.Fatal(requestError)\n\t}\n\n\n\tdefer request.Body.Close()\n\n\tbody, errorReading := ioutil.ReadAll(request.Body)\n\tif errorReading != nil {\n\t\tlog.Fatal(errorReading)\n\t}\n\n\tvar res Access\n\terrDecoding := json.Unmarshal(body, &res)\n\n\tif errDecoding != nil {\n\t\tlog.Fatal(errDecoding)\n\t}\n\n\treturn res\n}", "func (o OktaAuthPlugin) MakeAccessToken(creds Credentials) (string, error) {\n\tclientID := creds.ClientID\n\n\tif clientID == \"\" 
{\n\t\treturn \"\", fmt.Errorf(\"client ID required\")\n\t}\n\n\tif creds.ClientSecret == \"\" {\n\t\treturn \"\", fmt.Errorf(\"client secret required\")\n\t}\n\n\tclientCreds := client.Credentials{ClientID: clientID, ClientSecret: creds.ClientSecret}\n\tot, err := o.backend.RequestAccessToken(clientCreds)\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn ot.AccessToken, nil\n}", "func New(cfg config.Proxy, bp httputil.BufferPool, token ntokend.TokenProvider, access service.AccessProvider, role service.RoleProvider, svcCert service.SvcCertProvider) Handler {\n\treturn &handler{\n\t\tproxy: &httputil.ReverseProxy{\n\t\t\tBufferPool: bp,\n\t\t},\n\t\ttoken: token,\n\t\taccess: access,\n\t\trole: role,\n\t\tcfg: cfg,\n\t\tsvcCert: svcCert,\n\t}\n}", "func New(token string, appid string, secret string) *Weixin {\n\twx := &Weixin{}\n\twx.token = token\n\tif len(appid) > 0 && len(secret) > 0 {\n\t\twx.tokenChan = make(chan accessToken)\n\t\tgo createAccessToken(wx.tokenChan, appid, secret)\n\t}\n\treturn wx\n}", "func ProfileAccessTokensGetHandler(appCtx *appctx.Context) http.Handler {\n\thandlerFunc := func(w http.ResponseWriter, r *http.Request, user *models.User) {\n\t\taccessTokens, err := appCtx.TokensService().GetUserTokens(user.ID)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tappCtx.RequestLogger().Error(r, err)\n\t\t\treturn\n\t\t}\n\n\t\tdata := struct {\n\t\t\tAccessTokens []*models.AccessToken\n\t\t}{\n\t\t\tAccessTokens: accessTokens,\n\t\t}\n\t\trenderWithBase(r, w, appCtx, user, \"profile_access_tokens.html\", data)\n\t}\n\n\thandler := auth.WithUserOrRedirect(handlerFunc, appCtx)\n\n\thandler = middleware.HTMLHeaders(handler)\n\n\treturn handler\n}", "func (c *config) newClientToken(token, secret string) *internal.Client {\n\treturn internal.NewClientToken(\n\t\tc.API,\n\t\tc.Client,\n\t\tc.Secret,\n\t\t&oauth2.Token{\n\t\t\tAccessToken: token,\n\t\t\tRefreshToken: secret,\n\t\t},\n\t\ttoken,\n\t)\n}", "func 
NewHandler(am AccountKeeper) sdk.Handler {\n\treturn func(ctx sdk.Context, msg sdk.Msg) sdk.Result {\n\t\tswitch msg := msg.(type) {\n\t\tcase types.TransferMsg:\n\t\t\treturn handleTransferMsg(ctx, am, msg)\n\t\tcase types.TransferV2Msg:\n\t\t\treturn handleTransferV2Msg(ctx, am, msg)\n\t\tcase types.RecoverMsg:\n\t\t\treturn handleRecoverMsg(ctx, am, msg)\n\t\tcase types.RegisterV2Msg:\n\t\t\treturn handleRegisterV2Msg(ctx, am, msg)\n\t\tcase types.UpdateAccountMsg:\n\t\t\treturn handleUpdateAccountMsg(ctx, am, msg)\n\t\tdefault:\n\t\t\terrMsg := fmt.Sprintf(\"Unrecognized account msg type: %v\", reflect.TypeOf(msg).Name())\n\t\t\treturn sdk.ErrUnknownRequest(errMsg).Result()\n\t\t}\n\t}\n}", "func (client *FactoriesClient) getGitHubAccessTokenHandleResponse(resp *http.Response) (FactoriesClientGetGitHubAccessTokenResponse, error) {\n\tresult := FactoriesClientGetGitHubAccessTokenResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.GitHubAccessTokenResponse); err != nil {\n\t\treturn FactoriesClientGetGitHubAccessTokenResponse{}, err\n\t}\n\treturn result, nil\n}", "func (c Client) CreateAccessToken(state string) (oauth2.Token, error) {\n\tvar token oauth2.Token\n\trequest := createAccessTokenRequest{State: state}\n\n\tvar payload bytes.Buffer\n\terr := jsonapi.MarshalOnePayloadWithoutIncluded(&payload, &request)\n\tif err != nil {\n\t\treturn token, err\n\t}\n\n\tresp, err := c.post(\"/access_tokens\", &payload)\n\tif err != nil {\n\t\treturn token, err\n\t}\n\n\tif resp.StatusCode != http.StatusCreated {\n\t\treturn token, parseError(resp.Body)\n\t}\n\n\terr = json.NewDecoder(resp.Body).Decode(&token)\n\treturn token, err\n}", "func New(c Config) Client {\n\t// Generate secret proof. 
See https://developers.facebook.com/docs/graph-api/securing-requests/#appsecret_proof\n\tmac := hmac.New(sha256.New, []byte(c.Secret))\n\tmac.Write([]byte(c.Token))\n\n\tapi := strings.TrimSuffix(c.API, \"/\")\n\tif api == \"\" {\n\t\tapi = defaultAPI\n\t}\n\n\treturn Client{\n\t\ttoken: c.Token,\n\t\tsecretProof: hex.EncodeToString(mac.Sum(nil)),\n\t\tapi: api,\n\t}\n}", "func mockOAuthServer() *httptest.Server {\n\t// prepare a port for the mocked server\n\tserver := httptest.NewUnstartedServer(http.DefaultServeMux)\n\n\t// mock the used REST path for the tests\n\tmockedHandler := http.NewServeMux()\n\tmockedHandler.HandleFunc(\"/.well-known/openid-configuration\", func(writer http.ResponseWriter, request *http.Request) {\n\t\ts := fmt.Sprintf(`{\n \"issuer\":\"%s\",\n \"authorization_endpoint\":\"%s/authorize\",\n \"token_endpoint\":\"%s/oauth/token\",\n \"device_authorization_endpoint\":\"%s/oauth/device/code\"\n}`, server.URL, server.URL, server.URL, server.URL)\n\t\tfmt.Fprintln(writer, s)\n\t})\n\tmockedHandler.HandleFunc(\"/oauth/token\", func(writer http.ResponseWriter, request *http.Request) {\n\t\tfmt.Fprintln(writer, \"{\\n \\\"access_token\\\": \\\"token-content\\\",\\n \\\"token_type\\\": \\\"Bearer\\\"\\n}\")\n\t})\n\tmockedHandler.HandleFunc(\"/authorize\", func(writer http.ResponseWriter, request *http.Request) {\n\t\tfmt.Fprintln(writer, \"true\")\n\t})\n\n\tserver.Config.Handler = mockedHandler\n\tserver.Start()\n\n\treturn server\n}", "func RegisterTokenHandlerClient(ctx context.Context, mux *runtime.ServeMux, client TokenClient) error {\n\n\tmux.Handle(\"POST\", pattern_Token_Allowance_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, 
outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_Token_Allowance_0(rctx, inboundMarshaler, client, req, pathParams)\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_Token_Allowance_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"POST\", pattern_Token_Approve_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_Token_Approve_0(rctx, inboundMarshaler, client, req, pathParams)\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_Token_Approve_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"POST\", pattern_Token_ApproveAndCall_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_Token_ApproveAndCall_0(rctx, inboundMarshaler, client, req, pathParams)\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, 
err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_Token_ApproveAndCall_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"POST\", pattern_Token_BalanceOf_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_Token_BalanceOf_0(rctx, inboundMarshaler, client, req, pathParams)\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_Token_BalanceOf_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"POST\", pattern_Token_Burn_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_Token_Burn_0(rctx, inboundMarshaler, client, req, pathParams)\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_Token_Burn_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"POST\", pattern_Token_BurnFrom_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer 
cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_Token_BurnFrom_0(rctx, inboundMarshaler, client, req, pathParams)\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_Token_BurnFrom_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"POST\", pattern_Token_Name_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_Token_Name_0(rctx, inboundMarshaler, client, req, pathParams)\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_Token_Name_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"POST\", pattern_Token_TotalSupply_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_Token_TotalSupply_0(rctx, inboundMarshaler, client, req, 
pathParams)\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_Token_TotalSupply_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"POST\", pattern_Token_Transfer_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_Token_Transfer_0(rctx, inboundMarshaler, client, req, pathParams)\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_Token_Transfer_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"POST\", pattern_Token_TransferFrom_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_Token_TransferFrom_0(rctx, inboundMarshaler, client, req, pathParams)\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_Token_TransferFrom_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"POST\", 
pattern_Token_OnApproval_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_Token_OnApproval_0(rctx, inboundMarshaler, client, req, pathParams)\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_Token_OnApproval_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"POST\", pattern_Token_OnBurn_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_Token_OnBurn_0(rctx, inboundMarshaler, client, req, pathParams)\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_Token_OnBurn_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"POST\", pattern_Token_OnTransfer_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateContext(ctx, mux, req)\n\t\tif err != nil 
{\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_Token_OnTransfer_0(rctx, inboundMarshaler, client, req, pathParams)\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_Token_OnTransfer_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\treturn nil\n}", "func (r *oauthProxy) oauthCallbackHandler(w http.ResponseWriter, req *http.Request) {\n\tctx, span, logger := r.traceSpan(req.Context(), \"oauthCallbackHandler\")\n\tif span != nil {\n\t\tdefer span.End()\n\t}\n\n\tif r.config.SkipTokenVerification {\n\t\tr.errorResponse(w, req.WithContext(ctx), \"\", http.StatusNotAcceptable, nil)\n\t\treturn\n\t}\n\t// step: ensure we have a authorization code\n\tcode := req.URL.Query().Get(\"code\")\n\tif code == \"\" {\n\t\tr.errorResponse(w, req.WithContext(ctx), \"no code in query\", http.StatusBadRequest, nil)\n\t\treturn\n\t}\n\n\tclient, err := r.getOAuthClient(r.getRedirectionURL(w, req.WithContext(ctx)))\n\tif err != nil {\n\t\tr.errorResponse(w, req.WithContext(ctx), \"unable to create a oauth2 client\", http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\n\tresp, err := exchangeAuthenticationCode(client, code)\n\tif err != nil {\n\t\tr.accessForbidden(w, req.WithContext(ctx), \"unable to exchange code for access token\", err.Error())\n\t\treturn\n\t}\n\n\t// Flow: once we exchange the authorization code we parse the ID Token; we then check for an access token,\n\t// if an access token is present and we can decode it, we use that as the session token, otherwise we default\n\t// to the ID Token.\n\ttoken, identity, err := parseToken(resp.IDToken)\n\tif err != nil {\n\t\tr.accessForbidden(w, req.WithContext(ctx), \"unable to parse ID token for identity\", err.Error())\n\n\t\treturn\n\t}\n\taccess, id, err := 
parseToken(resp.AccessToken)\n\tif err == nil {\n\t\ttoken = access\n\t\tidentity = id\n\t} else {\n\t\tlogger.Warn(\"unable to parse the access token, using id token only\", zap.Error(err))\n\t}\n\n\t// step: check the access token is valid\n\tif err = r.verifyToken(r.client, token); err != nil {\n\t\t// if not, we may have a valid session but fail to match extra criteria: logout first so the user does not remain\n\t\t// stuck with a valid session, but no access\n\t\tvar sessionToken string\n\t\tif resp.RefreshToken != \"\" {\n\t\t\tsessionToken = resp.RefreshToken\n\t\t} else {\n\t\t\tsessionToken = resp.IDToken\n\t\t}\n\t\tr.commonLogout(ctx, w, req, sessionToken, func(ww http.ResponseWriter) {\n\t\t\t// always return an error after logout in this case\n\t\t\tr.accessForbidden(w, req.WithContext(ctx), \"unable to verify the ID token\", err.Error())\n\t\t}, logger.With(zap.String(\"email\", identity.Email)))\n\n\t\treturn\n\t}\n\taccessToken := token.Encode()\n\n\t// step: are we encrypting the access token?\n\tif r.config.EnableEncryptedToken || r.config.ForceEncryptedCookie {\n\t\tif accessToken, err = encodeText(accessToken, r.config.EncryptionKey); err != nil {\n\t\t\tr.errorResponse(w, req.WithContext(ctx), \"unable to encode the access token\", http.StatusInternalServerError, err)\n\n\t\t\treturn\n\t\t}\n\t}\n\n\tlogger.Info(\"issuing access token for user\",\n\t\tzap.String(\"email\", identity.Email),\n\t\tzap.String(\"expires\", identity.ExpiresAt.Format(time.RFC3339)),\n\t\tzap.String(\"duration\", time.Until(identity.ExpiresAt).String()))\n\n\t// @metric a token has been issued\n\toauthTokensMetric.WithLabelValues(\"issued\").Inc()\n\n\t// step: does the response have a refresh token and we do NOT ignore refresh tokens?\n\tif r.config.EnableRefreshTokens && resp.RefreshToken != \"\" {\n\t\tvar encrypted string\n\t\tencrypted, err = encodeText(resp.RefreshToken, r.config.EncryptionKey)\n\t\tif err != nil {\n\t\t\tr.errorResponse(w, req.WithContext(ctx), 
\"failed to encrypt the refresh token\", http.StatusInternalServerError, err)\n\t\t\treturn\n\t\t}\n\n\t\t// drop in the access token - cookie expiration = access token\n\t\tr.dropAccessTokenCookie(req.WithContext(ctx), w, accessToken, r.getAccessCookieExpiration(token, resp.RefreshToken))\n\n\t\tswitch r.useStore() {\n\t\tcase true:\n\t\t\tif err = r.StoreRefreshToken(token, encrypted); err != nil {\n\t\t\t\tlogger.Warn(\"failed to save the refresh token in the store\", zap.Error(err))\n\t\t\t}\n\t\tdefault:\n\t\t\t// notes: not all idp refresh tokens are readable, google for example, so we attempt to decode into\n\t\t\t// a jwt and if possible extract the expiration, else we default to 10 days\n\t\t\tif _, ident, err := parseToken(resp.RefreshToken); err != nil {\n\t\t\t\tr.dropRefreshTokenCookie(req.WithContext(ctx), w, encrypted, 0)\n\t\t\t} else {\n\t\t\t\tr.dropRefreshTokenCookie(req.WithContext(ctx), w, encrypted, time.Until(ident.ExpiresAt))\n\t\t\t}\n\t\t}\n\t} else {\n\t\tr.dropAccessTokenCookie(req.WithContext(ctx), w, accessToken, time.Until(identity.ExpiresAt))\n\t}\n\n\t// step: decode the request variable\n\tredirectURI := \"/\"\n\tif req.URL.Query().Get(\"state\") != \"\" {\n\t\t// if the authorization has set a state, we now check if the calling client\n\t\t// requested a specific landing URL to end the authentication handshake\n\t\tif encodedRequestURI, _ := req.Cookie(requestURICookie); encodedRequestURI != nil {\n\t\t\t// some clients URL-escape padding characters\n\t\t\tunescapedValue, err := url.PathUnescape(encodedRequestURI.Value)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Warn(\"app did send a corrupted redirectURI in cookie: invalid url espcaping\", zap.Error(err))\n\t\t\t}\n\t\t\t// Since the value is passed with a cookie, we do not expect the client to use base64url (but the\n\t\t\t// base64-encoded value may itself be url-encoded).\n\t\t\t// This is safe for browsers using atob() but needs to be treated with care for nodeJS 
clients,\n\t\t\t// which natively use base64url encoding, and url-escape padding '=' characters.\n\t\t\tdecoded, err := base64.StdEncoding.DecodeString(unescapedValue)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Warn(\"app did send a corrupted redirectURI in cookie: invalid base64url encoding\",\n\t\t\t\t\tzap.Error(err),\n\t\t\t\t\tzap.String(\"encoded_value\", unescapedValue))\n\t\t\t}\n\t\t\tredirectURI = string(decoded)\n\t\t}\n\t}\n\n\tif r.config.BaseURI != \"\" {\n\t\t// assuming state starts with slash\n\t\tredirectURI = r.config.BaseURI + redirectURI\n\t}\n\n\tr.redirectToURL(redirectURI, w, req.WithContext(ctx), http.StatusTemporaryRedirect)\n}", "func NewTestServer(accessToken string, userInfo *UserInfo) *httptest.Server {\n\treturn httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.URL.Path == userInfoEndpointPath {\n\t\t\tif r.Header[\"Authorization\"][0] == \"Bearer \"+accessToken {\n\t\t\t\tpayload, _ := json.Marshal(&userInfo)\n\n\t\t\t\tw.WriteHeader(200)\n\t\t\t\tw.Write(payload)\n\t\t\t} else {\n\t\t\t\tw.WriteHeader(401)\n\t\t\t}\n\t\t} else {\n\t\t\tw.WriteHeader(404)\n\t\t}\n\t}))\n}", "func New(store stores.Store) (*Handler, error) {\n\tif !util.GetConfig().EnableDebugMode {\n\t\tgin.SetMode(gin.ReleaseMode)\n\t}\n\th := &Handler{\n\t\tstore: store,\n\t\tengine: gin.New(),\n\t}\n\tif err := h.setHandlers(); err != nil {\n\t\treturn nil, errors.Wrap(err, \"could not set handlers\")\n\t}\n\tif util.GetConfig().AuthBackend == \"oauth\" {\n\t\tif !DoNotPrivateKeyChecking {\n\t\t\tif err := util.CheckForPrivateKey(); err != nil {\n\t\t\t\treturn nil, errors.Wrap(err, \"could not check for private key\")\n\t\t\t}\n\t\t}\n\t\th.initOAuth()\n\t} else if util.GetConfig().AuthBackend == \"proxy\" {\n\t\th.initProxyAuth()\n\t}\n\treturn h, nil\n}", "func New(cognitoAPI cognitoidentityprovideriface.CognitoIdentityProviderAPI) (*Handler, error) {\n\treturn &Handler{\n\t\tcognitoAPI: cognitoAPI,\n\t}, nil\n}", "func 
SaveAccessToken(\n\tstate, code, expectedState, env, controllerURL string,\n\thosts map[string]string,\n\tfs FileSystem,\n\tclient ClientInterface,\n) error {\n\tif state != expectedState {\n\t\terr := errors.NewOAuthError(\"GoogleCallback\", fmt.Sprintf(\"invalid oauth state, expected '%s', got '%s'\", expectedState, state), http.StatusBadRequest)\n\t\treturn err\n\t}\n\n\turl := fmt.Sprintf(\"%s/access?code=%s\", controllerURL, code)\n\tresp, status, err := client.Get(url, hosts[\"controller\"])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif status != http.StatusOK {\n\t\treturn errors.NewOAuthError(\"GoogleCallback\", string(resp), status)\n\t}\n\n\tvar bodyObj map[string]interface{}\n\tjson.Unmarshal(resp, &bodyObj)\n\ttoken := bodyObj[\"token\"].(string)\n\n\tc := NewConfig(env, token, controllerURL, hosts)\n\terr = c.Write(fs)\n\treturn err\n}", "func NewHandler(keeper Keeper) sdk.Handler {\n\treturn func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) {\n\t\tctx = ctx.WithEventManager(sdk.NewEventManager())\n\t\tswitch msg := msg.(type) {\n\t\tcase MsgSetPrivate:\n\t\t\treturn handleMsgSetPrivate(ctx, keeper, msg)\n\t\tcase MsgSetPublic:\n\t\t\treturn handleMsgSetPublic(ctx, keeper, msg)\n\t\tdefault:\n\t\t\terrMsg := fmt.Sprintf(\"unrecognized %s message type: %T\", ModuleName, msg)\n\t\t\treturn nil, sdkerrors.Wrap(sdkerrors.ErrUnknownRequest, errMsg)\n\t\t}\n\t}\n}", "func AccessTokenRequest(data utils.H) (utils.H, error) {\n\tvar user models.User\n\tvar client models.Client\n\n\tvar code string\n\tvar redirectURI string\n\n\tif data[\"code\"] == nil || data[\"redirect_uri\"] == nil || data[\"client\"] == nil {\n\t\treturn invalidRequestResult(\"\")\n\t}\n\n\tredirectURI = data[\"redirect_uri\"].(string)\n\tcode = data[\"code\"].(string)\n\tclient = data[\"client\"].(models.Client)\n\n\tauthorizationSession := services.FindSessionByToken(code, models.GrantToken)\n\tdefer services.InvalidateSession(authorizationSession)\n\tif authorizationSession.ID == 
0 {\n\t\treturn invalidGrantResult(\"\")\n\t}\n\tuser = authorizationSession.User\n\tuser = services.FindUserByPublicID(user.PublicID)\n\tif authorizationSession.Client.ID != client.ID {\n\t\treturn invalidGrantResult(\"\")\n\t}\n\tif !slices.Contains(authorizationSession.Client.RedirectURI, redirectURI) {\n\t\treturn invalidGrantResult(\"\")\n\t}\n\n\taccessToken := services.CreateSession(user,\n\t\tclient,\n\t\tauthorizationSession.IP,\n\t\tauthorizationSession.UserAgent,\n\t\tauthorizationSession.Scopes,\n\t\tmodels.AccessToken)\n\trefreshToken := services.CreateSession(user,\n\t\tclient,\n\t\tauthorizationSession.IP,\n\t\tauthorizationSession.UserAgent,\n\t\tauthorizationSession.Scopes,\n\t\tmodels.RefreshToken)\n\n\tif accessToken.ID == 0 || refreshToken.ID == 0 {\n\t\treturn serverErrorResult(\"\")\n\t}\n\n\treturn utils.H{\n\t\t\"user_id\": user.PublicID,\n\t\t\"access_token\": accessToken.Token,\n\t\t\"token_type\": \"Bearer\",\n\t\t\"expires_in\": accessToken.ExpiresIn,\n\t\t\"refresh_token\": refreshToken.Token,\n\t\t\"scope\": authorizationSession.Scopes,\n\t}, nil\n}", "func (j *JWT) CreateAccessToken(id int) (string, error) {\n\tjat := jwt.New(jwt.SigningMethodHS256)\n\tatClaims := jat.Claims.(jwt.MapClaims)\n\tatClaims[\"authorized\"] = true\n\tatClaims[\"access_uuid\"] = j.AccessUUID\n\tatClaims[\"id\"] = id\n\tatClaims[\"exp\"] = j.AtExpires\n\tvar err error\n\tif j.AccessToken, err = jat.SignedString([]byte(os.Getenv(\"JWT_ACCESS_TOKEN_SECRET\"))); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn j.AccessToken, nil\n}", "func TokenCreateHandler(ctx *gin.Context) {\n\tvar (\n\t\tinput *tokenCreateInput\n\t\tdb *gorm.DB\n\t\tapp *models.App\n\t\ttokenCreateSrv *service.TokenCreate\n\t\treadOnlyI8 int8\n\t\ttokenCreateValue interface{}\n\t\terr error\n\n\t\tcode = 400\n\t\treErrors map[string][]string\n\t\tsuccess bool\n\t\tdata interface{}\n\t)\n\n\tdefer func() {\n\t\tctx.JSON(code, &Response{\n\t\t\tRequestID: 
ctx.GetInt64(\"requestId\"),\n\t\t\tSuccess: success,\n\t\t\tErrors: reErrors,\n\t\t\tData: data,\n\t\t})\n\t}()\n\n\tinput = ctx.MustGet(\"inputParam\").(*tokenCreateInput)\n\tdb = ctx.MustGet(\"db\").(*gorm.DB)\n\tapp = ctx.MustGet(\"app\").(*models.App)\n\n\tif input.ReadOnly != nil && *input.ReadOnly {\n\t\treadOnlyI8 = 1\n\t}\n\n\ttokenCreateSrv = &service.TokenCreate{\n\t\tBaseService: service.BaseService{\n\t\t\tDB: db,\n\t\t},\n\t\tIP: input.IP,\n\t\tApp: app,\n\t\tPath: *input.Path,\n\t\tSecret: input.Secret,\n\t\tReadOnly: readOnlyI8,\n\t\tExpiredAt: input.ExpiredAt,\n\t\tAvailableTimes: *input.AvailableTimes,\n\t}\n\n\tif err := tokenCreateSrv.Validate(); !reflect.ValueOf(err).IsNil() {\n\t\treErrors = generateErrors(err, \"\")\n\t\treturn\n\t}\n\n\tif tokenCreateValue, err = tokenCreateSrv.Execute(context.Background()); err != nil {\n\t\treErrors = generateErrors(err, \"\")\n\t\treturn\n\t}\n\n\tdata = tokenResp(tokenCreateValue.(*models.Token))\n\tsuccess = true\n\tcode = 200\n}", "func NewAuthenticatorFake(current time.Time, validPeriod time.Duration) Authenticator {\n\ttokenizer := mdtest.NewCryptoTokenizerFake()\n\ttimer := mdtest.NewTimerFake(current)\n\treturn NewAuthenticator(tokenizer, timer, validPeriod)\n}", "func LoginV0Handler(config *types.ConfigMap) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tstart := time.Now()\n\t\tdefer log.Debugf(\"LoginV0Handler Elapsed - %s\", time.Since(start))\n\n\t\t//Check for valid username and password\n\t\tusername, password, ok := r.BasicAuth()\n\t\tif !ok {\n\t\t\tsendResponse(http.StatusUnauthorized, \"\", types.RawAuthResponse{}, fmt.Errorf(\"Need valid username and password as basic auth\"), w)\n\t\t\treturn\n\t\t}\n\t\tuserDetailFromConfig, err := validateAndGetUser(config, username, password)\n\t\tif err != nil {\n\t\t\terrHandle(w, fmt.Sprintf(\"Unable to validate : %s\", err), \"Authentication failed\", 401)\n\t\t\treturn\n\t\t}\n\t\tuser := 
types.User{\n\t\t\tUsername: userDetailFromConfig.UserName,\n\t\t\tEMail: userDetailFromConfig.Email,\n\t\t\tUID: userDetailFromConfig.UID,\n\t\t\tGroups: userDetailFromConfig.Groups}\n\t\tif user.UID == \"\" {\n\t\t\tuser.UID = user.Username\n\t\t}\n\t\ttoken, err := auth.GenerateToken(user, \"\", auth.V0)\n\t\tif err != nil {\n\t\t\terrHandle(w, fmt.Sprintf(\"Something is wrong with auth token. : %s\", err), \"Authentication failed\", 401)\n\t\t\treturn\n\t\t}\n\n\t\tv1Token := types.V1Token{\n\t\t\tToken: token.JWT,\n\t\t\tExpiry: token.Expiry,\n\t\t}\n\n\t\tdata, _ := json.Marshal(v1Token)\n\t\tresponse := JSONResponse{}\n\t\tresponse.status = http.StatusCreated\n\t\tresponse.data = data\n\n\t\tresponse.Write(w)\n\t}\n}", "func NewHandler(keeper Keeper) sdk.Handler {\n\treturn func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) {\n\t\tctx = ctx.WithEventManager(sdk.NewEventManager())\n\n\t\tswitch msg := msg.(type) {\n\t\tcase types.MsgCreateSession:\n\t\t\treturn handleMsgCreateSession(ctx, keeper, msg)\n\t\tdefault:\n\t\t\treturn nil, sdkerrors.Wrap(sdkerrors.ErrUnknownRequest,\n\t\t\t\tfmt.Sprintf(\"unrecognized magpie message type: %v\", msg.Type()))\n\t\t}\n\t}\n}", "func NewHandler(keeper Keeper) sdk.Handler {\r\n\tms := NewServer(keepers)\r\n\r\n\treturn func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) {\r\n\t\tswitch msg := msg.(type) {\r\n\t\tcase *types.MsgCreateAgreement:\r\n\t\t\tres, err := ms.Create(sdk.WrapSDKContext(ctx), msg)\r\n\t\t\treturn sdk.WrapServiceResult(ctx, res, err)\r\n\r\n\t\tcase *types.MsgUpdateAgreement:\r\n\t\t\tres, err := ms.Update(sdk.WrapSDKContext(ctx), msg)\r\n\t\t\treturn sdk.WrapServiceResult(ctx, res, err)\r\n\r\n\t\tcase *types.MsgDeleteAgreement:\r\n\t\t\tres, err := ms.Delete(sdk.WrapSDKContext(ctx), msg)\r\n\t\t\treturn sdk.WrapServiceResult(ctx, res, err)\r\n\r\n\t\tcase *types.MsgRenewAgreement:\r\n\t\t\tres, err := ms.Renew(sdk.WrapSDKContext(ctx), msg)\r\n\t\t\treturn sdk.WrapServiceResult(ctx, res, 
err)\r\n\r\n\t\tcase *types.MsgAmendAgreement:\r\n\t\t\tres, err := ms.Amend(sdk.WrapSDKContext(ctx), msg)\r\n\t\t\treturn sdk.WrapServiceResult(ctx, res, err)\r\n\r\n\t\tcase *types.MsgTerminateAgreement:\r\n\t\t\tres, err := ms.Terminate(sdk.WrapSDKContext(ctx), msg)\r\n\t\t\treturn sdk.WrapServiceResult(ctx, res, err)\r\n\r\n\t\tcase *types.MsgExpireAgreement:\r\n\t\t\tres, err := ms.Expire(sdk.WrapSDKContext(ctx), msg)\r\n\t\t\treturn sdk.WrapServiceResult(ctx, res, err)\r\n\r\n\t\tdefault:\r\n\t\t\treturn nil, sdkerrors.ErrUnknownRequest\r\n\t\t}\r\n\t}\r\n}", "func newProxy(config *Config) (*oauthProxy, error) {\n\t// create the service logger\n\tlog, err := createLogger(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlog.Info(\"starting the service\", zap.String(\"prog\", prog), zap.String(\"author\", author), zap.String(\"version\", version))\n\tsvc := &oauthProxy{\n\t\tconfig: config,\n\t\tlog: log,\n\t\tmetricsHandler: prometheus.Handler(),\n\t}\n\n\t// parse the upstream endpoint\n\tif svc.endpoint, err = url.Parse(config.Upstream); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// initialize the store if any\n\tif config.StoreURL != \"\" {\n\t\tif svc.store, err = createStorage(config.StoreURL); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// initialize the openid client\n\tif !config.SkipTokenVerification {\n\t\tif svc.client, svc.idp, svc.idpClient, err = svc.newOpenIDClient(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tlog.Warn(\"TESTING ONLY CONFIG - the verification of the token have been disabled\")\n\t}\n\n\tif config.ClientID == \"\" && config.ClientSecret == \"\" {\n\t\tlog.Warn(\"client credentials are not set, depending on provider (confidential|public) you might be unable to auth\")\n\t}\n\n\t// are we running in forwarding mode?\n\tif config.EnableForwarding {\n\t\tif err := svc.createForwardingProxy(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tif err := svc.createReverseProxy(); err != nil 
{\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn svc, nil\n}", "func GenerateAccessToken(ID string) (string, error) {\n\n\texpirationTimeAccessToken := time.Now().Add(60 * time.Minute).Unix()\n\tAtJwtKey := []byte(\"my_secret_key\")\n\n\t// Declare the token with the algorithm used for signing, and the claims\n\ttoken := jwt.New(jwt.SigningMethodHS256)\n\ttoken.Header[\"kid\"] = \"signin_1\"\n\tclaims := token.Claims.(jwt.MapClaims)\n\tclaims[\"id\"] = ID\n\tclaims[\"exp\"] = expirationTimeAccessToken\n\n\t// Create the JWT string\n\ttokenString, err := token.SignedString(AtJwtKey)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn tokenString, nil\n}", "func NewHandler(dm DeveloperManager, am acc.AccountManager, gm *global.GlobalManager) sdk.Handler {\n\treturn func(ctx sdk.Context, msg sdk.Msg) sdk.Result {\n\t\tswitch msg := msg.(type) {\n\t\tcase DeveloperRegisterMsg:\n\t\t\treturn handleDeveloperRegisterMsg(ctx, dm, am, msg)\n\t\tcase DeveloperUpdateMsg:\n\t\t\treturn handleDeveloperUpdateMsg(ctx, dm, am, msg)\n\t\tcase GrantPermissionMsg:\n\t\t\treturn handleGrantPermissionMsg(ctx, dm, am, msg)\n\t\tcase PreAuthorizationMsg:\n\t\t\treturn handlePreAuthorizationMsg(ctx, dm, am, msg)\n\t\tcase DeveloperRevokeMsg:\n\t\t\treturn handleDeveloperRevokeMsg(ctx, dm, am, gm, msg)\n\t\tcase RevokePermissionMsg:\n\t\t\treturn handleRevokePermissionMsg(ctx, dm, am, msg)\n\t\tdefault:\n\t\t\terrMsg := fmt.Sprintf(\"Unrecognized developer msg type: %v\", reflect.TypeOf(msg).Name())\n\t\t\treturn sdk.ErrUnknownRequest(errMsg).Result()\n\t\t}\n\t}\n}", "func (client *Client) AccessTokenWithCallback(request *AccessTokenRequest, callback func(response *AccessTokenResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *AccessTokenResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.AccessToken(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil 
{\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func getAccessToken(tokenEndpoint, clientID string, codeVerifier string, authorizationCode string, callbackURL string) (*oauth2.Token, error) {\n\t// set the url and form-encoded data for the POST to the access token endpoint\n\tdata := fmt.Sprintf(\n\t\t\"grant_type=authorization_code&client_id=%s\"+\n\t\t\t\"&code_verifier=%s\"+\n\t\t\t\"&code=%s\"+\n\t\t\t\"&redirect_uri=%s\",\n\t\tclientID, codeVerifier, authorizationCode, callbackURL)\n\tpayload := strings.NewReader(data)\n\n\t// create the request and execute it\n\treq, _ := http.NewRequest(\"POST\", tokenEndpoint, payload)\n\treq.Header.Add(\"content-type\", \"application/x-www-form-urlencoded\")\n\tres, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\tfmt.Printf(\"snap: HTTP error: %s\", err)\n\t\treturn nil, err\n\t}\n\n\t// process the response\n\tdefer res.Body.Close()\n\tbody, _ := ioutil.ReadAll(res.Body)\n\n\t// unmarshal the json into a string map\n\tvar token oauth2.Token\n\terr = json.Unmarshal(body, &token)\n\tif err != nil {\n\t\tfmt.Printf(\"snap: JSON error: %s\", err)\n\t\treturn nil, err\n\t}\n\n\treturn &token, nil\n}", "func (client *FactoriesClient) getGitHubAccessTokenCreateRequest(ctx context.Context, resourceGroupName string, factoryName string, gitHubAccessTokenRequest GitHubAccessTokenRequest, options *FactoriesClientGetGitHubAccessTokenOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/getGitHubAccessToken\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = 
strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif factoryName == \"\" {\n\t\treturn nil, errors.New(\"parameter factoryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{factoryName}\", url.PathEscape(factoryName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, gitHubAccessTokenRequest)\n}", "func newCallBackHandler() (raw.OnewayHandler, <-chan map[string]string) {\n\tserverCalledBack := make(chan map[string]string)\n\treturn func(ctx context.Context, body []byte) error {\n\t\tserverCalledBack <- extractBaggage(ctx)\n\t\treturn nil\n\t}, serverCalledBack\n}", "func NewTokenHandler(client users.Client, alg gojwt.Algorithm, expiryMinutes int) *TokenHandler {\n\treturn &TokenHandler{\n\t\tclient: client,\n\t\talg: alg,\n\t\texpiryMinutes: expiryMinutes,\n\t}\n}", "func NewBaseHandler(cfg *config.Config) *baseHandler {\n\t// Created a client by the given node address\n\trpcClient, err := rpc.Dial(cfg.NodeURL)\n\tif err != nil {\n\t\tlog.Fatal(\"failed to deal with ETH node\", err)\n\t}\n\tnodeClient := ethclient.NewClient(rpcClient)\n\n\t// Parse private key\n\td := new(big.Int).SetBytes(common.FromHex(cfg.PrivateKey))\n\tpkX, pkY := crypto.S256().ScalarBaseMult(d.Bytes())\n\tprivateKey := &ecdsa.PrivateKey{\n\t\tPublicKey: ecdsa.PublicKey{\n\t\t\tCurve: crypto.S256(),\n\t\t\tX: pkX,\n\t\t\tY: pkY,\n\t\t},\n\t\tD: d,\n\t}\n\n\t// Init from address\n\tpublicKey := privateKey.Public()\n\tpublicKeyECDSA, ok := publicKey.(*ecdsa.PublicKey)\n\tif !ok {\n\t\tlog.Fatal(\"error casting public key to ECDSA\")\n\t}\n\tfromAddr := crypto.PubkeyToAddress(*publicKeyECDSA)\n\n\t// 
Create link token wrapper\n\tlinkToken, err := link.NewLinkToken(common.HexToAddress(cfg.LinkTokenAddr), nodeClient)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tapproveAmount := big.NewInt(0)\n\tapproveAmount.SetString(cfg.ApproveAmount, 10)\n\n\treturn &baseHandler{\n\t\tcfg: cfg,\n\t\tclient: nodeClient,\n\t\trpcClient: rpcClient,\n\t\tprivateKey: privateKey,\n\t\tlinkToken: linkToken,\n\t\tfromAddr: fromAddr,\n\t\tapproveAmount: approveAmount,\n\t}\n}", "func New(ctx context.Context, next http.Handler, config *Config, name string) (http.Handler, error) {\n\treturn &Jwt{\n\t\tnext: next,\n\t\trequiredField: config.RequiredField,\n\t}, nil\n}", "func OauthLoginHandler(db *sqlx.DB, cfg config.Config) http.HandlerFunc {\n\t// The jwk.AutoRefresh and jwk.Whitelist objects only get created once.\n\t// They are shared between all handlers\n\t// Note: This assumes two things:\n\t// 1) that the cfg.ConfigTrafficOpsGolang.WhitelistedOAuthUrls is not updated once it has been initialized\n\t// 2) OauthLoginHandler is not called conccurently\n\tif jwksFetcher == nil {\n\t\tar := jwk.NewAutoRefresh(context.TODO())\n\t\twl := &whitelist{urls: cfg.ConfigTrafficOpsGolang.WhitelistedOAuthUrls}\n\t\tjwksFetcher = &jwksFetch{\n\t\t\tar: ar,\n\t\t\twl: wl,\n\t\t}\n\t}\n\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tdefer r.Body.Close()\n\t\tresp := struct {\n\t\t\ttc.Alerts\n\t\t}{}\n\n\t\tform := auth.PasswordForm{}\n\t\tparameters := struct {\n\t\t\tAuthCodeTokenUrl string `json:\"authCodeTokenUrl\"`\n\t\t\tCode string `json:\"code\"`\n\t\t\tClientId string `json:\"clientId\"`\n\t\t\tRedirectUri string `json:\"redirectUri\"`\n\t\t}{}\n\n\t\tif err := json.NewDecoder(r.Body).Decode(&parameters); err != nil {\n\t\t\tapi.HandleErr(w, r, nil, http.StatusBadRequest, err, nil)\n\t\t\treturn\n\t\t}\n\n\t\tmatched, err := VerifyUrlOnWhiteList(parameters.AuthCodeTokenUrl, cfg.ConfigTrafficOpsGolang.WhitelistedOAuthUrls)\n\t\tif err != nil {\n\t\t\tapi.HandleErr(w, r, 
nil, http.StatusInternalServerError, nil, err)\n\t\t\treturn\n\t\t}\n\t\tif !matched {\n\t\t\tapi.HandleErr(w, r, nil, http.StatusForbidden, nil, errors.New(\"Key URL from token is not included in the whitelisted urls. Received: \"+parameters.AuthCodeTokenUrl))\n\t\t\treturn\n\t\t}\n\n\t\tdata := url.Values{}\n\t\tdata.Add(\"code\", parameters.Code)\n\t\tdata.Add(\"client_id\", parameters.ClientId)\n\t\tdata.Add(\"grant_type\", \"authorization_code\") // Required by RFC6749 section 4.1.3\n\t\tdata.Add(\"redirect_uri\", parameters.RedirectUri)\n\n\t\treq, err := http.NewRequest(http.MethodPost, parameters.AuthCodeTokenUrl, bytes.NewBufferString(data.Encode()))\n\t\treq.Header.Set(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\t\tif cfg.OAuthClientSecret != \"\" {\n\t\t\treq.Header.Set(\"Authorization\", \"Basic \"+base64.StdEncoding.EncodeToString([]byte(parameters.ClientId+\":\"+cfg.OAuthClientSecret))) // per RFC6749 section 2.3.1\n\t\t}\n\t\tif err != nil {\n\t\t\tapi.HandleErr(w, r, nil, http.StatusInternalServerError, nil, fmt.Errorf(\"obtaining token using code from oauth provider: %w\", err))\n\t\t\treturn\n\t\t}\n\n\t\tclient := http.Client{\n\t\t\tTimeout: 30 * time.Second,\n\t\t}\n\t\tresponse, err := client.Do(req)\n\t\tif err != nil {\n\t\t\tapi.HandleErr(w, r, nil, http.StatusInternalServerError, nil, fmt.Errorf(\"getting an http client: %w\", err))\n\t\t\treturn\n\t\t}\n\t\tdefer response.Body.Close()\n\n\t\tbuf := new(bytes.Buffer)\n\t\tbuf.ReadFrom(response.Body)\n\t\tencodedToken := \"\"\n\n\t\tvar result map[string]interface{}\n\t\tif err := json.Unmarshal(buf.Bytes(), &result); err != nil {\n\t\t\tlog.Warnf(\"Error parsing JSON response from OAuth: %s\", err)\n\t\t\tencodedToken = buf.String()\n\t\t} else if _, ok := result[rfc.IDToken]; !ok {\n\t\t\tsysErr := fmt.Errorf(\"Missing access token in response: %s\\n\", buf.String())\n\t\t\tusrErr := errors.New(\"Bad response from OAuth2.0 provider\")\n\t\t\tapi.HandleErr(w, r, nil, 
http.StatusBadGateway, usrErr, sysErr)\n\t\t\treturn\n\t\t} else {\n\t\t\tswitch t := result[rfc.IDToken].(type) {\n\t\t\tcase string:\n\t\t\t\tencodedToken = result[rfc.IDToken].(string)\n\t\t\tdefault:\n\t\t\t\tsysErr := fmt.Errorf(\"Incorrect type of access_token! Expected 'string', got '%v'\\n\", t)\n\t\t\t\tusrErr := errors.New(\"Bad response from OAuth2.0 provider\")\n\t\t\t\tapi.HandleErr(w, r, nil, http.StatusBadGateway, usrErr, sysErr)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tif encodedToken == \"\" {\n\t\t\tapi.HandleErr(w, r, nil, http.StatusBadRequest, errors.New(\"Token not found in request but is required\"), nil)\n\t\t\treturn\n\t\t}\n\n\t\tvar decodedToken jwt.Token\n\t\tif decodedToken, err = jwt.Parse(\n\t\t\t[]byte(encodedToken),\n\t\t\tjwt.WithVerifyAuto(true),\n\t\t\tjwt.WithJWKSetFetcher(jwksFetcher),\n\t\t); err != nil {\n\t\t\tif decodedToken, err = jwt.Parse(\n\t\t\t\t[]byte(encodedToken),\n\t\t\t\tjwt.WithVerifyAuto(false),\n\t\t\t\tjwt.WithJWKSetFetcher(jwksFetcher),\n\t\t\t); err != nil {\n\t\t\t\tapi.HandleErr(w, r, nil, http.StatusInternalServerError, nil, fmt.Errorf(\"error decoding token with message: %w\", err))\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tvar userIDInterface interface{}\n\t\tvar userID string\n\t\tvar ok bool\n\t\tif cfg.OAuthUserAttribute != \"\" {\n\t\t\tattributes := decodedToken.PrivateClaims()\n\t\t\tif userIDInterface, ok = attributes[cfg.OAuthUserAttribute]; !ok {\n\t\t\t\tapi.HandleErr(w, r, nil, http.StatusInternalServerError, nil, fmt.Errorf(\"Non-existent OAuth attribute : %s\", cfg.OAuthUserAttribute))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tuserID = userIDInterface.(string)\n\t\t} else {\n\t\t\tuserID = decodedToken.Subject()\n\t\t}\n\t\tform.Username = userID\n\n\t\tdbCtx, cancelTx := context.WithTimeout(r.Context(), time.Duration(cfg.DBQueryTimeoutSeconds)*time.Second)\n\t\tdefer cancelTx()\n\t\tuserAllowed, err, blockingErr := auth.CheckLocalUserIsAllowed(form.Username, db, dbCtx)\n\t\tif blockingErr != nil 
{\n\t\t\tapi.HandleErr(w, r, nil, http.StatusServiceUnavailable, nil, fmt.Errorf(\"error checking local user password: %s\\n\", blockingErr.Error()))\n\t\t\treturn\n\t\t}\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"checking local user: %s\\n\", err)\n\t\t}\n\n\t\tif userAllowed {\n\t\t\t_, dbErr := db.Exec(UpdateLoginTimeQuery, form.Username)\n\t\t\tif dbErr != nil {\n\t\t\t\tdbErr = fmt.Errorf(\"unable to update authentication time for user '%s': %w\", form.Username, dbErr)\n\t\t\t\tapi.HandleErr(w, r, nil, http.StatusInternalServerError, nil, dbErr)\n\t\t\t\treturn\n\t\t\t}\n\t\t\thttpCookie := tocookie.GetCookie(userID, defaultCookieDuration, cfg.Secrets[0])\n\t\t\thttp.SetCookie(w, httpCookie)\n\t\t\tresp = struct {\n\t\t\t\ttc.Alerts\n\t\t\t}{tc.CreateAlerts(tc.SuccessLevel, \"Successfully logged in.\")}\n\t\t} else {\n\t\t\tresp = struct {\n\t\t\t\ttc.Alerts\n\t\t\t}{tc.CreateAlerts(tc.ErrorLevel, \"Invalid username or password.\")}\n\t\t}\n\n\t\trespBts, err := json.Marshal(resp)\n\t\tif err != nil {\n\t\t\tapi.HandleErr(w, r, nil, http.StatusInternalServerError, nil, fmt.Errorf(\"encoding response: %w\", err))\n\t\t\treturn\n\t\t}\n\t\tw.Header().Set(rfc.ContentType, rfc.ApplicationJSON)\n\t\tif !userAllowed {\n\t\t\tw.WriteHeader(http.StatusForbidden)\n\t\t}\n\t\tfmt.Fprintf(w, \"%s\", respBts)\n\n\t}\n}", "func (m *Manager) RefreshAccessToken(tgr *oauth2.TokenGenerateRequest) (accessToken oauth2.TokenInfo, err error) {\n\tcli, err := m.GetClient(tgr.ClientID)\n\tif err != nil {\n\t\treturn\n\t} else if tgr.ClientSecret != cli.GetSecret() {\n\t\terr = errors.ErrInvalidClient\n\t\treturn\n\t}\n\tti, err := m.LoadRefreshToken(tgr.Refresh)\n\tif err != nil {\n\t\treturn\n\t} else if ti.GetClientID() != tgr.ClientID {\n\t\terr = errors.ErrInvalidRefreshToken\n\t\treturn\n\t}\n\toldAccess := ti.GetAccess()\n\t_, ierr := m.injector.Invoke(func(stor oauth2.TokenStore, gen oauth2.AccessGenerate) {\n\t\ttd := &oauth2.GenerateBasic{\n\t\t\tClient: cli,\n\t\t\tUserID: 
ti.GetUserID(),\n\t\t\tCreateAt: time.Now(),\n\t\t}\n\t\tisGenRefresh := false\n\t\tif rcfg, ok := m.gtcfg[oauth2.Refreshing]; ok {\n\t\t\tisGenRefresh = rcfg.IsGenerateRefresh\n\t\t}\n\t\ttv, rv, terr := gen.Token(td, isGenRefresh)\n\t\tif terr != nil {\n\t\t\terr = terr\n\t\t\treturn\n\t\t}\n\t\tti.SetAccess(tv)\n\t\tti.SetAccessCreateAt(td.CreateAt)\n\t\tif scope := tgr.Scope; scope != \"\" {\n\t\t\tti.SetScope(scope)\n\t\t}\n\t\tif rv != \"\" {\n\t\t\tti.SetRefresh(rv)\n\t\t}\n\t\tif verr := stor.Create(ti); verr != nil {\n\t\t\terr = verr\n\t\t\treturn\n\t\t}\n\t\t// remove the old access token\n\t\tif verr := stor.RemoveByAccess(oldAccess); verr != nil {\n\t\t\terr = verr\n\t\t\treturn\n\t\t}\n\t\taccessToken = ti\n\t})\n\tif ierr != nil && err == nil {\n\t\terr = ierr\n\t}\n\treturn\n}", "func NewHandler(k Keeper) sdk.Handler {\n\treturn func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) {\n\t\tctx = ctx.WithEventManager(sdk.NewEventManager())\n\t\tswitch msg := msg.(type) {\n\t\t// TODO: Define your msg cases\n\t\t//\n\t\t//Example:\n\t\t// case Msg<Action>:\n\t\t// \treturn handleMsg<Action>(ctx, k, msg)\n\t\tcase MsgSetFileAuth:\n\t\t\treturn handleMsgSetFileAuth(ctx, k, msg)\n\t\tcase MsgTransFileAuth:\n\t\t\treturn handleMsgTransFileAuth(ctx, k, msg)\n\t\tcase MsgDelFileAuth:\n\t\t\treturn handleMsgDelFileAuth(ctx, k, msg)\n\t\tdefault:\n\t\t\terrMsg := fmt.Sprintf(\"unrecognized %s message type: %T\", ModuleName, msg)\n\t\t\treturn nil, sdkerrors.Wrap(sdkerrors.ErrUnknownRequest, errMsg)\n\t\t}\n\t}\n}", "func refreshTokenHandler(w http.ResponseWriter, r *http.Request) {\n\n\t// TODO: Use your own methods to verify an existing user is\n\t// able to refresh their token and then give them a new one\n\n\tif response, err := bjwt.Generate(123456); err != nil {\n\t\tresultErrorJSON(w, http.StatusInternalServerError, err.Error())\n\t} else {\n\t\tresultResponseJSON(w, http.StatusOK, response)\n\t}\n}" ]
[ "0.6828804", "0.6349177", "0.63403344", "0.60474855", "0.6034249", "0.5923264", "0.59155315", "0.5915281", "0.58693445", "0.5771537", "0.5720014", "0.570903", "0.56361717", "0.5632971", "0.5632587", "0.5615423", "0.56022596", "0.55027896", "0.54784465", "0.54771185", "0.5451622", "0.5401005", "0.536686", "0.53335136", "0.5324792", "0.5321096", "0.5270371", "0.52692455", "0.5251732", "0.5251732", "0.5251732", "0.52273864", "0.5217423", "0.5214516", "0.52106845", "0.5180318", "0.51781064", "0.5162169", "0.5160676", "0.5148703", "0.51215994", "0.51163846", "0.5112469", "0.51052105", "0.5103311", "0.5094679", "0.5041819", "0.5031428", "0.5024282", "0.50091076", "0.50009066", "0.49974614", "0.49881136", "0.49859416", "0.49821267", "0.49720845", "0.49626485", "0.4960815", "0.49490914", "0.4942208", "0.49393782", "0.49387246", "0.4937146", "0.49364203", "0.49203867", "0.49179733", "0.4916599", "0.49099472", "0.49087796", "0.49079737", "0.49052307", "0.4904918", "0.4897061", "0.4895811", "0.48956856", "0.48918688", "0.48883998", "0.48786268", "0.48774934", "0.48765737", "0.48742884", "0.48734435", "0.48719412", "0.48687878", "0.48686948", "0.48656976", "0.48648632", "0.48635584", "0.4861334", "0.4855014", "0.48513806", "0.48385847", "0.48379877", "0.48354164", "0.48331448", "0.48178017", "0.4817124", "0.48154044", "0.48150027", "0.48118636" ]
0.7244731
0
/ go run extended.go foo peter w bla everybody c=12 else Hello peter, how is the bla? Say hello to everybody Say hello to else You can count real high! Custom 1: bar2 Custom 2: bar1
func setStyle(style string, c *clif.Cli) { switch style { case "sunburn": if c != nil { c.Output().SetFormatter(clif.NewDefaultFormatter(clif.SunburnStyles)) } clif.DefaultStyles = clif.SunburnStyles case "winter": if c != nil { c.Output().SetFormatter(clif.NewDefaultFormatter(clif.WinterStyles)) } clif.DefaultStyles = clif.WinterStyles } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func main() {\n\ts1 := stringAppender(\"hoo\")\n\tfmt.Println(s1(\"woo\"))\n\n\ts2 := stringAppender(\"Orioles\")\n\tfmt.Println(s2(\"Baltimore \"))\n\n}", "func main() {\n\targs := os.Args[1:]\n\t// if len(args) == 5 {\n\t// \tfmt.Println(\"There are 5 arguments\")\n\t// } else if len(args) == 2 {\n\t// \tfmt.Printf(\"There are %d arguments: %s\\n\", len(args), args)\n\t// } else {\n\t// \tfmt.Println(strings.TrimSpace(usage))\n\t// }\n\tif len(args) >= 2 {\n\t\tfmt.Printf(\"There are %d arguments: %s\\n\", len(args), strings.Join(args, \" \"))\n\t} else {\n\t\tfmt.Println(usage)\n\t}\n}", "func main() {\n\tfmt.Println(\"Hello, World\")\n\t// Modify name, with argument:\n\tif len(os.Args) > 1 {\n\t\tfmt.Println(\"Hello \" + os.Args[1])\n\t} else {\n\t\tlog.Println(\"No argument has been given!\")\n\t}\n}", "func usageCommon(w io.Writer, line int) {\n\toutputPara(w, line, 0, usageCommonPara)\n}", "func main() {\n\n\tvar argValues string //defining an argValues\n\tif len(os.Args) > 1 { //checking the argument values for ex: go run hello.go hello bhuppal kumar\n\t\targValues = strings.Join(os.Args[1:], \" \")\n\t}\n\tfmt.Println(argValues)\n}", "func main() {\n\tfmt.Printf(\"importing ...\\n\")\n\tm := python.PyImport_ImportModule(\"go-python\")\n\tif m == nil {\n\t\tlog.Fatalf(\"could not import 'modoe'. 
Use command export PYTHONPATH=./\\n\")\n\t}\n\n\tfoo := m.GetAttrString(\"foo\")\n\tif foo == nil {\n\t\tlog.Fatalf(\"could not getattr(kwargs, 'foo')\\n\")\n\t}\n\n\t// keyword arguments\n\tkw := python.PyDict_New()\n\terr := python.PyDict_SetItem(\n\t\tkw,\n\t\tpython.PyString_FromString(\"Fernando Perez\"),\n\t\tpython.PyInt_FromLong(327524),\n\t)\n\tif err != nil {\n\t\tlog.Fatalf(\"error: %v\\n\", err)\n\t}\n\n\targs := python.PyList_New(0)\n\tout := foo.Call(args, kw)\n\tif out == nil {\n\t\tlog.Fatalf(\"%s\\n\", out)\n\t}\n\n\tstr := python.PyString_AsString(out)\n\tfmt.Printf(\"%s\\n\", str)\n}", "func hello(name string){\n\tfmt.Println(\"Hello\" , name)\n}", "func main() {\n\tswitch len(os.Args) {\n\tcase 2:\n\t\treplacer.Produce(os.Args[1])\n\tcase 3:\n\t\treplacer.Produce(os.Args[2])\n\tdefault:\n\t\tpanic(\"Bad usage. Pass 1 or 2 arguments. The last one should be path to file, estimated arguments will be ignored.\")\n\t}\n}", "func (machine *Dishwasher) RunCustomCommand(custom string) {\r\n machine.Append(func() (string, error) {\r\n var output string = \"\"\r\n var oops error = nil\r\n\r\n custom = strings.TrimSpace(custom)\r\n if custom[len(custom)-1] == '$' {\r\n go RunCommand(custom)\r\n } else {\r\n output, oops = RunCommand(custom)\r\n machine.SideEffect(output, oops)\r\n }\r\n\r\n return output, oops\r\n })\r\n}", "func fooUsage() {\n\tfmt.Fprintf(os.Stderr, `Service is the foo service interface.\nUsage:\n %s [globalflags] foo COMMAND [flags]\n\nCOMMAND:\n foo1: Foo1 implements foo1.\n foo2: Foo2 implements foo2.\n foo3: Foo3 implements foo3.\n foo-options: FooOptions implements fooOptions.\n\nAdditional help:\n %s foo COMMAND --help\n`, os.Args[0], os.Args[0])\n}", "func Hello(name string){\n\tfmt.Printf(\"Hello %s\\n\", name)\n}", "func main() {\n\tfoo := \"\"\n\tshowCommand := flag.NewFlagSet(\"show\", flag.ExitOnError)\n\tshowCommand.StringVar(&foo, \"foo\", \"DEFAULT\", \"foo param\")\n\n\tif len(os.Args) == 1 {\n\t\tfmt.Println(\"go run 
flag-practice.go <COMMAND> [--foo=<FOO>]\")\n\t\treturn\n\t}\n\n\tswitch os.Args[1] {\n\tcase \"show\":\n\t\tshowCommand.Parse(os.Args[2:])\n\t\tfmt.Println(foo)\n\tdefault:\n\t\tfmt.Printf(\"%q is not valid command.\\n\", os.Args[1])\n\t\tos.Exit(2)\n\t}\n}", "func main() {\n\t//Hello world\n\t//hello_world()\n\t//Define variables\n\t//define_variables()\n\t/*Data type*/\n\tdata_type()\n}", "func (mt *MTService) HelloY(strArg string, nameArr []string) string {\n\treturn fmt.Sprintf(\"strArg:%s-%+v\", strArg, nameArr)\n}", "func bar(s string) {\n\tfmt.Println(\"hello\" s)\n}", "func main() {\n\n\t// There is a function we called that shorthand but this we can prevent globally define value to call.\n\t// Always remember if value is declare but not used it will raise an error in Golang\n\t// This will print the output\n\tname := \"Gautam\"\n\tage := 34\n\tprice := 11.1\n\tcontact, email := 98765432, \"[email protected]\"\n\tfmt.Println(value1)\n\tfmt.Println(age)\n\tfmt.Println(price)\n\tfmt.Println(name)\n\tfmt.Println(contact)\n\tfmt.Println(email)\n\n\n}", "func main() {\n\tx := 5\n\tw := 3\n\ty := 4\n\ta := `amen `\n\tb := `sister`\n\tc := `we are love`\n\tvar q int\n\tfmt.Print(`how old are u ?`)\n\tfmt.Scanln(&q)\n\tfmt.Println(`i am `, q, ` years old`)\n\n\tfmt.Println(onefunc(c))\n\tfmt.Println(stringop(a, b))\n\t//start(w, y)\n\tzero(x)\n\tfmt.Println(x) // x is still 5\n\tfmt.Println(start(w, y))\n}", "func wantGoUsage() {\n\tfmt.Fprintf(os.Stderr, `The WantGo service\nUsage:\n %s [globalflags] want-go COMMAND [flags]\n\nCOMMAND:\n get-simple-card-list: GetSimpleCardList implements getSimpleCardList.\n get-card-info: GetCardInfo implements getCardInfo.\n post-card-info: PostCardInfo implements postCardInfo.\n put-card-info: PutCardInfo implements putCardInfo.\n delete-card-info: DeleteCardInfo implements deleteCardInfo.\n\nAdditional help:\n %s want-go COMMAND --help\n`, os.Args[0], os.Args[0])\n}", "func foo() string {\n\treturn \"hello world\"\n}", "func 
main() {\n\tentero, cadena, booleano := 2, \"Hola\", true\n\tfmt.Println(entero, cadena, booleano) // imprimir en nueva linea\n}", "func main() {\n\tfor i := 0; i < len(os.Args); i++ {\n\t\tfmt.Println(\"Argument at index\", i, \"is\", os.Args[i])\n\t}\n}", "func main() {\n\tvar x, y, z int = 1, 2, 3\n\tc, python, java := true, false, \"no!\"\n\n\tfmt.Println(x, y, z, c, python, java)\n}", "func main() {\n\tfmt.Println(a)\n\tfmt.Println(b)\n\tfmt.Println(c)\n}", "func SayHi(name string) {\n fmt.Printf(\"Hi, %s\\n\", name)\n}", "func main() {\n\t// TODO:\n\tfmt.Println(\"NOT IMPLEMENTED\")\n}", "func main() {\r\n\t/*\r\n\t\tMultiLine\r\n\t\tcomments\r\n\r\n\t\tVERRY goOD samples at: http://blog.golang.org/\r\n\t*/\r\n\r\n\t// print a line to console\r\n\tfmt.Println(\"Hello World!\")\r\n\r\n\t// print formated to console\r\n\tfmt.Printf(\"%s -- %s -- %s<br>\\n\", time.Now(), iGlobalMessage, iGlobalConst)\r\n\r\n\t// get user input by line(return)\r\n\tfmt.Println(\"enter new text for iGlobalMessage\")\r\n\tfmt.Scanln(&iGlobalMessage)\r\n\t// string concat with +\r\n\tfmt.Println(\"new global text:\" + iGlobalMessage)\r\n\r\n\t//# playing with vars\r\n\r\n\tvar someString string\r\n\tvar someStringWithText string = \"some text\"\r\n\t// vars can be autocasted to a type by its assigned value\r\n\tsomeStringToo := \"!<>some string<>!\"\r\n\t// we ALWAYS HAVE to use vars!\r\n\t// no var left behind! 
(the compiler does not like unused vars)\r\n\tfmt.Printf(\"'%s' - '%s' - '%s'\\n\", someString, someStringWithText, someStringToo)\r\n\r\n\tvar someInt int\r\n\tsomeIntToo := 22\r\n\tfmt.Printf(\"%d -- %d\\n\", someInt, someIntToo)\r\n\r\n\t// change values\r\n\tsomeInt = 99\r\n\tsomeIntToo = 33\r\n\tfmt.Printf(\"%d -- %d\\n\", someInt, someIntToo)\r\n\r\n\t// increment ints\r\n\tsomeInt++\r\n\tsomeIntToo += 1\r\n\tfmt.Printf(\"%d -- %d\\n\", someInt, someIntToo)\r\n\r\n\tvar multipleStrings1, multipleStrings2, multipleStrings3 string\r\n\tfmt.Printf(\"%s -- %s -- %s<br>\\n\", multipleStrings1, multipleStrings2, multipleStrings3)\r\n\r\n\tmultipleStringsToo1, multipleStringsToo2, multipleStringsToo3 := \"foo1\", \"foo2\", \"foo3\"\r\n\tfmt.Printf(\"%s -- %s -- %s<br>\\n\", multipleStringsToo1, multipleStringsToo2, multipleStringsToo3)\r\n\r\n\t// you can ONLY instance a var once\r\n\t// this will fail:\r\n\t//someIntToo := 99\r\n\r\n\t//# functions\r\n\r\n\t// function without parameter and return value\r\n\tuselessPrint()\r\n\r\n\t// function with 1 parameter\r\n\tuselessPrintParameter(42)\r\n\r\n\t// function with 2 parameters\r\n\tuselessPrintMore(\"the answer to all:\", 23)\r\n\r\n\t// function with return value\r\n\tvar uselessReturnTemp string\r\n\tuselessReturnTemp = uselessReturn()\r\n\t// this would work too:\r\n\t//var uselessReturnTemp string = uselessReturn()\r\n\t//uselessReturnTempToo := uselessReturn()\r\n\t//fmt.Println(uselessReturn())\r\n\tfmt.Println(uselessReturnTemp)\r\n\r\n\t// function with multiple return values\r\n\ttheAnswerText, theAnserNumber := uselessMultipleReturn()\r\n\t// will work too\r\n\t/*\r\n\t\tvar theAnswerText string\r\n\t\tvar theAnserNumber int\r\n\t\ttheAnswerText, theAnserNumber = uselessMultipleReturn()\r\n\t*/\r\n\tfmt.Printf(\"%s: %s\\n\", theAnswerText, theAnserNumber)\r\n\r\n\t//# conditions\r\n\tvar matchMe int = 23\r\n\tif matchMe == 7 {\r\n\t\tfmt.Println(\"the 7 is all\")\r\n\t} else if matchMe == 23 
{\r\n\t\tfmt.Println(\"dont trust machines\")\r\n\t} else {\r\n\t\tfmt.Println(\"this is not the value i am looking for\")\r\n\t}\r\n\r\n\tswitch {\r\n\tcase matchMe == 7:\r\n\t\tfmt.Println(\"the 7 is all\")\r\n\tcase matchMe == 23:\r\n\t\tfmt.Println(\"dont trust machines\")\r\n\tdefault:\r\n\t\tfmt.Println(\"this is not the value i am looking for\")\r\n\t}\r\n\r\n\t//# array, list of one type with unchangeable length\r\n\tvar myArray [3]int\r\n\tfmt.Println(myArray)\r\n\tmyArray[1] = 42\r\n\tfmt.Println(myArray)\r\n\r\n\t//# slice, list of one type that can vary in length\r\n\tvar mySlice []int\r\n\tfmt.Println(mySlice)\r\n\tmySlice = append(mySlice, 21, 42, 101)\r\n\tfmt.Println(mySlice)\r\n\tfmt.Println(mySlice[1])\r\n\r\n\t//# map, key/value pair of two types (dictionary)\r\n\tvar myMap map[string]int\r\n\t// you have to \"make\" them\r\n\tmyMap = make(map[string]int)\r\n\t// set some values\r\n\tmyMap[\"the answer\"] = 42\r\n\tmyMap[\"half_theTruth\"] = 11\r\n\tmyMap[\"weTrust\"] = 23\r\n\tfmt.Println(myMap)\r\n\t// access one value\r\n\tfmt.Println(myMap[\"half_theTruth\"])\r\n\t// change value\r\n\tmyMap[\"half_theTruth\"] = 21\r\n\tfmt.Println(myMap[\"half_theTruth\"])\r\n\r\n\t// access key->value pair\r\n\tfor key, value := range myMap {\r\n\t\tfmt.Println(\"Key:\", key, \"Value:\", value)\r\n\t}\r\n\r\n\t// delete key+value\r\n\tdelete(myMap, \"half_theTruth\")\r\n\tfmt.Println(myMap)\r\n}", "func RobotLingo() string {\n\n println(\"Affirmatives\")\n println(\"Affirmative\")\n println(\"affirmative\")\n println(\"affirmative\")\n\n println(\"lickMyBatteries\")\n println(\"LickMyBattery\")\n println(\"lickMyBattery\")\n println(\"lick_my_battery\")\n\n println(\"exterminate_the_humans\")\n println(\"Exterminatethehuman\")\n println(\"exterminatethehuman\")\n println(\"exterminate_the_human\")\n\n}", "func lesson44(){\n\tdo(10)\n\tdo(\"Sato\")\n\tdo(true)\n}", "func Hi(name string) {\n\tfmt.Println(\"Hi\", name)\n}", "func bar(s string) 
{\n\tfmt.Println(\"hello,\", s)\n}", "func main() {\n\tcmd := cliapp.Command{\n\t\tName: \"test\",\n\t\tAliases: []string{\"ts\"},\n\t\tDescription: \"this is a description <info>message</> for {$cmd}\", // // {$cmd} will be replace to 'test'\n\t\tFn: run,\n\t}\n\n\tcmd.Flags.BoolVar(&opts.visualMode, \"visual\", false, \"Prints the font name.\")\n\tcmd.Flags.StringVar(&opts.fontName, \"font\", \"\", \"Choose a font name. Default is a random font.\")\n\tcmd.Flags.BoolVar(&opts.list, \"list\", false, \"Lists all available fonts.\")\n\tcmd.Flags.BoolVar(&opts.sample, \"sample\", false, \"Prints a sample with that font.\")\n\n\t// Alone Running\n\tcmd.AloneRun()\n}", "func main() {\n\tif len(os.Args) != 2 {\n\t\tfmt.Println(\"Usage: flyweight.exe digits\")\n\t\tfmt.Println(\"Ex. : flyweight.exe 1212123\")\n\t} else {\n\t\tbs := NewLargeSizeString(os.Args[1])\n\t\tbs.Display()\n\t}\n}", "func main() {\n\t// var whatToSay string\n\t// whatToSay = \"Hello World again!\"\n\n\t// store a string in a variable and it figure out what type this is based\n\t// whatToSay := \"Hello World again!\"\n\n\t// or\n\t// var whatToSay string = \"Hello World again!\"\n\t// sayHelloWorld(whatToSay)\n\n\treader := bufio.NewReader(os.Stdin)\n\n\tvar whatToSay string = doctor.Intro()\n\n\tfmt.Println(whatToSay)\n\n\tfor {\n\t\tfmt.Print(\"-> \")\n\t\tuserInput, _ := reader.ReadString('\\n')\n\t\tuserInput = strings.Replace(userInput, \"\\r\\n\", \"\", -1)\n\t\tuserInput = strings.Replace(userInput, \"\\n\", \"\", -1)\n\t\tif userInput == \"quit\" {\n\t\t\tbreak\n\t\t} else {\n\t\t\tfmt.Println(doctor.Response(userInput))\n\t\t}\n\t}\n}", "func Main(args map[string]interface{}) map[string]interface{} {\n\tname, ok := args[\"name\"].(string)\n\tif !ok {\n\t\tname = \"world\"\n\t}\n\treturn map[string]interface{}{\n\t\t\"body\": \"Go: Hello \" + name,\n\t}\n}", "func SampleAlpha(alpha string) {\n\n}", "func main() {\n\tfirst, last := greet(\"Luis \", \"Benavides \")\n\tfmt.Printf(\"%s %s \\n\", 
first, last)\n}", "func wrappers(app string) (prefix, suffix string) {\n\tswitch app {\n\tcase \"Slack\":\n\tcase \"Discord\":\n\t\tprefix, suffix = \"```\\n\", \"```\"\n\t}\n\treturn\n}", "func main() {\n\tkeyword := \"Plastiblends\"\n\tsource.YahooFinance(\"https://in.finance.yahoo.com\", keyword)\n\t// source.EconomicTimes(\"https://economictimes.indiatimes.com\", keyword)\n\t// // source.CNBC(\"https://www.cnbctv18.com\", keyword, `\\s*(?i)https://www[.]cnbctv18[.]com(\\\"([^\"]*\\\")|'[^']*'|([^'\">\\s]+))`)\n\t// source.MoneyControl(\"https://www.moneycontrol.com\", keyword, `\\s*(?i)https://www[.]moneycontrol[.]com(\\\"([^\"]*\\\")|'[^']*'|([^'\">\\s]+))`)\n\t// source.Investing(\"https://in.investing.com\", keyword)\n}", "func main() {\n\tsamples := []string{\"hello\", \"apple_π!\"}\nouter:\n\tfor _, sample := range samples {\n\t\tfor i, r := range sample {\n\t\t\tfmt.Println(i, r, string(r))\n\t\t\tif r == 'l' {\n\t\t\t\tcontinue outer\n\t\t\t}\n\t\t}\n\t\tfmt.Println()\n\t}\n}", "func Hello(name ...string) string{\n\tif len(name) > 0{\n\t\treturn fmt.Sprintf(\"Hello %s!\", name[0])\n\t}\n\treturn \"Hello World!\"\n}", "func demo(greeting string) {\n\tfmt.Println(greeting)\n}", "func sayHello(firstName, lastName, birthday string) {\n\tage := calculateAge(birthday)\n\tfmt.Println(\"Hi\", firstName, lastName+\", you are \"+age+\" years old\")\n\t// => \"Hi Van Le, you are 26.5 years old\"\n}", "func main() {\n\tx := 10\n\n\tfmt.Printf(\"i said %v %v times\\n\", y, x)\n\n\tfmt.Println(z)\n\n}", "func main( /*no arguments*/ ) /*no return value*/ {\n\n\t// Command line arguments are provided by os.Args as []string.\n\tif len(os.Args) == 1 {\n\t\tfmt.Println(\"Hallo Welt!\")\n\t} else {\n\t\tfor i := 1; i < len(os.Args); i++ {\n\t\t\tfmt.Print(os.Args[i])\n\t\t\tif i+1 < len(os.Args) {\n\t\t\t\tfmt.Print(\", \")\n\t\t\t}\n\t\t}\n\t\tfmt.Println()\n\t}\n\n\t// This is a method call.\n\tvar args MyType\n\targs = os.Args\n\tn1 := args.CountArgs()\n\tfmt.Printf(\"n1 
= %v\\n\", n1) // %v - value in default format\n\n\t// This is a function call.\n\tn2 := CountArgs(os.Args)\n\tfmt.Printf(\"n2 = %v\\n\", n2)\n}", "func greet1(fname string, lname string) {\n\tfmt.Println(fname, lname)\n}", "func hello(conn *ExtendedConnection, data *Hello) {\n\tconn.Counter += 1\n\tfmt.Println(\"Hello from\", data.From, \"to\", data.To, \"Counter:\", conn.Counter)\n\tconn.answer(\"Thanks, client!\")\n}", "func main() {\n\n\t//simple from string\n\tt, err := template.New(\"foo\").Parse(`\n{{define \"T\"}}\n{{.}}\n<a title='{{.}}'>\n<a href=\"/{{.}}\">\n<a href=\"?q={{.}}\">\n<a on='f(\"{{.}}\")'>\n<a on='f({{.}})'>\n<a on='pattern = /{{.}}/;'>\n{{end}}\n`)\n\t// now try to bypass our template with wimple injection:\n\terr = t.ExecuteTemplate(os.Stdout, \"T\", \"<script>alert('you have been pwned')</script>\")\n\n\t// as we can see in each context data are escaped in different way.\n\tif err != nil {\n\t\tfmt.Errorf(\"Error %s\", err)\n\t}\n}", "func main() {\n\n\tdisplayMessage(\"Hello\", \"from\", \"my\", \"little\", \"friend\")\n\n\tmin := minimum(13, 12, 17, 14, 12, 7, 5, 2)\n\tfmt.Println(\"The minimum value is:\", min)\n\n}", "func main() {\r\n\t//goRoutines()\r\n\t//goRoutineBufferChannel()\r\n\t//goRoutinesSyncExample()\r\n\t//goRoutineMutexExample()\r\n\t//goRoutineOddEvenSample()\r\n\t//variadicFunctions()\r\n\t//goMaxProcsExample()\r\n\t//goRoutunesGoodExample()\r\n\t//generateOddEvenNumbers()\r\n\t//diningPhilosophersProblem\r\n\t//printLog()\r\n\t//SampleEvent()\r\n\t//sampleCallback()\r\n\t//samplePromise()\r\n\t//sampleOnce()\r\n\t//samplePool()\r\n\t//Println(os.Args[1], os.Args[2])\r\n\t//sampleRecover()\r\n\t//sampleChannel()\r\n\t//sampleChannel1()\r\n\t//multiflexingChannels()\r\n}", "func Extend(target map[string]interface{}, args ...interface{}) (map[string]interface{}, error) {\n\tif len(args)%2 != 0 {\n\t\treturn nil, fmt.Errorf(\"expecting even number of arguments, got %d\", len(args))\n\t}\n\n\tfn := \"\"\n\tfor _, v := 
range args {\n\t\tif len(fn) == 0 {\n\t\t\tif s, ok := v.(string); ok {\n\t\t\t\tfn = s\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn target, fmt.Errorf(\"expecting string for odd numbered arguments, got %+v\", v)\n\t\t}\n\t\ttarget[fn] = v\n\t\tfn = \"\"\n\t}\n\n\treturn target, nil\n}", "func main() {\n\n\tfmt.Println(\"Hello\")\n\tworkingmyassoff.Working(\"SId\")\n\n\tconst name string = \"Sid\"\n\n\tworkingmyassoff.IsthisWorking(name == \"Sid\")\n\n\tworkingmyassoff.IsthisWorking(name == \"Owl\")\n}", "func main() {\n\tapp := &cli.App{\n\t\tName: \"grrs\",\n\t\tUsage: \"like a grep, but written in go\",\n\t\tAction: func(c *cli.Context) error {\n\t\t\targs, err := cl.InitArgs(c.Args())\n\t\t\tcheck(err)\n\t\t\tlines, err := matches.InFile(args)\n\t\t\tcheck(err)\n\t\t\tfor _, line := range lines {\n\t\t\t\tfmt.Println(line)\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t}\n\n\terr := app.Run(os.Args)\n\tcheck(errors.Wrap(err, strings.Join(os.Args, \", \")))\n}", "func main() {\n\tshout1()\n\tshout2()\n\thello_utils.Twerpy()\n}", "func Hello(ctx ejob.Context) error {\n\tfmt.Printf(\"hello, world\")\n\treturn nil\n}", "func say(word string) string {\n title := \"[20190926] Hello World\"\n return title + \" \" + word\n}", "func main() {\n\t// flag.Parse()\n\t// args := flag.Args()\n\t// if len(args) < 1 {\n\t// \tfmt.Println(\"Please specify start page\") // if a starting page wasn't provided as an argument\n\t// \tos.Exit(1) // show a message and exit.\n\t// }\n\t// getBody(args[0])\n\tgetBody(\"https://ng.indeed.com/jobs-in-Lagos\")\n}", "func main(){\n var c, python, java = true, false, \"no!\"\n l := 100 // declaring as short variable\n fmt.Println(i, j, c, python, java, k, l )\n}", "func myPrintFunction(custom string) myPrintType{\nreturn func(s string){\nfmt.Println(s +custom)\n}\n}", "func main() {\n fmt.println(\"Hello Programmer!\")\n fmt.println(\"Here's what you have in store for today\")\n fmt.println(\"I can't predict your day, I'm a computer. 
But you can tell me\")\n}", "func Sample() {\n\tfmt.Println(\"s11sss\")\n}", "func Something(i string) string{\n\treturn \"app connected\" + i\n}", "func main() {\n\t// initialize the app with custom registered objects in the injection container\n\tc := clif.New(\"My App\", \"1.0.0\", \"An example application\").\n\t\tRegister(&exampleStruct{\"bar1\"}).\n\t\tRegisterAs(reflect.TypeOf((*exampleInterface)(nil)).Elem().String(), &exampleStruct{\"bar2\"}).\n\t\tNew(\"hello\", \"The obligatory hello world\", callHello)\n\n\t// extend output styles\n\tclif.DefaultStyles[\"mine\"] = \"\\033[32;1m\"\n\n\t// customize error handler\n\tclif.Die = func(msg string, args ...interface{}) {\n\t\tc.Output().Printf(\"<error>Everyting went wrong: %s<reset>\\n\\n\", fmt.Sprintf(msg, args...))\n\t\tos.Exit(1)\n\t}\n\n\t// build & add a complex command\n\tcmd := clif.NewCommand(\"foo\", \"It does foo\", callFoo).\n\t\tNewArgument(\"name\", \"Name for greeting\", \"\", true, false).\n\t\tNewArgument(\"more-names\", \"And more names for greeting\", \"\", false, true).\n\t\tNewOption(\"whatever\", \"w\", \"Some required option\", \"\", true, false)\n\tcnt := clif.NewOption(\"counter\", \"c\", \"Show how high you can count\", \"\", false, false)\n\tcnt.SetValidator(clif.IsInt)\n\tcmd.AddOption(cnt)\n\tc.Add(cmd)\n\n\tcb := func(c *clif.Command, out clif.Output) {\n\t\tout.Printf(\"Called %s\\n\", c.Name)\n\t}\n\tc.New(\"bar:baz\", \"A grouped command\", cb).\n\t\tNew(\"bar:zoing\", \"Another grouped command\", cb).\n\t\tNew(\"hmm:huh\", \"Yet another grouped command\", cb).\n\t\tNew(\"hmm:uhm\", \"And yet another grouped command\", cb)\n\n\t// execute the main loop\n\tc.Run()\n}", "func HelloWorld() {\n\tprintln(\"Hello world #2!\")\n}", "func main() {\n\tbase()\n}", "func main() {\n\tf,err := os.Create(\"Grace_kid.go\")\n\tif (err != nil) {\n\t\tpanic(err)\n\t}\n\tw := bufio.NewWriter(f)\n\ts := `package main\n\nimport(\n\t\"fmt\"\n\t\"os\"\n\t\"bufio\"\n)\n\n/*\n Just a random comment 
wandering in this Quine\n*/\n\nfunc main() {\n\tf,err := os.Create(\"Grace_kid.go\")\n\tif (err != nil) {\n\t\tpanic(err)\n\t}\n\tw := bufio.NewWriter(f)\n\ts := %s\n\tfmt.Fprintf(w, s, \"%c\"+s+\"%c\", 96, 96, 10)\n\tw.Flush()\n}%c`\n\tfmt.Fprintf(w, s, \"`\"+s+\"`\", 96, 96, 10)\n\tw.Flush()\n}", "func (term *Terminal) Custom(prefix string, test func(string) (string, bool)) (string, error) {\n\tvar err error\n\tvar input string\n\tvar ok bool\n\n\tfor !ok {\n\t\tinput, err = term.GetPrompt(prefix)\n\t\tif err != nil && err != io.EOF {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tinput, ok = test(input)\n\t}\n\n\treturn input, nil\n}", "func Hello() string { //If you are not returning anything , u will write func Hello(){ ... }\n\tfmt.Println(\"This is going to print our standard first program\")\n\treturn \"Hello, World\"\n}", "func main() {\n\n\tif len(os.Args) != 2 {\n\t\tfmt.Println(\"Wrong params. Usage\")\n\t\t// Show list of snippets suitable for coping in command ba\n\t\tvar cmds bytes.Buffer\n\t\tfor k, _ := range Snippets {\n\t\t\tcmds.WriteString(fmt.Sprintf(\"'sn %s', \", k))\n\t\t}\n\t\tfmt.Println(cmds.String())\n\t\tos.Exit(2)\n\t}\n\tvar snippet string\n\tcommand := os.Args[1]\n\tif val, ok := Snippets[command]; ok == false {\n\t\tfmt.Println(\"Snippet not found\")\n\t\tos.Exit(1)\n\t} else {\n\t\tsnippet = val\n\t}\n\n\tid, _ := strconv.Atoi(os.Getenv(\"winid\"))\n\twfile, err := acme.Open(id, nil)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// Frist read is buggeous\n\t_, _, _ = wfile.ReadAddr()\n\n\terr = wfile.Ctl(\"addr=dot\\n\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t// Read current cursor position\n\tq0, q1, _ := wfile.ReadAddr()\n\n\tvar a, b int\n\n\t// get user selection\n\tvar selection string\n\tif q0 == q1 {\n\t\tselection = \"\"\n\t} else {\n\t\t\n\t\tdata, err := wfile.ReadAll(\"body\")\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\ta = q0\t\n\t\t// to locate second byte offset must check for\n\t\t// runes inside 
string\n\t\tb = runeOffset2ByteOffset(data, q1)\n\t\ta = runeOffset2ByteOffset(data, q0)\n\t\t\n\t\tselection = string(data[a:b])\n\t\t\n\t\t// restore address after read.\n\t\terr = wfile.Addr(\"#%d,#%d\", q0, q1)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n\tresult := fmt.Sprintf(snippet, selection)\n\t_, err = wfile.Write(\"data\", []byte(result))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t// Try to put cursor on middle snippet\n\t// if empty selection\n\tif selection == \"\" {\n\t\tc := q0 + len(result)/2\n\t\terr = wfile.Addr(\"#%d,#%d\", c, c)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\t_ = wfile.Ctl(\"dot=addr\\n\")\n\n\t}\n\n}", "func main() {\n\n\tflag.IntVar(&broker, \"broker\", broker, \"Broker Id\")\n\tflag.StringVar(&host, \"host\", host, \"Exchange host\")\n\tflag.IntVar(&port, \"port\", port, \"Exchange port\")\n\tflag.Parse()\n\n\tcommand := flag.Arg(0)\n\n\tswitch strings.ToLower(command) {\n\tcase \"offer\":\n\t\toffer()\n\tcase \"offers\":\n\t\toffers()\n\tcase \"bid\":\n\t\tbid()\n\tcase \"bids\":\n\t\tbids()\n\tcase \"add-broker\":\n\t\taddBroker()\n\tdefault:\n\t\tfmt.Println(\"Please provide a valid command\")\n\t}\n\n\tos.Exit(1)\n}", "func main() {\n\tword := \"apple\"\n\tprefix := \"app\"\n\tobj := Constructor()\n\tobj.Insert(word)\n\tparam_2 := obj.Search(word)\n\tparam_3 := obj.StartsWith(prefix)\n\tfmt.Println(param_2, param_3)\n}", "func main() {\n\t// Get a greeting message and print it.\n\t//Access the Hello function in the greetings package\n\tmessage := greetings.Hello(\"Gladys\")\n\tfmt.Println(message)\n}", "func (s *StressFlag) AddExtraUsage(eu string) {}", "func OOMthdInteritanceMain() {\n\tsaul := Student{Person{\"Saul\", 25, \"620-923-8989\"}, \"MIT\"}\n\tsteve := Employee{Person{\"Steve\", 40, \"510-924-3434\"}, \"Google\"}\n\n\tsaul.SayHi()\n\tsteve.SayHi()\n\n}", "func say(s string) {\n\tfor i := 0; i < 5; i++ {\n\t\tfmt.Println(s)\n\t}\n}", "func Haha1() {\r\n\tfmt.Print(\"Ha ha 
ebitut\\n\")\r\n}", "func Hello(name string) {\n\tfmt.Printf(\"Hello, %s!\\n\", name)\n}", "func usageMain(w io.Writer, set *flag.FlagSet) error {\n\toutputPara(w, usageLineLength, 0, usageShort)\n\toutputPara(w, usageLineLength, 0, usageMainPara)\n\n\tcmdNames := commands.Names()\n\n\tfieldWidth := 0\n\tfor _, name := range cmdNames {\n\t\tif n := len(name); n+4 > fieldWidth {\n\t\t\tfieldWidth = n + 4\n\t\t}\n\t}\n\n\tfmt.Fprintln(w, \"Commands:\")\n\tfor _, name := range cmdNames {\n\t\tcmd, _ := commands[name]\n\t\tfmt.Fprintf(w, \"%s%-*s %s\\n\", strings.Repeat(\" \", usageIndent), fieldWidth, name, cmd.shortDesc)\n\t}\n\tfmt.Fprintln(w)\n\toutputPara(w, usageLineLength, 0, usageCommandPara)\n\n\tif !isFlagPassed(set, commonFlag) {\n\t\toutputPara(w, usageLineLength, 0, usageCommonPara)\n\n\t\treturn nil\n\t}\n\n\tfmt.Fprintln(w, \"Configuration file:\")\n\toutputPara(w, usageLineLength, usageIndent, usageConfigIntroPara)\n\toutputPara(w, usageLineLength, usageIndent, usageConfigLocationPara)\n\toutputPara(w, usageLineLength, usageIndent, usageConfigKeysPara)\n\n\tfmt.Fprintln(w, \"Explicit and implicit anchors:\")\n\toutputPara(w, usageLineLength, usageIndent, usageAnchorsIntroPara)\n\toutputPara(w, usageLineLength, usageIndent, usageAnchorsFormatPara)\n\toutputPara(w, usageLineLength, usageIndent, usageAnchorsInsecurePara)\n\n\tfmt.Fprintln(w, \"Additional path segment:\")\n\toutputPara(w, usageLineLength, usageIndent, usageAPSIntroPara)\n\n\tfmt.Fprintln(w, \"TLS client certificates:\")\n\toutputPara(w, usageLineLength, usageIndent, usageCertsIntroPara)\n\toutputPara(w, usageLineLength, usageIndent, usageCertsFormatPara)\n\toutputPara(w, usageLineLength, usageIndent, usageCertsKeyPara)\n\n\tfmt.Fprintln(w, \"Additional HTTP headers:\")\n\toutputPara(w, usageLineLength, usageIndent, usageHeadersPara)\n\toutputPara(w, usageLineLength, usageIndent*2, usageHeadersExample)\n\n\tfmt.Fprintln(w, \"HTTP Host header:\")\n\toutputPara(w, usageLineLength, usageIndent, 
usageHostHeaderPara)\n\n\tfmt.Fprintln(w, \"Request timeout:\")\n\toutputPara(w, usageLineLength, usageIndent, usageTimeoutPara)\n\n\treturn nil\n}", "func main() {\n\targs := os.Args\n\tapp := args[0]\n\tsc := bufio.NewScanner(os.Stdin)\n\tminWordLength := flag.Int(\"min-length\", 1, \"minimum length of words to keep (default is 1)\")\n\tshowLineNumber := flag.Bool(\"show-line-number\", false, \"allow display of prefix with line number\")\n\tconvertToCase := flag.String(\"convert-to-case\", \"\", \"convert case of all words. Can be one of (U,u,upper or L,l,lower)\")\n\tflag.Parse()\n\tcount := 1\n\n\tfor sc.Scan() {\n\t\tif len(strings.TrimSpace(sc.Text())) > 0 {\n\t\t\t// should match any utf8 letter including any diacritics\n\t\t\t// this regex will always match a rune like : à, regardless of how it is encoded !\n\t\t\t// https://www.regular-expressions.info/unicode.html\n\t\t\treWords := regexp.MustCompile(\"(\\\\p{L}\\\\p{M}*)+\")\n\t\t\twords := reWords.FindAllString(sc.Text(), -1)\n\t\t\tonlyBiggerWords := []string{}\n\t\t\tfor _, word := range words {\n\t\t\t\tif utf8.RuneCountInString(strings.TrimSpace(word)) >= *minWordLength {\n\t\t\t\t\tswitch *convertToCase {\n\t\t\t\t\tcase \"upper\", \"u\", \"U\":\n\t\t\t\t\t\tonlyBiggerWords = append(onlyBiggerWords, strings.ToUpper(word))\n\t\t\t\t\tcase \"lower\", \"l\", \"L\":\n\t\t\t\t\t\tonlyBiggerWords = append(onlyBiggerWords, strings.ToLower(word))\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tonlyBiggerWords = append(onlyBiggerWords, word)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif len(onlyBiggerWords) > 0 {\n\t\t\t\tif *showLineNumber {\n\t\t\t\t\twList := strings.Join(onlyBiggerWords, \" \")\n\t\t\t\t\tfmt.Printf(\"%d : %s \\n\", count, wList)\n\t\t\t\t} else {\n\t\t\t\t\twList := strings.Join(onlyBiggerWords, \"\\n\")\n\t\t\t\t\tfmt.Printf(\"%s\\n\", wList)\n\t\t\t\t}\n\t\t\t}\n\t\t\tcount += 1\n\t\t\t//fmt.Println(wList)\n\t\t} else {\n\t\t\tif count < 2 {\n\t\t\t\t//TODO find a solution to display help when no data 
is present without polutin output if first line is empty\n\t\t\t\tgolog.Warn(\"# %s should be used in a pipe like this : cat your_utf8_text.txt | getwords --min-length=2 --show-line-number=0\", app)\n\t\t\t}\n\t\t}\n\t}\n\tif err := sc.Err(); err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"error:\", err)\n\t\tos.Exit(1)\n\t}\n}", "func implements(s selection, args []string) {\n\tfmt.Println(runWithStdin(s.archive(), \"guru\", \"-modified\", \"-scope\", scope(args), \"implements\", s.pos()))\n}", "func main() {\n\tvar name string\n\t// flag.StringVar() melakukan penyimpanan dgn metode referensi, dimana variabel yg digunakan adalah \"name\"\n\tflag.StringVar(&name,\"name\",\"any\",\"type your name\")\n\tvar age = flag.Int64(\"age\",25,\"type your age\")\n\n\tflag.Parse()\n\n\t// Pengaksesan nilai asli tdk perlu di-deference lagi\n\tfmt.Printf(\"Name\\t: %s\\n\",name)\n\n\tfmt.Printf(\"Age\\t: %d\\n\",*age)\n\n\t// Cara eksekusinya : go run flagnya.go -name=\"Alex Hendra\" -age=28\n}", "func main() {\n\tversion = \"k/g\"\n\tMain(os.Args[1:])\n}", "func versionRunner(cmd *cobra.Command, args []string) {\n\tfmt.Println(\"Passgen : Password generator for general purpose - v2\")\n}", "func main() {\n\tfmt.Println(\"Hello, my name is Jordan\")\n}", "func main() {\n\tauthor1 := author{\n\t\t\"Naveen\",\n\t\t\"Ramanathan\",\n\t\t\"Golang Enthusiast\",\n\t}\n\tpost1 := post{\n\t\t\"Inheritance in Go\",\n\t\t\"Go supports composition instead of inheritance\",\n\t\tauthor1,\n\t}\n\tpost1.details()\n}", "func main() {\n\t//fmt.Println(findSubstring(\"barfoothefoobarman\", []string{\"foo\", \"bar\"}))\n\t//fmt.Println(findSubstring(\"wordgoodgoodgoodbestword\", []string{\"word\", \"good\", \"best\", \"good\"}))\n\tfmt.Println(findSubstring(\"foobarfoobar\", []string{\"foo\", \"bar\"}))\n}", "func main() {\n\tflag.Parse()\n\tif err := echoargs(!*n, *s, flag.Args()); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"echo %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n}", "func otherVersion(c 
*cli.Context) error {\n\targs := c.Args()\n\tif args.Len() != 1 {\n\t\treturn nil\n\t}\n\tn := args.First()\n\tfor _, cmd := range c.Command.VisibleCommands() {\n\t\tif cmd.HasName(n) {\n\t\t\treturn nil\n\t\t}\n\t}\n\tif verRegexp.MatchString(n) {\n\t\tos.Exit(0)\n\t}\n\treturn nil\n}", "func hints(s string) *cli.Hint {\n\tif s == \"hello\" {\n\t\t// string, color, bold\n\t\treturn &cli.Hint{\" World\", 35, false}\n\t}\n\treturn nil\n}", "func Greet(name string) {\n fmt.Println(\"Hello, \" + name)\n}", "func generateByThirdPackage() {\n\n}", "func hello(name string) string {\n\treturn fmt.Sprintf(\"Hello, %s!\", name)\n}", "func main(){\n\n\tfmt.Println(checkInclusion(\"ab\",\"eidbaooo\"))\n\n}", "func main() {\n\n\tch1 := boring4(\"Tom\")\n\tch2 := boring4(\"Jerry\")\n\n\t//c := fanIn(ch1, ch2)\n\t//for i := 1; i < 10; i++ {\n\t//\tfmt.Println(<-c)\n\t//}\n\n\tc2 := fanSimple(ch1, ch2)\n\tfor i := 1; i < 10; i++ {\n\t\tfmt.Println(<-c2)\n\t}\n\tfmt.Println(\"You're both boring. I'm leaving\")\n}", "func Echo5() {\r\n\tfor i, v := range os.Args {\r\n\t\tfmt.Printf(\"index: %d\\tvalue: %s\\n\", i, v)\r\n\t}\r\n}", "func main() {\n\tfmt.Println(\"starting main function\")\n\tdisplayMessage(\"hi towfeeq\")\n\tdisplayValue(20)\n\tfmt.Println(\"ending main function\")\n\n\n}", "func GenAsciidocCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string) string) error {\n\tcmd.InitDefaultHelpCmd()\n\tcmd.InitDefaultHelpFlag()\n\n\tbuf := new(bytes.Buffer)\n\tname := cmd.CommandPath()\n\tbuf.WriteString(\"== \" + name + \"\\n\\n\")\n\tbuf.WriteString(\"ifdef::env-github,env-browser[:relfilesuffix: .adoc]\\n\\n\")\n\tbuf.WriteString(cmd.Short + \"\\n\\n\")\n\tif len(cmd.Long) > 0 {\n\t\tbuf.WriteString(\"=== Synopsis\\n\\n\")\n\t\tbuf.WriteString(cmd.Long + \"\\n\\n\")\n\t}\n\n\tif cmd.Runnable() {\n\t\tbuf.WriteString(fmt.Sprintf(\"....\\n%s\\n....\\n\\n\", cmd.UseLine()))\n\t}\n\n\tif len(cmd.Example) > 0 {\n\t\tbuf.WriteString(\"=== 
Examples\\n\\n\")\n\t\tbuf.WriteString(fmt.Sprintf(\"....\\n%s\\n....\\n\\n\", cmd.Example))\n\t}\n\n\tif err := printOptions(buf, cmd); err != nil {\n\t\treturn err\n\t}\n\tif hasSeeAlso(cmd) {\n\t\tbuf.WriteString(\"=== SEE ALSO\\n\\n\")\n\t\tif cmd.HasParent() {\n\t\t\tparent := cmd.Parent()\n\t\t\tpname := parent.CommandPath()\n\t\t\tlink := pname + \"{relfilesuffix}\"\n\t\t\tlink = strings.ReplaceAll(link, \" \", \"_\")\n\t\t\tbuf.WriteString(fmt.Sprintf(\"* link:%s[%s]\\t - %s\\n\", linkHandler(link), pname, parent.Short))\n\t\t\tcmd.VisitParents(func(c *cobra.Command) {\n\t\t\t\tif c.DisableAutoGenTag {\n\t\t\t\t\tcmd.DisableAutoGenTag = c.DisableAutoGenTag\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\n\t\tchildren := cmd.Commands()\n\t\tsort.Sort(byName(children))\n\n\t\tfor _, child := range children {\n\t\t\tif !child.IsAvailableCommand() || child.IsAdditionalHelpTopicCommand() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcname := name + \" \" + child.Name()\n\t\t\tlink := cname + \"{relfilesuffix}\"\n\t\t\tlink = strings.ReplaceAll(link, \" \", \"_\")\n\t\t\tbuf.WriteString(fmt.Sprintf(\"* link:%s[%s]\\t - %s\\n\", linkHandler(link), cname, child.Short))\n\t\t}\n\t\tbuf.WriteString(\"\\n\")\n\t}\n\tif !cmd.DisableAutoGenTag {\n\t\tbuf.WriteString(\"====== Auto generated by spf13/cobra on \" + time.Now().Format(\"2-Jan-2006\") + \"\\n\")\n\t}\n\t_, err := buf.WriteTo(w)\n\treturn err\n}", "func usage() string {\n\treturn `\n\n Usage:\n ./qdawslogs [-logGroupName xxx] [-field xx]* -filter FILTER_CLAUSE or -messageFilter [-startTime epoch/RFC3339] [-endTime epoch/RFC3339] [-limit xxx] [-region xxx]\n\n Required: -filter or -messageFilter. \n\n Filter must be a complete filter clause. See https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html\n MessageFilter is the value to be included in the like clause.\n\n -field is optional. Can be specified multiple times. 
When specified, it should be one of the following: @timestamp, @message, @logStream or @ingestionTime\n\n -startTime/-endTime can be either an integer for the epoch time in seconds or RFC 3339 format (which is the case of graphQL datetime value\n\n Optional with provided values:\n logGroupName = /aws/ecs/prod-rt\n region=us-east-1\n field = @timestamp, @message, @logStream\n startTime = 1hour before now\n endTime = now\n\n\n -------------------\n Example:\n\n [1] Getting fields timestamp, message from the log group /aws/ecs/stage-rt within the previous hour of the given epoch time: \n\n\t\t ./qdawslogs -logGroupName /aws/ecs/stage-rt -field @timestamp -field @message -filter \"@message like /19062412_5Xi2eYcEc6/\" -endTime 1560322977 -limit 1000\n \n\n\n\n [2] Getting default fields (timestamp, message, logStream) from the default log group /aws/ecs/prod-rt with startTime 2019-06-12T06:47:12.000Z, filtering \n messages containing 19062412_5Xi2eYcEc6:\n\n\t\t ./qdawslogs.mac -startTime \"2019-06-12T06:47:12.000Z\" -messageFilter 19062412_5Xi2eYcEc6\n\n\n\n\n [3] Getting default fields (timestamp, message, logStream) from the default log group /aws/ecs/prod-rt with startTime 2019-06-12T06:47:12.000Z, filtering \n messages containing 19062412_5Xi2eYcEc6 AND logStream contains \"coord\":\n\n ./qdawslogs.mac -startTime \"2019-06-12T06:47:12.000Z\" -messageFilter 19062412_5Xi2eYcEc6 -filter \"@logStream like /coord/\"\n \n\n`\n}", "func printHelpCustomNoTemplate(out io.Writer, template string, data interface{}, _ map[string]interface{}) {\n\thelpStr := \"\"\n\tswitch d := data.(type) {\n\tcase *App:\n\t\tcommandsStr := \"\"\n\t\tfor _, cmd := range d.Commands {\n\t\t\tcommandsStr = commandsStr + fmt.Sprintf(\" %s\\t%s\\n\", cmd.Name, cmd.Usage)\n\t\t}\n\t\toptionsStr := \"\"\n\t\tfor _, flag := range d.Flags {\n\t\t\toptionsStr = optionsStr + fmt.Sprintf(\" %s\\n\", flag)\n\t\t}\n\t\t// It is not possible to figure out if the data passed is an App or a \"command with 
subcommands\" (which\n\t\t// uses the same App struct). Hack: look for a unique string in the [otherwise ignored] given template.\n\t\tif strings.Contains(template, \"VERSION\") {\n\t\t\thelpStr = fmt.Sprintf(appHelpTextFmt, d.Name, d.Usage, d.Version, d.Description, commandsStr, optionsStr)\n\t\t} else {\n\t\t\thelpStr = fmt.Sprintf(subCmdHelpFmt, d.Name, d.Usage, d.Name, d.Description, commandsStr, optionsStr)\n\t\t}\n\tcase *Command:\n\t\toptionsStr := \"\"\n\t\tfor _, flag := range d.Flags {\n\t\t\toptionsStr = optionsStr + fmt.Sprintf(\" %s\\n\", flag)\n\t\t}\n\t\thelpStr = fmt.Sprintf(cmdHelpTextFmt, d.Name, d.Usage, d.Name, optionsStr)\n\t}\n\n\tw := tabwriter.NewWriter(out, 1, 8, 2, ' ', 0)\n\tw.Write([]byte(helpStr))\n\tw.Flush()\n}", "func main() {\n\tfmt.Println(greeting(\"John\", \"New York City\", \"banker\"))\n\tfmt.Println(getAge(1980))\n\tfmt.Println(greeting(\"Sam\", \"Miami\", \"surfer\"))\n\tfmt.Println(getAge(1991))\n\tfmt.Println(greeting(\"Jane\", \"San Francisco\", \"software developer\"))\n\tfmt.Println(getAge(1986))\n}", "func main() {\n\tl := len(os.Args)\n\tif l <= 1 || l%2 != 1 {\n\t\tdie(help)\n\t}\n\tfilter := inex.NewRoot()\n\tpath := \".\"\n\tfor i := 1; i < l; i += 2 {\n\t\tswitch os.Args[i] {\n\t\tcase \"-e\", \"--exclude\":\n\t\t\tfilter = filter.Exclude(&inex.RegexpMatcher{regexp.MustCompile(os.Args[i+1])})\n\t\tcase \"-i\", \"--include\":\n\t\t\tfilter = filter.Include(&inex.RegexpMatcher{regexp.MustCompile(os.Args[i+1])})\n\t\tcase \"-c\", \"--contains\":\n\t\t\tpartial := os.Args[i+1]\n\t\t\tfilter = filter.Include(inex.FuncMatcher(func(str string) bool {\n\t\t\t\treturn strings.Contains(str, partial)\n\t\t\t}))\n\t\tcase \"-C\", \"--not-contains\":\n\t\t\tpartial := os.Args[i+1]\n\t\t\tfilter = filter.Include(inex.FuncMatcher(func(str string) bool {\n\t\t\t\treturn !strings.Contains(str, partial)\n\t\t\t}))\n\t\tcase \"-t\", \"--type\":\n\t\t\ttyp := os.Args[i+1]\n\t\t\tswitch typ {\n\t\t\tcase \"f\", \"file\", \"d\", 
\"directory\":\n\t\t\tdefault:\n\t\t\t\tdie(\"invalid --type provided. must be \\\"f\\\", \\\"file\\\", \\\"d\\\" or \\\"directory\\\"\")\n\t\t\t}\n\t\t\tfilter = filter.Include(inex.FuncMatcher(func(str string) bool {\n\t\t\t\tstat, err := os.Stat(str)\n\t\t\t\tif err != nil {\n\t\t\t\t\tdie(\"failed to stat \\\"%s\\\": %s\\n\", str, err)\n\t\t\t\t}\n\t\t\t\tif typ == \"f\" || typ == \"file\" {\n\t\t\t\t\treturn !stat.IsDir()\n\t\t\t\t}\n\t\t\t\treturn stat.IsDir()\n\t\t\t}))\n\t\tcase \"-p\", \"--path\":\n\t\t\tif path != \".\" {\n\t\t\t\tdie(\"%s can only be provided once\\n\\n%s\", os.Args[i], help)\n\t\t\t}\n\t\t\tpath = os.Args[i+1]\n\t\tdefault:\n\t\t\tdie(\"unsupported operation \\\"%s\\\"\\n\\n%s\", os.Args[i], help)\n\t\t}\n\t}\n\tif filter.IsRoot() {\n\t\tdie(\"no filters provided\\n\\n%s\", help)\n\t}\n\tfilter = filter.Root()\n\tfilepath.Walk(path, func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t} else if filter.Match(path) {\n\t\t\tfmt.Println(path)\n\t\t}\n\t\treturn nil\n\t})\n}", "func HelloWorld(what string) string {\n\treturn fmt.Sprintf(\"Hello World %s\", what)\n}" ]
[ "0.5223018", "0.5174197", "0.50365174", "0.5033618", "0.5012377", "0.5009525", "0.49829808", "0.49779582", "0.49590465", "0.4917345", "0.48560345", "0.48281768", "0.4814344", "0.4811926", "0.47952038", "0.47935736", "0.47702038", "0.4769694", "0.47628325", "0.47418863", "0.47372526", "0.47325626", "0.47317016", "0.47299892", "0.47216678", "0.469438", "0.46904674", "0.46806896", "0.46730664", "0.4671758", "0.4658305", "0.46529198", "0.46466896", "0.46461922", "0.46458906", "0.46454167", "0.46440247", "0.4643601", "0.46427804", "0.46398044", "0.46271774", "0.4607342", "0.46056017", "0.46004826", "0.45978895", "0.4597425", "0.4594188", "0.45897308", "0.45716387", "0.45640087", "0.4546603", "0.45462903", "0.4542487", "0.45367554", "0.45348406", "0.45245045", "0.45154774", "0.45120183", "0.45107087", "0.45098522", "0.45098013", "0.45078897", "0.4505336", "0.45018086", "0.44834667", "0.4482464", "0.44786242", "0.44644767", "0.4461498", "0.44609082", "0.445278", "0.4450582", "0.44416603", "0.44400793", "0.44378236", "0.44372386", "0.44296128", "0.44271815", "0.4425819", "0.44254422", "0.44233727", "0.44214427", "0.44212914", "0.44187698", "0.44181868", "0.4415925", "0.4415335", "0.4409546", "0.4408866", "0.44079956", "0.44073397", "0.44057927", "0.44038546", "0.44027668", "0.43980008", "0.4393791", "0.43924198", "0.43912372", "0.43862715", "0.43813875", "0.43812874" ]
0.0
-1
TestMockValidity ensures that we don't go into a wild goose chase if our mock system gets screwed up
func TestMockValidity(t *testing.T) { nr := 50 _, hlp := agreement.WireAgreement(nr) hash, _ := crypto.RandEntropy(32) handler := agreement.NewHandler(hlp.Keys[0], *hlp.P) for i := 0; i < nr; i++ { a := message.MockAgreement(hash, 1, 3, hlp.Keys, hlp.P, i) if !assert.NoError(t, handler.Verify(a)) { t.FailNow() } } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func IsMockInvalid(cc ContractCall) bool {\n\treturn false\n}", "func TestMimeMessageValidity(t *testing.T) {\n\tm := MimeMessage{\n\t\tToAddress: \"[email protected]\",\n\t\tContent: []byte(\"This is my body. There are many like it but this one is mine.\")}\n\n\tif m.IsValid() != true {\n\t\tt.Error(\"Message should have been valid!\")\n\t}\n\n\tm.ToAddress = \"\"\n\tif m.IsValid() != false {\n\t\tt.Error(\"Message(2) should have been invalid!\")\n\t}\n\n\tm = MimeMessage{ToAddress: \"[email protected]\"}\n\tif m.IsValid() != false {\n\t\tt.Error(\"Message(3) should have been invalid!\")\n\t}\n}", "func mockNeverRun() bool { return false }", "func TestVerifySignedMessage(t *testing.T) {\n\tfor _, tc := range []struct {\n\t\tdesc string\n\t\tsettings *crypt.PkiSettings\n\t\tsetup func(mdb *mocks.MockDepsBundle, setupDone *bool) error\n\t\tmessageToSign string\n\t\tbase64Signature string\n\t\tPEMPublicKey string\n\t\texpectedError *testtools.ErrorSpec\n\t\texpectedValidity bool\n\t}{\n\t\t{\n\t\t\tdesc: \"invalid base64 signature\",\n\t\t\tsettings: &crypt.PkiSettings{\n\t\t\t\tAlgorithm: x509.ECDSA,\n\t\t\t\tPrivateKeyPath: \".prog/ecdsa_priv.key\",\n\t\t\t\tPublicKeyPath: \".prog/ecdsa.pub\",\n\t\t\t},\n\t\t\tsetup: func(mdb *mocks.MockDepsBundle, setupDone *bool) error {\n\t\t\t\treturn nil\n\t\t\t},\n\t\t\tmessageToSign: \"some other message\",\n\t\t\tbase64Signature: \"@#$^&*()_\",\n\t\t\tPEMPublicKey: \"\",\n\t\t\texpectedError: &testtools.ErrorSpec{\n\t\t\t\tType: \"base64.CorruptInputError\",\n\t\t\t\tMessage: \"illegal base64 data at input byte 0\",\n\t\t\t},\n\t\t\texpectedValidity: false,\n\t\t},\n\t\t{\n\t\t\tdesc: \"empty PEM key\",\n\t\t\tsettings: &crypt.PkiSettings{\n\t\t\t\tAlgorithm: x509.ECDSA,\n\t\t\t\tPrivateKeyPath: \".prog/ecdsa_priv.key\",\n\t\t\t\tPublicKeyPath: \".prog/ecdsa.pub\",\n\t\t\t},\n\t\t\tsetup: func(mdb *mocks.MockDepsBundle, setupDone *bool) error {\n\t\t\t\treturn nil\n\t\t\t},\n\t\t\tmessageToSign: \"some other 
message\",\n\t\t\tbase64Signature: \"abcdefgh\",\n\t\t\tPEMPublicKey: \"\",\n\t\t\texpectedError: &testtools.ErrorSpec{\n\t\t\t\tType: \"*errors.errorString\",\n\t\t\t\tMessage: \"No PEM data was found\",\n\t\t\t},\n\t\t\texpectedValidity: false,\n\t\t},\n\t\t{\n\t\t\tdesc: \"bad key data\",\n\t\t\tsettings: &crypt.PkiSettings{\n\t\t\t\tAlgorithm: x509.ECDSA,\n\t\t\t\tPrivateKeyPath: \".prog/ecdsa_priv.key\",\n\t\t\t\tPublicKeyPath: \".prog/ecdsa.pub\",\n\t\t\t},\n\t\t\tsetup: func(mdb *mocks.MockDepsBundle, setupDone *bool) error {\n\t\t\t\treturn nil\n\t\t\t},\n\t\t\tmessageToSign: \"some other message\",\n\t\t\tbase64Signature: \"abcdefgh\",\n\t\t\tPEMPublicKey: \"-----BEGIN INVALID DATA-----\\n\" +\n\t\t\t\t\"MTIzNDU2Nzg5MGFiY2RlZmdoaWprbG1ub3BxcnN0dXZ3eHl6\\n\" +\n\t\t\t\t\"-----END INVALID DATA-----\\n\",\n\t\t\texpectedError: &testtools.ErrorSpec{\n\t\t\t\tType: \"asn1.StructuralError\",\n\t\t\t\tMessage: \"asn1: structure \" +\n\t\t\t\t\t\"error: tags don't match (16 vs {class:0 \" +\n\t\t\t\t\t\"tag:17 \" +\n\t\t\t\t\t\"length:50 \" +\n\t\t\t\t\t\"isCompound:true}) {optional:false \" +\n\t\t\t\t\t\"explicit:false \" +\n\t\t\t\t\t\"application:false \" +\n\t\t\t\t\t\"defaultValue:<nil> \" +\n\t\t\t\t\t\"tag:<nil> \" +\n\t\t\t\t\t\"stringType:0 \" +\n\t\t\t\t\t\"timeType:0 \" +\n\t\t\t\t\t\"set:false \" +\n\t\t\t\t\t\"omitEmpty:false} publicKeyInfo @2\",\n\t\t\t},\n\t\t\texpectedValidity: false,\n\t\t},\n\t\t{\n\t\t\tdesc: \"invalid signature\",\n\t\t\tsettings: &crypt.PkiSettings{\n\t\t\t\tAlgorithm: x509.ECDSA,\n\t\t\t\tPrivateKeyPath: \".prog/ecdsa_priv.key\",\n\t\t\t\tPublicKeyPath: \".prog/ecdsa.pub\",\n\t\t\t},\n\t\t\tsetup: func(mdb *mocks.MockDepsBundle, setupDone *bool) error {\n\t\t\t\treturn nil\n\t\t\t},\n\t\t\tmessageToSign: \"some other message\",\n\t\t\tbase64Signature: \"abcdefgh\",\n\t\t\tPEMPublicKey: \"-----BEGIN ECDSA PUBLIC KEY-----\\n\" +\n\t\t\t\t\"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE7WzVjtn9Gk+WHr5xbv8XMvooqU25\\n\" 
+\n\t\t\t\t\"BhgNjZ/vHZLBdVtCOjk4KxjS1UBfQm0c3TRxWBl3hj2AmnJbCrnGofMHBQ==\\n\" +\n\t\t\t\t\"-----END ECDSA PUBLIC KEY-----\\n\",\n\t\t\texpectedError: &testtools.ErrorSpec{\n\t\t\t\tType: \"asn1.SyntaxError\",\n\t\t\t\tMessage: \"asn1: syntax error: truncated tag or length\",\n\t\t\t},\n\t\t\texpectedValidity: false,\n\t\t},\n\t\t{\n\t\t\tdesc: \"ecdsa key for rsa mode\",\n\t\t\tsettings: &crypt.PkiSettings{\n\t\t\t\tAlgorithm: x509.RSA,\n\t\t\t\tPrivateKeyPath: \".prog/ecdsa_priv.key\",\n\t\t\t\tPublicKeyPath: \".prog/ecdsa.pub\",\n\t\t\t},\n\t\t\tsetup: func(mdb *mocks.MockDepsBundle, setupDone *bool) error {\n\t\t\t\treturn nil\n\t\t\t},\n\t\t\tmessageToSign: \"some other message\",\n\t\t\tbase64Signature: \"N3SuIdWI7XlXDteTmcOZUd2OBacyUWY+/+A8SC4QUBz9rXnldBqXha6YyGwnTuizxuy6quQ2QDFdtW16dj7EQk3lozfngskyhc2r86q3AUbdFDvrQVphMQhzsgBhHVoMjCL/YRfvtzCTWhBxegjVMLraLDCBb8IZTIqcMYafYyeJTvAnjBuntlZ+14TDuTt14Uqz85T04CXxBEqlIXMMKpTc01ST4Jsxz5HLO+At1htXp5eHOUFtQSilm3G7iO8ynhgPcXHDWfMAWu6VySUoHWCG70pJaCq6ehF7223t0UFOCqAyDyyQyP9yeUHj8F75SPSxfJm8iKXGx2LND/qLYw==\",\n\t\t\tPEMPublicKey: \"-----BEGIN RSA PUBLIC KEY-----\\n\" +\n\t\t\t\t\"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE7WzVjtn9Gk+WHr5xbv8XMvooqU25\\n\" +\n\t\t\t\t\"BhgNjZ/vHZLBdVtCOjk4KxjS1UBfQm0c3TRxWBl3hj2AmnJbCrnGofMHBQ==\\n\" +\n\t\t\t\t\"-----END RSA PUBLIC KEY-----\\n\",\n\t\t\texpectedError: &testtools.ErrorSpec{\n\t\t\t\tType: \"*errors.errorString\",\n\t\t\t\tMessage: \"Expecting a *rsa.PublicKey, but encountered a *ecdsa.PublicKey instead\",\n\t\t\t},\n\t\t\texpectedValidity: false,\n\t\t},\n\t\t{\n\t\t\tdesc: \"rsa key for ecdsa mode\",\n\t\t\tsettings: &crypt.PkiSettings{\n\t\t\t\tAlgorithm: x509.ECDSA,\n\t\t\t\tPrivateKeyPath: \".prog/ecdsa_priv.key\",\n\t\t\t\tPublicKeyPath: \".prog/ecdsa.pub\",\n\t\t\t},\n\t\t\tsetup: func(mdb *mocks.MockDepsBundle, setupDone *bool) error {\n\t\t\t\treturn nil\n\t\t\t},\n\t\t\tmessageToSign: \"some other message\",\n\t\t\tbase64Signature: 
\"MEYCIQDPM0fc/PFauoZzpltH3RpWtlaqRnL0gFk5WFiLMrFqrwIhAIDvlBozU6Ky2UC9xOSq3YZ5iFuO356t9RnHOElaaXFJ\",\n\t\t\tPEMPublicKey: \"-----BEGIN RSA PUBLIC KEY-----\\n\" +\n\t\t\t\t\"MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzCTTFKQBHfTN8jW6q8PT\\n\" +\n\t\t\t\t\"HNZKWnRPxSt9kpgWmyqFaZnEUipgoKGAxSIsVrl2PJSm5OlgkVzx+MY+LWM64VKM\\n\" +\n\t\t\t\t\"bRpUUGJR3zdMNhwZQX0hjOpLpVJvUwD78utVs8vijrU7sH48usFiaZQYjy4m4hQh\\n\" +\n\t\t\t\t\"63/x4h3KVz7YqUnlRMzYJFT43+AwYzYuEpzWRxtW7IObJPtjtmYVoqva98fF6aj5\\n\" +\n\t\t\t\t\"uHAsvaAgZGBalHXmCiPzKiGU/halzXSPvyJ2Cqz2aUqMHgwi/2Ip4z/mrfX+mUTa\\n\" +\n\t\t\t\t\"S+LyBy7GgqJ5vbkGArMagJIc0eARF60r6Uf483xh17oniABdLJy4qlLf6PcEU+ut\\n\" +\n\t\t\t\t\"EwIDAQAB\\n\" +\n\t\t\t\t\"-----END RSA PUBLIC KEY-----\\n\",\n\t\t\texpectedError: &testtools.ErrorSpec{\n\t\t\t\tType: \"*errors.errorString\",\n\t\t\t\tMessage: \"Expecting a *ecdsa.PublicKey, but encountered a *rsa.PublicKey instead\",\n\t\t\t},\n\t\t\texpectedValidity: false,\n\t\t},\n\t} {\n\t\tt.Run(fmt.Sprintf(\"Subtest: %s\", tc.desc), func(tt *testing.T) {\n\t\t\tmockDepsBundle := mocks.NewDefaultMockDeps(\"\", []string{\"progname\"}, \"/home/user\", nil)\n\t\t\treturnedNormally := false\n\t\t\tvar tooling *crypt.CryptoTooling\n\t\t\tvar actualErr error\n\t\t\tvar actualValidity bool\n\t\t\terr := mockDepsBundle.InvokeCallInMockedEnv(func() error {\n\t\t\t\tsetupComplete := false\n\t\t\t\tinnerErr := tc.setup(mockDepsBundle, &setupComplete)\n\t\t\t\tif innerErr != nil {\n\t\t\t\t\treturn innerErr\n\t\t\t\t}\n\t\t\t\tvar toolingErr error\n\t\t\t\ttooling, toolingErr = crypt.GetCryptoTooling(mockDepsBundle.Deps, tc.settings)\n\t\t\t\tif toolingErr != nil {\n\t\t\t\t\treturn toolingErr\n\t\t\t\t}\n\t\t\t\tsetupComplete = true\n\t\t\t\tactualValidity, actualErr = tooling.VerifySignedMessage(tc.messageToSign, tc.base64Signature, tc.PEMPublicKey)\n\t\t\t\treturnedNormally = true\n\t\t\t\treturn nil\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\ttt.Errorf(\"Unexpected error calling 
mockDepsBundle.InvokeCallInMockedEnv(): %s\", err.Error())\n\t\t\t}\n\t\t\tif exitStatus := mockDepsBundle.GetExitStatus(); (exitStatus != 0) || !returnedNormally {\n\t\t\t\ttt.Error(\"EncodeAndSaveKey() should not have paniced or called os.Exit.\")\n\t\t\t}\n\t\t\tif (mockDepsBundle.OutBuf.String() != \"\") || (mockDepsBundle.ErrBuf.String() != \"\") {\n\t\t\t\ttt.Errorf(\"EncodeAndSaveKey() should not have output any data. Saw stdout:\\n%s\\nstderr:\\n%s\", mockDepsBundle.OutBuf.String(), mockDepsBundle.ErrBuf.String())\n\t\t\t}\n\t\t\tif err := tc.expectedError.EnsureMatches(actualErr); err != nil {\n\t\t\t\ttt.Error(err.Error())\n\t\t\t}\n\t\t\tif tc.expectedError == nil {\n\t\t\t\tif actualValidity != tc.expectedValidity {\n\t\t\t\t\ttt.Errorf(\"Signature is %#v when %#v expected\", actualValidity, tc.expectedValidity)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif tc.expectedValidity {\n\t\t\t\t\ttt.Error(\"TEST CASE INVALID. Should not expect \\\"valid\\\".\")\n\t\t\t\t}\n\t\t\t\tif actualValidity {\n\t\t\t\t\ttt.Error(\"Error was expected. 
Should not report \\\"valid\\\".\")\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}", "func (m *MockMounter) IsCorruptedMnt(err error) bool {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"IsCorruptedMnt\", err)\n\tret0, _ := ret[0].(bool)\n\treturn ret0\n}", "func TestMockFileControl(t *testing.T) {\n\tfc := mockFileControl()\n\tif err := fc.Validate(); err != nil {\n\t\tt.Error(\"mockFileControl does not validate and will break other tests: \", err)\n\t}\n\tif fc.recordType != \"99\" {\n\t\tt.Error(\"recordType does not validate\")\n\t}\n\tif fc.CashLetterCount != 1 {\n\t\tt.Error(\"CashLetterCount does not validate\")\n\t}\n\tif fc.TotalRecordCount != 7 {\n\t\tt.Error(\"TotalRecordCount does not validate\")\n\t}\n\tif fc.TotalItemCount != 1 {\n\t\tt.Error(\"TotalItemCount does not validate\")\n\t}\n\tif fc.FileTotalAmount != 100000 {\n\t\tt.Error(\"FileTotalAmount does not validate\")\n\t}\n\tif fc.ImmediateOriginContactName != \"Contact Name\" {\n\t\tt.Error(\"ImmediateOriginContactName does not validate\")\n\t}\n\tif fc.ImmediateOriginContactPhoneNumber != \"5558675552\" {\n\t\tt.Error(\"ImmediateOriginContactPhoneNumber does not validate\")\n\t}\n\tif fc.CreditTotalIndicator != 0 {\n\t\tt.Error(\"CreditTotalIndicator does not validate\")\n\t}\n}", "func (m *MockRepo) IsValid() error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"IsValid\")\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func TestAdjustmentAmountValid(t *testing.T) {\n\tadj := mockAdjustment()\n\tadj.RemittanceAmount.Amount = \"X,\"\n\n\terr := adj.Validate()\n\n\trequire.EqualError(t, err, fieldError(\"Amount\", ErrNonAmount, adj.RemittanceAmount.Amount).Error())\n}", "func newMockKvCapabilityVerifier(t mockConstructorTestingTnewMockKvCapabilityVerifier) *mockKvCapabilityVerifier {\n\tmock := &mockKvCapabilityVerifier{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func TestSimulateValidatorsChange(t *testing.T) {\n\tnPeers := 7\n\tnVals := 
4\n\tcss, genDoc, config, cleanup := randConsensusNetWithPeers(\n\t\tnVals,\n\t\tnPeers,\n\t\t\"replay_test\",\n\t\tnewMockTickerFunc(true),\n\t\tnewPersistentKVStoreWithPath)\n\tsim.Config = config\n\tsim.GenesisState, _ = sm.MakeGenesisState(genDoc)\n\tsim.CleanupFunc = cleanup\n\n\tpartSize := types.BlockPartSizeBytes\n\n\tnewRoundCh := subscribe(css[0].eventBus, types.EventQueryNewRound)\n\tproposalCh := subscribe(css[0].eventBus, types.EventQueryCompleteProposal)\n\n\tvss := make([]*validatorStub, nPeers)\n\tfor i := 0; i < nPeers; i++ {\n\t\tvss[i] = newValidatorStub(css[i].privValidator, int32(i))\n\t}\n\theight, round := css[0].Height, css[0].Round\n\n\t// start the machine\n\tstartTestRound(css[0], height, round)\n\tincrementHeight(vss...)\n\tensureNewRound(newRoundCh, height, 0)\n\tensureNewProposal(proposalCh, height, round)\n\trs := css[0].GetRoundState()\n\tsignAddVotes(css[0], tmproto.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:nVals]...)\n\tensureNewRound(newRoundCh, height+1, 0)\n\n\t// HEIGHT 2\n\theight++\n\tincrementHeight(vss...)\n\tnewValidatorPubKey1, err := css[nVals].privValidator.GetPubKey()\n\trequire.NoError(t, err)\n\tvalPubKey1ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey1)\n\trequire.NoError(t, err)\n\tnewValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower)\n\terr = assertMempool(css[0].txNotifier).CheckTx(newValidatorTx1, nil, mempl.TxInfo{})\n\tassert.Nil(t, err)\n\tpropBlock, _ := css[0].createProposalBlock() // changeProposer(t, cs1, vs2)\n\tpropBlockParts := propBlock.MakePartSet(partSize)\n\tblockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()}\n\n\tproposal := types.NewProposal(vss[1].Height, round, -1, blockID)\n\tp := proposal.ToProto()\n\tif err := vss[1].SignProposal(config.ChainID(), p); err != nil {\n\t\tt.Fatal(\"failed to sign bad proposal\", err)\n\t}\n\tproposal.Signature = p.Signature\n\n\t// set the proposal block\n\tif 
err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, \"some peer\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tensureNewProposal(proposalCh, height, round)\n\trs = css[0].GetRoundState()\n\tsignAddVotes(css[0], tmproto.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:nVals]...)\n\tensureNewRound(newRoundCh, height+1, 0)\n\n\t// HEIGHT 3\n\theight++\n\tincrementHeight(vss...)\n\tupdateValidatorPubKey1, err := css[nVals].privValidator.GetPubKey()\n\trequire.NoError(t, err)\n\tupdatePubKey1ABCI, err := cryptoenc.PubKeyToProto(updateValidatorPubKey1)\n\trequire.NoError(t, err)\n\tupdateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25)\n\terr = assertMempool(css[0].txNotifier).CheckTx(updateValidatorTx1, nil, mempl.TxInfo{})\n\tassert.Nil(t, err)\n\tpropBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2)\n\tpropBlockParts = propBlock.MakePartSet(partSize)\n\tblockID = types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()}\n\n\tproposal = types.NewProposal(vss[2].Height, round, -1, blockID)\n\tp = proposal.ToProto()\n\tif err := vss[2].SignProposal(config.ChainID(), p); err != nil {\n\t\tt.Fatal(\"failed to sign bad proposal\", err)\n\t}\n\tproposal.Signature = p.Signature\n\n\t// set the proposal block\n\tif err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, \"some peer\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tensureNewProposal(proposalCh, height, round)\n\trs = css[0].GetRoundState()\n\tsignAddVotes(css[0], tmproto.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:nVals]...)\n\tensureNewRound(newRoundCh, height+1, 0)\n\n\t// HEIGHT 4\n\theight++\n\tincrementHeight(vss...)\n\tnewValidatorPubKey2, err := css[nVals+1].privValidator.GetPubKey()\n\trequire.NoError(t, err)\n\tnewVal2ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey2)\n\trequire.NoError(t, err)\n\tnewValidatorTx2 := 
kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower)\n\terr = assertMempool(css[0].txNotifier).CheckTx(newValidatorTx2, nil, mempl.TxInfo{})\n\tassert.Nil(t, err)\n\tnewValidatorPubKey3, err := css[nVals+2].privValidator.GetPubKey()\n\trequire.NoError(t, err)\n\tnewVal3ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey3)\n\trequire.NoError(t, err)\n\tnewValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower)\n\terr = assertMempool(css[0].txNotifier).CheckTx(newValidatorTx3, nil, mempl.TxInfo{})\n\tassert.Nil(t, err)\n\tpropBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2)\n\tpropBlockParts = propBlock.MakePartSet(partSize)\n\tblockID = types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()}\n\tnewVss := make([]*validatorStub, nVals+1)\n\tcopy(newVss, vss[:nVals+1])\n\tsort.Sort(ValidatorStubsByPower(newVss))\n\n\tvalIndexFn := func(cssIdx int) int {\n\t\tfor i, vs := range newVss {\n\t\t\tvsPubKey, err := vs.GetPubKey()\n\t\t\trequire.NoError(t, err)\n\n\t\t\tcssPubKey, err := css[cssIdx].privValidator.GetPubKey()\n\t\t\trequire.NoError(t, err)\n\n\t\t\tif vsPubKey.Equals(cssPubKey) {\n\t\t\t\treturn i\n\t\t\t}\n\t\t}\n\t\tpanic(fmt.Sprintf(\"validator css[%d] not found in newVss\", cssIdx))\n\t}\n\n\tselfIndex := valIndexFn(0)\n\n\tproposal = types.NewProposal(vss[3].Height, round, -1, blockID)\n\tp = proposal.ToProto()\n\tif err := vss[3].SignProposal(config.ChainID(), p); err != nil {\n\t\tt.Fatal(\"failed to sign bad proposal\", err)\n\t}\n\tproposal.Signature = p.Signature\n\n\t// set the proposal block\n\tif err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, \"some peer\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tensureNewProposal(proposalCh, height, round)\n\n\tremoveValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, 0)\n\terr = assertMempool(css[0].txNotifier).CheckTx(removeValidatorTx2, nil, mempl.TxInfo{})\n\tassert.Nil(t, err)\n\n\trs = css[0].GetRoundState()\n\tfor i 
:= 0; i < nVals+1; i++ {\n\t\tif i == selfIndex {\n\t\t\tcontinue\n\t\t}\n\t\tsignAddVotes(css[0], tmproto.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), newVss[i])\n\t}\n\n\tensureNewRound(newRoundCh, height+1, 0)\n\n\t// HEIGHT 5\n\theight++\n\tincrementHeight(vss...)\n\t// Reflect the changes to vss[nVals] at height 3 and resort newVss.\n\tnewVssIdx := valIndexFn(nVals)\n\tnewVss[newVssIdx].VotingPower = 25\n\tsort.Sort(ValidatorStubsByPower(newVss))\n\tselfIndex = valIndexFn(0)\n\tensureNewProposal(proposalCh, height, round)\n\trs = css[0].GetRoundState()\n\tfor i := 0; i < nVals+1; i++ {\n\t\tif i == selfIndex {\n\t\t\tcontinue\n\t\t}\n\t\tsignAddVotes(css[0], tmproto.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), newVss[i])\n\t}\n\tensureNewRound(newRoundCh, height+1, 0)\n\n\t// HEIGHT 6\n\theight++\n\tincrementHeight(vss...)\n\tremoveValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, 0)\n\terr = assertMempool(css[0].txNotifier).CheckTx(removeValidatorTx3, nil, mempl.TxInfo{})\n\tassert.Nil(t, err)\n\tpropBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2)\n\tpropBlockParts = propBlock.MakePartSet(partSize)\n\tblockID = types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()}\n\tnewVss = make([]*validatorStub, nVals+3)\n\tcopy(newVss, vss[:nVals+3])\n\tsort.Sort(ValidatorStubsByPower(newVss))\n\n\tselfIndex = valIndexFn(0)\n\tproposal = types.NewProposal(vss[1].Height, round, -1, blockID)\n\tp = proposal.ToProto()\n\tif err := vss[1].SignProposal(config.ChainID(), p); err != nil {\n\t\tt.Fatal(\"failed to sign bad proposal\", err)\n\t}\n\tproposal.Signature = p.Signature\n\n\t// set the proposal block\n\tif err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, \"some peer\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tensureNewProposal(proposalCh, height, round)\n\trs = css[0].GetRoundState()\n\tfor i := 0; i < nVals+3; i++ {\n\t\tif i == selfIndex 
{\n\t\t\tcontinue\n\t\t}\n\t\tsignAddVotes(css[0], tmproto.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), newVss[i])\n\t}\n\tensureNewRound(newRoundCh, height+1, 0)\n\n\tsim.Chain = make([]*types.Block, 0)\n\tsim.Commits = make([]*types.Commit, 0)\n\tfor i := 1; i <= numBlocks; i++ {\n\t\tsim.Chain = append(sim.Chain, css[0].blockStore.LoadBlock(int64(i)))\n\t\tsim.Commits = append(sim.Commits, css[0].blockStore.LoadBlockCommit(int64(i)))\n\t}\n}", "func (m *MockHasher) IsValid(arg0, arg1 string) bool {\n\tret := m.ctrl.Call(m, \"IsValid\", arg0, arg1)\n\tret0, _ := ret[0].(bool)\n\treturn ret0\n}", "func (m *MockCacheOptions) Validate() error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Validate\")\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func TestSignContractFailure(t *testing.T) {\n\tsignatureHelper(t, true)\n}", "func TestMockProxyApp(t *testing.T) {\n\tsim.CleanupFunc() // clean the test env created in TestSimulateValidatorsChange\n\tlogger := log.TestingLogger()\n\tvalidTxs, invalidTxs := 0, 0\n\ttxIndex := 0\n\n\tassert.NotPanics(t, func() {\n\t\tabciResWithEmptyDeliverTx := new(tmstate.ABCIResponses)\n\t\tabciResWithEmptyDeliverTx.DeliverTxs = make([]*abci.ResponseDeliverTx, 0)\n\t\tabciResWithEmptyDeliverTx.DeliverTxs = append(abciResWithEmptyDeliverTx.DeliverTxs, &abci.ResponseDeliverTx{})\n\n\t\t// called when saveABCIResponses:\n\t\tbytes, err := proto.Marshal(abciResWithEmptyDeliverTx)\n\t\trequire.NoError(t, err)\n\t\tloadedAbciRes := new(tmstate.ABCIResponses)\n\n\t\t// this also happens sm.LoadABCIResponses\n\t\terr = proto.Unmarshal(bytes, loadedAbciRes)\n\t\trequire.NoError(t, err)\n\n\t\tmock := newMockProxyApp([]byte(\"mock_hash\"), loadedAbciRes)\n\n\t\tabciRes := new(tmstate.ABCIResponses)\n\t\tabciRes.DeliverTxs = make([]*abci.ResponseDeliverTx, len(loadedAbciRes.DeliverTxs))\n\t\t// Execute transactions and get hash.\n\t\tproxyCb := func(req *abci.Request, res *abci.Response) {\n\t\t\tif r, ok := 
res.Value.(*abci.Response_DeliverTx); ok {\n\t\t\t\t// TODO: make use of res.Log\n\t\t\t\t// TODO: make use of this info\n\t\t\t\t// Blocks may include invalid txs.\n\t\t\t\ttxRes := r.DeliverTx\n\t\t\t\tif txRes.Code == abci.CodeTypeOK {\n\t\t\t\t\tvalidTxs++\n\t\t\t\t} else {\n\t\t\t\t\tlogger.Debug(\"Invalid tx\", \"code\", txRes.Code, \"log\", txRes.Log)\n\t\t\t\t\tinvalidTxs++\n\t\t\t\t}\n\t\t\t\tabciRes.DeliverTxs[txIndex] = txRes\n\t\t\t\ttxIndex++\n\t\t\t}\n\t\t}\n\t\tmock.SetResponseCallback(proxyCb)\n\n\t\tsomeTx := []byte(\"tx\")\n\t\tmock.DeliverTxAsync(abci.RequestDeliverTx{Tx: someTx})\n\t})\n\tassert.True(t, validTxs == 1)\n\tassert.True(t, invalidTxs == 0)\n}", "func (m *DelegationTokenFactoryMock) MinimockVerifyInspect() {\n\tfor _, e := range m.VerifyMock.expectations {\n\t\tif mm_atomic.LoadUint64(&e.Counter) < 1 {\n\t\t\tm.t.Errorf(\"Expected call to DelegationTokenFactoryMock.Verify with params: %#v\", *e.params)\n\t\t}\n\t}\n\n\t// if default expectation was set then invocations count should be greater than zero\n\tif m.VerifyMock.defaultExpectation != nil && mm_atomic.LoadUint64(&m.afterVerifyCounter) < 1 {\n\t\tif m.VerifyMock.defaultExpectation.params == nil {\n\t\t\tm.t.Error(\"Expected call to DelegationTokenFactoryMock.Verify\")\n\t\t} else {\n\t\t\tm.t.Errorf(\"Expected call to DelegationTokenFactoryMock.Verify with params: %#v\", *m.VerifyMock.defaultExpectation.params)\n\t\t}\n\t}\n\t// if func was set then invocations count should be greater than zero\n\tif m.funcVerify != nil && mm_atomic.LoadUint64(&m.afterVerifyCounter) < 1 {\n\t\tm.t.Error(\"Expected call to DelegationTokenFactoryMock.Verify\")\n\t}\n}", "func (mysuit *MySuite) TestSmcManager_ForbidInternalContract(c *check.C) {\n\tutest.Init(orgID)\n\tcontractOwner := utest.DeployContract(c, contractName, orgID, contractMethods, contractInterfaces)\n\ttest := NewTestObject(contractOwner)\n\n\tcontract := std.Contract{\n\t\tAddress: 
test.obj.sdk.Helper().BlockChainHelper().CalcContractAddress(\"a\", \"1.0\", orgID),\n\t\tCodeHash: []byte{},\n\t\tEffectHeight: 0,\n\t\tLoseHeight: 0,\n\t\tMethods: nil,\n\t\tInterfaces: nil,\n\t}\n\ttest.obj.sdk.Helper().StateHelper().Set(\"/contract/\"+contract.Address, &contract)\n\n\tcontractHasForbid := std.Contract{\n\t\tAddress: \"testForbid\",\n\t\tCodeHash: []byte{},\n\t\tEffectHeight: 0,\n\t\tLoseHeight: 5,\n\t\tMethods: nil,\n\t\tInterfaces: nil,\n\t}\n\ttest.obj.sdk.Helper().StateHelper().Set(\"/contract/\"+contractHasForbid.Address, &contractHasForbid)\n\n\ttestCases := []struct {\n\t\terr types.Error\n\t\tcontractAddr types.Address\n\t\teffectHeight int64\n\t}{\n\t\t{types.Error{ErrorCode: types.ErrNoAuthorization}, contract.Address, test.obj.sdk.Block().Height() + 3},\n\t\t{types.Error{ErrorCode: types.ErrInvalidAddress}, \"\", test.obj.sdk.Block().Height() + 4},\n\t\t{types.Error{ErrorCode: types.ErrInvalidAddress}, contract.Address + \"test\", test.obj.sdk.Block().Height() + 5},\n\t\t{types.Error{ErrorCode: types.ErrInvalidAddress}, contractHasForbid.Address, test.obj.sdk.Block().Height() + 5},\n\t\t{types.Error{ErrorCode: types.ErrInvalidParameter}, contractHasForbid.Address, test.obj.sdk.Block().Height() - 1},\n\t}\n\n\tfor i, v := range testCases {\n\t\terr := test.run().setSender(contractOwner).ForbidInternalContract(v.contractAddr, v.effectHeight)\n\t\tfmt.Println(i)\n\t\tutest.AssertError(err, v.err.ErrorCode)\n\t\tif err.ErrorCode == types.CodeOK {\n\t\t\tcon := *test.obj.sdk.Helper().StateHelper().Get(\"/contract/\"+contract.Address, new(std.Contract)).(*std.Contract)\n\t\t\tutest.Assert(con.LoseHeight == v.effectHeight)\n\t\t}\n\t}\n}", "func TestDelegatorProxyValidatorShares7Steps(t *testing.T) {\n\n}", "func TestAdjustmentAmountRequired(t *testing.T) {\n\tadj := mockAdjustment()\n\tadj.RemittanceAmount.Amount = \"\"\n\n\terr := adj.Validate()\n\n\trequire.EqualError(t, err, fieldError(\"Amount\", ErrFieldRequired).Error())\n}", "func 
TestValidBlockchain(t *testing.T) {\n\tblockchain := Blockchain{}\n\tblockchain.AddBlock(\"hello\")\n\tblockchain.AddBlock(\"data\")\n\n\tassertEq(t, blockchain.IsValid(), true)\n}", "func (e TestSpecificationValidationError) Cause() error { return e.cause }", "func (c Chkr) Expect(v validator, args ...interface{}) {\n\tif c.runTest(v, args...) {\n\t\tc.Fail()\n\t}\n}", "func (this *PoolTestSuite) TestValidationFailureOnReturnFreesCapacity() {\n\tthis.factory.setValid(false) // Validate will always fail\n\tthis.factory.enableValidation = true\n\tthis.pool.Config.MaxTotal = 2\n\tthis.pool.Config.MaxWaitMillis = int64(1500)\n\tthis.pool.Config.TestOnReturn = true\n\tthis.pool.Config.TestOnBorrow = false\n\t// Borrow an instance and hold if for 5 seconds\n\tch1 := waitTestGoroutine(this.pool, 5000)\n\t// Borrow another instance and return it after 500 ms (validation will fail)\n\tch2 := waitTestGoroutine(this.pool, 500)\n\tsleep(50)\n\t// Try to borrow an object\n\tobj := this.NoErrorWithResult(this.pool.BorrowObject())\n\tthis.NoError(this.pool.ReturnObject(obj))\n\t<-ch1\n\tclose(ch1)\n\t<-ch2\n\tclose(ch2)\n}", "func (s *KeeperTestSuite) TestHandleNewValidator() {\n\tctx := s.ctx\n\n\taddrDels := simtestutil.AddTestAddrsIncremental(s.bankKeeper, s.stakingKeeper, ctx, 1, s.stakingKeeper.TokensFromConsensusPower(ctx, 0))\n\tvalAddrs := simtestutil.ConvertAddrsToValAddrs(addrDels)\n\tpks := simtestutil.CreateTestPubKeys(1)\n\taddr, val := valAddrs[0], pks[0]\n\ttstaking := stakingtestutil.NewHelper(s.T(), ctx, s.stakingKeeper)\n\tctx = ctx.WithBlockHeight(s.slashingKeeper.SignedBlocksWindow(ctx) + 1)\n\n\t// Validator created\n\tamt := tstaking.CreateValidatorWithValPower(addr, val, 100, true)\n\n\tstaking.EndBlocker(ctx, s.stakingKeeper)\n\ts.Require().Equal(\n\t\ts.bankKeeper.GetAllBalances(ctx, sdk.AccAddress(addr)),\n\t\tsdk.NewCoins(sdk.NewCoin(s.stakingKeeper.GetParams(ctx).BondDenom, InitTokens.Sub(amt))),\n\t)\n\ts.Require().Equal(amt, 
s.stakingKeeper.Validator(ctx, addr).GetBondedTokens())\n\n\t// Now a validator, for two blocks\n\ts.slashingKeeper.HandleValidatorSignature(ctx, val.Address(), 100, true)\n\tctx = ctx.WithBlockHeight(s.slashingKeeper.SignedBlocksWindow(ctx) + 2)\n\ts.slashingKeeper.HandleValidatorSignature(ctx, val.Address(), 100, false)\n\n\tinfo, found := s.slashingKeeper.GetValidatorSigningInfo(ctx, sdk.ConsAddress(val.Address()))\n\ts.Require().True(found)\n\ts.Require().Equal(s.slashingKeeper.SignedBlocksWindow(ctx)+1, info.StartHeight)\n\ts.Require().Equal(int64(2), info.IndexOffset)\n\ts.Require().Equal(int64(1), info.MissedBlocksCounter)\n\ts.Require().Equal(time.Unix(0, 0).UTC(), info.JailedUntil)\n\n\t// validator should be bonded still, should not have been jailed or slashed\n\tvalidator, _ := s.stakingKeeper.GetValidatorByConsAddr(ctx, sdk.GetConsAddress(val))\n\ts.Require().Equal(stakingtypes.Bonded, validator.GetStatus())\n\tbondPool := s.stakingKeeper.GetBondedPool(ctx)\n\texpTokens := s.stakingKeeper.TokensFromConsensusPower(ctx, 100)\n\t// adding genesis validator tokens\n\texpTokens = expTokens.Add(s.stakingKeeper.TokensFromConsensusPower(ctx, 1))\n\ts.Require().True(expTokens.Equal(s.bankKeeper.GetBalance(ctx, bondPool.GetAddress(), s.stakingKeeper.BondDenom(ctx)).Amount))\n}", "func (m *SignatureKeyHolderMock) ValidateCallCounters() {\n\n\tif !m.AsByteStringFinished() {\n\t\tm.t.Fatal(\"Expected call to SignatureKeyHolderMock.AsByteString\")\n\t}\n\n\tif !m.AsBytesFinished() {\n\t\tm.t.Fatal(\"Expected call to SignatureKeyHolderMock.AsBytes\")\n\t}\n\n\tif !m.EqualsFinished() {\n\t\tm.t.Fatal(\"Expected call to SignatureKeyHolderMock.Equals\")\n\t}\n\n\tif !m.FixedByteSizeFinished() {\n\t\tm.t.Fatal(\"Expected call to SignatureKeyHolderMock.FixedByteSize\")\n\t}\n\n\tif !m.FoldToUint64Finished() {\n\t\tm.t.Fatal(\"Expected call to SignatureKeyHolderMock.FoldToUint64\")\n\t}\n\n\tif !m.GetSignMethodFinished() {\n\t\tm.t.Fatal(\"Expected call to 
SignatureKeyHolderMock.GetSignMethod\")\n\t}\n\n\tif !m.GetSignatureKeyMethodFinished() {\n\t\tm.t.Fatal(\"Expected call to SignatureKeyHolderMock.GetSignatureKeyMethod\")\n\t}\n\n\tif !m.GetSignatureKeyTypeFinished() {\n\t\tm.t.Fatal(\"Expected call to SignatureKeyHolderMock.GetSignatureKeyType\")\n\t}\n\n\tif !m.ReadFinished() {\n\t\tm.t.Fatal(\"Expected call to SignatureKeyHolderMock.Read\")\n\t}\n\n\tif !m.WriteToFinished() {\n\t\tm.t.Fatal(\"Expected call to SignatureKeyHolderMock.WriteTo\")\n\t}\n\n}", "func TestMockUserPayeeEndorsement(t *testing.T) {\n\tupe := mockUserPayeeEndorsement()\n\tif err := upe.Validate(); err != nil {\n\t\tt.Error(\"mockUserGeneral does not validate and will break other tests: \", err)\n\t}\n\tif upe.recordType != \"68\" {\n\t\tt.Error(\"recordType does not validate\")\n\t}\n\tif upe.OwnerIdentifierIndicator != 3 {\n\t\tt.Error(\"OwnerIdentifierIndicator does not validate\")\n\t}\n\tif upe.OwnerIdentifier != \"230918276\" {\n\t\tt.Error(\"OwnerIdentifier does not validate\")\n\t}\n\tif upe.OwnerIdentifierModifier != \"ZZ1\" {\n\t\tt.Error(\"OwnerIdentifierModifier does not validate\")\n\t}\n\tif upe.UserRecordFormatType != \"001\" {\n\t\tt.Error(\"UserRecordFormatType does not validate\")\n\t}\n\tif upe.FormatTypeVersionLevel != \"1\" {\n\t\tt.Error(\"FormatTypeVersionLevel does not validate\")\n\t}\n\tif upe.LengthUserData != \"0000290\" {\n\t\tt.Error(\"LengthUserData does not validate\")\n\t}\n\tif upe.PayeeName != \"Payee Name\" {\n\t\tt.Error(\"PayeeName does not validate\")\n\t}\n\n\t_ = additionalUPEFields(upe, t)\n}", "func mockAlwaysRun() bool { return true }", "func TestCurrencyInstructedAmountValid(t *testing.T) {\n\tcia := mockCurrencyInstructedAmount()\n\tcia.Amount = \"1-0\"\n\n\terr := cia.Validate()\n\n\trequire.EqualError(t, err, fieldError(\"Amount\", ErrNonAmount, cia.Amount).Error())\n}", "func (m *ParcelMock) ValidateCallCounters() {\n\n\tif !m.AllowedSenderObjectAndRoleFinished() {\n\t\tm.t.Fatal(\"Expected 
call to ParcelMock.AllowedSenderObjectAndRole\")\n\t}\n\n\tif !m.ContextFinished() {\n\t\tm.t.Fatal(\"Expected call to ParcelMock.Context\")\n\t}\n\n\tif !m.DefaultRoleFinished() {\n\t\tm.t.Fatal(\"Expected call to ParcelMock.DefaultRole\")\n\t}\n\n\tif !m.DefaultTargetFinished() {\n\t\tm.t.Fatal(\"Expected call to ParcelMock.DefaultTarget\")\n\t}\n\n\tif !m.DelegationTokenFinished() {\n\t\tm.t.Fatal(\"Expected call to ParcelMock.DelegationToken\")\n\t}\n\n\tif !m.GetCallerFinished() {\n\t\tm.t.Fatal(\"Expected call to ParcelMock.GetCaller\")\n\t}\n\n\tif !m.GetSenderFinished() {\n\t\tm.t.Fatal(\"Expected call to ParcelMock.GetSender\")\n\t}\n\n\tif !m.GetSignFinished() {\n\t\tm.t.Fatal(\"Expected call to ParcelMock.GetSign\")\n\t}\n\n\tif !m.MessageFinished() {\n\t\tm.t.Fatal(\"Expected call to ParcelMock.Message\")\n\t}\n\n\tif !m.PulseFinished() {\n\t\tm.t.Fatal(\"Expected call to ParcelMock.Pulse\")\n\t}\n\n\tif !m.SetSenderFinished() {\n\t\tm.t.Fatal(\"Expected call to ParcelMock.SetSender\")\n\t}\n\n\tif !m.TypeFinished() {\n\t\tm.t.Fatal(\"Expected call to ParcelMock.Type\")\n\t}\n\n}", "func newVersionCheckerMock(version string, tags []string) *VersionChecker {\n\n\tfixedAppVersion := fixVersion(version)\n\n\treturn &VersionChecker{\n\t\tfixedAppVersion: fixedAppVersion,\n\t\tversionSource: &versionCheckerMock{\n\t\t\ttags: tags,\n\t\t\tfixVersionStrFunc: fixVersion,\n\t\t\ttagFilterFunc: versionFilterFunc(fixedAppVersion),\n\t\t},\n\t}\n}", "func TestDesiredState(t *testing.T) {\n\t// I am always using require, so that we stop immediately upon an error\n\t// A long test is hard to debug when it fails in the middle and keeps going\n\ttest, err := tester.NewTest(t)\n\tdefer test.Close()\n\trequire.NoError(t, err, \"test setup failed\")\n\n\ttest.PrepareTestBundle()\n\ttest.Chdir(test.TestDir)\n\n\t// Try to import an installation with an invalid schema\n\t_, _, err = test.RunPorter(\"installation\", \"apply\", filepath.Join(test.RepoRoot, 
\"tests/testdata/installations/invalid-schema.yaml\"))\n\trequire.Error(t, err, \"apply should have failed because the schema of the imported document is incorrect\")\n\trequire.Contains(t, err.Error(), \"invalid installation\")\n\n\t// Try to import a credential set with an invalid schema\n\t_, _, err = test.RunPorter(\"credentials\", \"apply\", filepath.Join(test.RepoRoot, \"tests/testdata/creds/invalid-schema.yaml\"))\n\trequire.Error(t, err, \"apply should have failed because the schema of the imported document is incorrect\")\n\trequire.Contains(t, err.Error(), \"invalid credential set\")\n\n\t// Try to import a parameter set with an invalid schema\n\t_, _, err = test.RunPorter(\"parameters\", \"apply\", filepath.Join(test.RepoRoot, \"tests/testdata/params/invalid-schema.yaml\"))\n\trequire.Error(t, err, \"apply should have failed because the schema of the imported document is incorrect\")\n\trequire.Contains(t, err.Error(), \"invalid parameter set\")\n\n\t// Import some creds and params for mybuns\n\ttest.RequirePorter(\"parameters\", \"apply\", filepath.Join(test.RepoRoot, \"tests/testdata/params/mybuns.yaml\"), \"--namespace=\")\n\ttest.RequirePorter(\"credentials\", \"apply\", filepath.Join(test.RepoRoot, \"tests/testdata/creds/mybuns.yaml\"), \"--namespace=\")\n\ttest.RequirePorter(\"credentials\", \"apply\", filepath.Join(test.RepoRoot, \"tests/testdata/creds/alt-mybuns.yaml\"), \"--namespace=\")\n\n\tmgx.Must(shx.Copy(filepath.Join(test.RepoRoot, \"tests/testdata/installations/mybuns.yaml\"), \"mybuns.yaml\"))\n\n\t// Import an installation with uninstalled=true, should do nothing\n\ttest.EditYaml(\"mybuns.yaml\", func(yq *yaml.Editor) error {\n\t\treturn yq.SetValue(\"uninstalled\", \"true\")\n\t})\n\t_, stderr, err := test.RunPorter(\"installation\", \"apply\", \"mybuns.yaml\", \"--namespace\", \"operator\")\n\trequire.NoError(t, err)\n\trequire.Contains(t, stderr, \"Ignoring because installation.uninstalled is true but the installation doesn't exist 
yet\")\n\n\t// Now set uninstalled = false so that it's installed for the first time\n\ttest.EditYaml(\"mybuns.yaml\", func(yq *yaml.Editor) error {\n\t\treturn yq.SetValue(\"uninstalled\", \"false\")\n\t})\n\n\t// Import an installation, since the file is missing a namespace, it should use the --namespace flag value\n\t// This also tests out that --allow-docker-host-access is being defaulted properly from the Porter config file\n\toutput, stderr, err := test.RunPorter(\"installation\", \"apply\", \"mybuns.yaml\", \"--namespace\", \"operator\")\n\trequire.NoError(t, err)\n\trequire.Contains(t, stderr, \"The installation is out-of-sync, running the install action\")\n\trequire.Contains(t, stderr, \"Triggering because the installation has not completed successfully yet\")\n\tinstallation := test.RequireInstallationExists(\"operator\", \"mybuns\")\n\trequire.Equal(t, \"succeeded\", installation.Status.ResultStatus)\n\n\t// Repeat the apply command, this should result in an upgrade because mybuns has different parameters for install and upgrade\n\t// so porter will detect different params and run again.\n\t_, stderr = test.RequirePorter(\"installation\", \"apply\", \"mybuns.yaml\", \"--namespace\", \"operator\")\n\ttests.RequireOutputContains(t, stderr, \"The installation is out-of-sync, running the upgrade action...\")\n\n\t// Repeat the apply command, there should be no changes detected now that it's two upgrades in a row.\n\t// Using dry run because we just want to know if it _would_ be re-executed.\n\t_, output, err = test.RunPorter(\"installation\", \"apply\", \"mybuns.yaml\", \"--namespace\", \"operator\", \"--dry-run\")\n\trequire.NoError(t, err)\n\ttests.RequireOutputContains(t, output, \"The installation is already up-to-date\")\n\n\t// Repeat the apply command with --force, even though there are no changes, this should trigger an upgrade.\n\t_, output, err = test.RunPorter(\"installation\", \"apply\", \"mybuns.yaml\", \"--namespace\", \"operator\", 
\"--dry-run\", \"--force\")\n\trequire.NoError(t, err)\n\ttests.RequireOutputContains(t, output, \"The installation is up-to-date but will be re-applied because --force was specified\")\n\n\t// Edit the installation file with a minor change that shouldn't trigger reconciliation\n\ttest.EditYaml(\"mybuns.yaml\", func(yq *yaml.Editor) error {\n\t\treturn yq.SetValue(\"labels.thing\", \"2\")\n\t})\n\t_, output, err = test.RunPorter(\"installation\", \"apply\", \"mybuns.yaml\", \"--namespace\", \"operator\")\n\trequire.NoError(t, err)\n\ttests.RequireOutputContains(t, output, \"The installation is already up-to-date\")\n\n\t// Change a bundle parameter and trigger an upgrade\n\ttest.EditYaml(\"mybuns.yaml\", func(yq *yaml.Editor) error {\n\t\treturn yq.SetValue(\"parameters.log_level\", \"3\")\n\t})\n\t_, output, err = test.RunPorter(\"installation\", \"apply\", \"mybuns.yaml\", \"--namespace\", \"operator\")\n\trequire.NoError(t, err)\n\ttests.RequireOutputContains(t, output, \"The installation is out-of-sync, running the upgrade action\")\n\n\tdisplayInstallation, err := test.ShowInstallation(\"operator\", \"mybuns\")\n\trequire.NoError(t, err)\n\trequire.Equal(t, float64(3), displayInstallation.Parameters[\"log_level\"])\n\n\t// Switch credentials and trigger an upgrade\n\ttest.EditYaml(\"mybuns.yaml\", func(yq *yaml.Editor) error {\n\t\treturn yq.SetValue(\"credentialSets[0]\", \"alt-mybuns\")\n\t})\n\t_, output, err = test.RunPorter(\"installation\", \"apply\", \"mybuns.yaml\", \"--namespace\", \"operator\")\n\trequire.NoError(t, err)\n\ttests.RequireOutputContains(t, output, \"The installation is out-of-sync, running the upgrade action\")\n\n\t// Uninstall by setting uninstalled: true\n\ttest.EditYaml(\"mybuns.yaml\", func(yq *yaml.Editor) error {\n\t\treturn yq.SetValue(\"uninstalled\", \"true\")\n\t})\n\t_, output, err = test.RunPorter(\"installation\", \"apply\", \"mybuns.yaml\", \"--namespace\", \"operator\")\n\trequire.NoError(t, 
err)\n\ttests.RequireOutputContains(t, output, \"The installation is out-of-sync, running the uninstall action\")\n}", "func TestTestComand_Validate(t *testing.T) {\n\tdur, _ := time.ParseDuration(\"2s\")\n\tctx, _ := context.WithTimeout(context.Background(), dur)\n\n\tcom := base.NewTestCommand(\"test\")\n\n\tprops := com.Properties()\n\tif validProp, err := props.Get(base_test.PROPERTY_ID_OPERATIONVALID); err != nil {\n\t\tt.Error(\"TestCommand Properties() did not provide a ValidProperty\")\n\t} else {\n\t\tvalidProp.Set(false)\n\t\tres := com.Validate(props)\n\t\tselect {\n\t\tcase <-res.Finished():\n\t\t\tif res.Success() {\n\t\t\t\tt.Error(\"TestCommand thinks it is valid when it shouldn't be\")\n\t\t\t}\n\t\tcase <-ctx.Done():\n\t\t\tt.Error(\"TestCommand Validate timed out: \", ctx.Err().Error())\n\t\t}\n\n\t\tvalidProp.Set(true)\n\t\tres = com.Validate(props)\n\t\tselect {\n\t\tcase <-res.Finished():\n\t\t\tif !res.Success() {\n\t\t\t\tt.Error(\"TestCommand thinks it is invald when it shouldn't be\")\n\t\t\t}\n\t\tcase <-ctx.Done():\n\t\t\tt.Error(\"TestCommand Validate timed out: \", ctx.Err().Error())\n\t\t}\n\t}\n}", "func TestEnforcement(t *testing.T) {\r\n\tdefer tests.Recover(t)\r\n\r\n\tt.Run(\"freeze\", freezeOrder)\r\n\tt.Run(\"authority\", freezeAuthorityOrder)\r\n\tt.Run(\"thaw\", thawOrder)\r\n\tt.Run(\"confiscate\", confiscateOrder)\r\n\tt.Run(\"reconcile\", reconcileOrder)\r\n}", "func (e MaxofMessageProtocolTestsValidationError) Cause() error { return e.cause }", "func (e MatcherValidationError) Cause() error { return e.cause }", "func TestValidateSetDesiredSizeMessage(t *testing.T) {\n\t// SetDesiredSizeMessage with desiredSize => should pass validation\n\tdesiredSize := 1\n\tm := SetDesiredSizeMessage{DesiredSize: &desiredSize}\n\terr := m.Validate()\n\trequire.Nil(t, err, \"expected validation to succeed\")\n\n\t// SetDesiredSizeMessage missing desiredSize => should fail validation\n\tm = SetDesiredSizeMessage{}\n\terr = 
m.Validate()\n\trequire.NotNil(t, err, \"expected validation to fail due to missing desiredSize\")\n\trequire.Equal(t, fmt.Errorf(\"setDesiredSize message did not specify a desiredSize\"), err, \"unexpected validation error\")\n\n\t// SetDesiredSizeMessage with negative desiredSize => should fail validation\n\tdesiredSize = -1\n\tm = SetDesiredSizeMessage{DesiredSize: &desiredSize}\n\terr = m.Validate()\n\trequire.NotNil(t, err, \"expected validation to fail due to negative desiredSize\")\n\trequire.Equal(t, fmt.Errorf(\"setDesiredSize message: desiredSize must be non-negative\"), err, \"unexpected validation error\")\n}", "func (m *mockAPI) AllExpectationsMet() (err error) {\n\tm.Lock()\n\tdefer m.Unlock()\n\tif len(m.expectations) > 0 {\n\t\terr = errors.New(\"there are remaining expectations\")\n\t}\n\treturn\n}", "func (m *MockUnsafePdfServiceServer) mustEmbedUnimplementedPdfServiceServer() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"mustEmbedUnimplementedPdfServiceServer\")\n}", "func (m *MockUnsafeLinkServiceServer) mustEmbedUnimplementedLinkServiceServer() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"mustEmbedUnimplementedLinkServiceServer\")\n}", "func (m *MockAssets) ValidStateName(path string) bool {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ValidStateName\", path)\n\tret0, _ := ret[0].(bool)\n\treturn ret0\n}", "func (m *ParcelMock) MinimockFinish() {\n\n\tif !m.AllowedSenderObjectAndRoleFinished() {\n\t\tm.t.Fatal(\"Expected call to ParcelMock.AllowedSenderObjectAndRole\")\n\t}\n\n\tif !m.ContextFinished() {\n\t\tm.t.Fatal(\"Expected call to ParcelMock.Context\")\n\t}\n\n\tif !m.DefaultRoleFinished() {\n\t\tm.t.Fatal(\"Expected call to ParcelMock.DefaultRole\")\n\t}\n\n\tif !m.DefaultTargetFinished() {\n\t\tm.t.Fatal(\"Expected call to ParcelMock.DefaultTarget\")\n\t}\n\n\tif !m.DelegationTokenFinished() {\n\t\tm.t.Fatal(\"Expected call to ParcelMock.DelegationToken\")\n\t}\n\n\tif !m.GetCallerFinished() {\n\t\tm.t.Fatal(\"Expected call to 
ParcelMock.GetCaller\")\n\t}\n\n\tif !m.GetSenderFinished() {\n\t\tm.t.Fatal(\"Expected call to ParcelMock.GetSender\")\n\t}\n\n\tif !m.GetSignFinished() {\n\t\tm.t.Fatal(\"Expected call to ParcelMock.GetSign\")\n\t}\n\n\tif !m.MessageFinished() {\n\t\tm.t.Fatal(\"Expected call to ParcelMock.Message\")\n\t}\n\n\tif !m.PulseFinished() {\n\t\tm.t.Fatal(\"Expected call to ParcelMock.Pulse\")\n\t}\n\n\tif !m.SetSenderFinished() {\n\t\tm.t.Fatal(\"Expected call to ParcelMock.SetSender\")\n\t}\n\n\tif !m.TypeFinished() {\n\t\tm.t.Fatal(\"Expected call to ParcelMock.Type\")\n\t}\n\n}", "func (m *SignatureKeyHolderMock) MinimockWait(timeout time.Duration) {\n\ttimeoutCh := time.After(timeout)\n\tfor {\n\t\tok := true\n\t\tok = ok && m.AsByteStringFinished()\n\t\tok = ok && m.AsBytesFinished()\n\t\tok = ok && m.EqualsFinished()\n\t\tok = ok && m.FixedByteSizeFinished()\n\t\tok = ok && m.FoldToUint64Finished()\n\t\tok = ok && m.GetSignMethodFinished()\n\t\tok = ok && m.GetSignatureKeyMethodFinished()\n\t\tok = ok && m.GetSignatureKeyTypeFinished()\n\t\tok = ok && m.ReadFinished()\n\t\tok = ok && m.WriteToFinished()\n\n\t\tif ok {\n\t\t\treturn\n\t\t}\n\n\t\tselect {\n\t\tcase <-timeoutCh:\n\n\t\t\tif !m.AsByteStringFinished() {\n\t\t\t\tm.t.Error(\"Expected call to SignatureKeyHolderMock.AsByteString\")\n\t\t\t}\n\n\t\t\tif !m.AsBytesFinished() {\n\t\t\t\tm.t.Error(\"Expected call to SignatureKeyHolderMock.AsBytes\")\n\t\t\t}\n\n\t\t\tif !m.EqualsFinished() {\n\t\t\t\tm.t.Error(\"Expected call to SignatureKeyHolderMock.Equals\")\n\t\t\t}\n\n\t\t\tif !m.FixedByteSizeFinished() {\n\t\t\t\tm.t.Error(\"Expected call to SignatureKeyHolderMock.FixedByteSize\")\n\t\t\t}\n\n\t\t\tif !m.FoldToUint64Finished() {\n\t\t\t\tm.t.Error(\"Expected call to SignatureKeyHolderMock.FoldToUint64\")\n\t\t\t}\n\n\t\t\tif !m.GetSignMethodFinished() {\n\t\t\t\tm.t.Error(\"Expected call to SignatureKeyHolderMock.GetSignMethod\")\n\t\t\t}\n\n\t\t\tif !m.GetSignatureKeyMethodFinished() 
{\n\t\t\t\tm.t.Error(\"Expected call to SignatureKeyHolderMock.GetSignatureKeyMethod\")\n\t\t\t}\n\n\t\t\tif !m.GetSignatureKeyTypeFinished() {\n\t\t\t\tm.t.Error(\"Expected call to SignatureKeyHolderMock.GetSignatureKeyType\")\n\t\t\t}\n\n\t\t\tif !m.ReadFinished() {\n\t\t\t\tm.t.Error(\"Expected call to SignatureKeyHolderMock.Read\")\n\t\t\t}\n\n\t\t\tif !m.WriteToFinished() {\n\t\t\t\tm.t.Error(\"Expected call to SignatureKeyHolderMock.WriteTo\")\n\t\t\t}\n\n\t\t\tm.t.Fatalf(\"Some mocks were not called on time: %s\", timeout)\n\t\t\treturn\n\t\tdefault:\n\t\t\ttime.Sleep(time.Millisecond)\n\t\t}\n\t}\n}", "func validateControllerExpectations(t *testing.T, testName string, ctrlr *ClusterController, cluster *clusteroperator.Cluster, expectedAdds, expectedDeletes int) {\n\texpectations, ok, err := ctrlr.expectations.GetExpectations(getKey(cluster, t))\n\tswitch {\n\tcase err != nil:\n\t\tt.Errorf(\"%s: error getting expecations: %v\", testName, cluster.GetName(), err)\n\tcase !ok:\n\t\tif expectedAdds != 0 || expectedDeletes != 0 {\n\t\t\tt.Errorf(\"%s: no expectations found: expectedAdds %v, expectedDeletes %v\", testName, expectedAdds, expectedDeletes)\n\t\t}\n\tdefault:\n\t\tactualsAdds, actualDeletes := expectations.GetExpectations()\n\t\tif e, a := int64(expectedAdds), actualsAdds; e != a {\n\t\t\tt.Errorf(\"%s: unexpected number of adds in expectations: expected %v, got %v\", testName, e, a)\n\t\t}\n\t\tif e, a := int64(expectedDeletes), actualDeletes; e != a {\n\t\t\tt.Errorf(\"%s: unexpected number of deletes in expectations: expected %v, got %v\", testName, e, a)\n\t\t}\n\t}\n}", "func TestMissedBlockAndRankStreakCounter(t *testing.T) {\n\tapp := simapp.Setup(false)\n\tctx := app.BaseApp.NewContext(false, tmproto.Header{})\n\n\taddrDels := simapp.AddTestAddrsIncremental(app, ctx, 1, sdk.TokensFromConsensusPower(200, sdk.DefaultPowerReduction))\n\tvalAddrs := simapp.ConvertAddrsToValAddrs(addrDels)\n\tpks := simapp.CreateTestPubKeys(1)\n\taddr, val := 
valAddrs[0], pks[0]\n\tvalAddr := sdk.ValAddress(addr)\n\ttstaking := teststaking.NewHelper(t, ctx, app.CustomStakingKeeper, app.CustomGovKeeper)\n\tctx = ctx.WithBlockHeight(1)\n\n\t// Validator created\n\ttstaking.CreateValidator(addr, val, true)\n\n\tstaking.EndBlocker(ctx, app.CustomStakingKeeper)\n\n\t// Now a validator, for two blocks\n\tapp.CustomSlashingKeeper.HandleValidatorSignature(ctx, val.Address(), 100, true)\n\tctx = ctx.WithBlockHeight(2)\n\tapp.CustomSlashingKeeper.HandleValidatorSignature(ctx, val.Address(), 100, false)\n\n\tv := tstaking.CheckValidator(valAddr, stakingtypes.Active)\n\trequire.Equal(t, v.Rank, int64(1))\n\trequire.Equal(t, v.Streak, int64(1))\n\n\tinfo, found := app.CustomSlashingKeeper.GetValidatorSigningInfo(ctx, sdk.ConsAddress(val.Address()))\n\trequire.True(t, found)\n\trequire.Equal(t, int64(1), info.MischanceConfidence)\n\trequire.Equal(t, int64(0), info.Mischance)\n\trequire.Equal(t, int64(1), info.MissedBlocksCounter)\n\trequire.Equal(t, int64(1), info.ProducedBlocksCounter)\n\n\theight := ctx.BlockHeight() + 1\n\tfor i := int64(0); i < 10; i++ {\n\t\tctx = ctx.WithBlockHeight(height + i)\n\t\tapp.CustomSlashingKeeper.HandleValidatorSignature(ctx, val.Address(), 100, false)\n\t}\n\tctx = ctx.WithBlockHeight(height + 10)\n\tapp.CustomSlashingKeeper.HandleValidatorSignature(ctx, val.Address(), 100, true)\n\tinfo, found = app.CustomSlashingKeeper.GetValidatorSigningInfo(ctx, sdk.ConsAddress(val.Address()))\n\trequire.True(t, found)\n\trequire.Equal(t, int64(0), info.MischanceConfidence)\n\trequire.Equal(t, int64(0), info.Mischance)\n\trequire.Equal(t, int64(11), info.MissedBlocksCounter)\n\trequire.Equal(t, int64(2), info.ProducedBlocksCounter)\n\n\tv = tstaking.CheckValidator(valAddr, stakingtypes.Active)\n\trequire.Equal(t, v.Rank, int64(1))\n\trequire.Equal(t, v.Streak, int64(1))\n\n\t// sign 100 blocks successfully\n\theight = ctx.BlockHeight() + 1\n\tfor i := int64(0); i < 100; i++ {\n\t\tctx = 
ctx.WithBlockHeight(height + i)\n\t\tapp.CustomSlashingKeeper.HandleValidatorSignature(ctx, val.Address(), 100, true)\n\t}\n\n\tv = tstaking.CheckValidator(valAddr, stakingtypes.Active)\n\trequire.Equal(t, v.Rank, int64(101))\n\trequire.Equal(t, v.Streak, int64(101))\n\n\t// miss one block\n\tctx = ctx.WithBlockHeight(height + 100)\n\tapp.CustomSlashingKeeper.HandleValidatorSignature(ctx, val.Address(), 100, false)\n\tv = tstaking.CheckValidator(valAddr, stakingtypes.Active)\n\trequire.Equal(t, v.Rank, int64(101))\n\trequire.Equal(t, v.Streak, int64(101))\n\n\tapp.CustomSlashingKeeper.Inactivate(ctx, sdk.ConsAddress(val.Address()))\n\tv = tstaking.CheckValidator(valAddr, stakingtypes.Inactive)\n\trequire.Equal(t, v.Rank, int64(50))\n\trequire.Equal(t, v.Streak, int64(0))\n\n\tapp.CustomSlashingKeeper.Activate(ctx, valAddr)\n\t// miss 5 blocks\n\theight = ctx.BlockHeight() + 1\n\tfor i := int64(0); i < 5; i++ {\n\t\tctx = ctx.WithBlockHeight(height + i)\n\t\tapp.CustomSlashingKeeper.HandleValidatorSignature(ctx, val.Address(), 100, false)\n\t}\n\tv = tstaking.CheckValidator(valAddr, stakingtypes.Active)\n\trequire.Equal(t, v.Rank, int64(50))\n\trequire.Equal(t, v.Streak, int64(0))\n}", "func (m *StateSwitcherMock) MinimockFinish() {\n\n\tif m.GetStateFunc != nil && atomic.LoadUint64(&m.GetStateCounter) == 0 {\n\t\tm.t.Fatal(\"Expected call to StateSwitcherMock.GetState\")\n\t}\n\n\tif m.SetPulsarFunc != nil && atomic.LoadUint64(&m.SetPulsarCounter) == 0 {\n\t\tm.t.Fatal(\"Expected call to StateSwitcherMock.SetPulsar\")\n\t}\n\n\tif m.SwitchToStateFunc != nil && atomic.LoadUint64(&m.SwitchToStateCounter) == 0 {\n\t\tm.t.Fatal(\"Expected call to StateSwitcherMock.SwitchToState\")\n\t}\n\n\tif m.setStateFunc != nil && atomic.LoadUint64(&m.setStateCounter) == 0 {\n\t\tm.t.Fatal(\"Expected call to StateSwitcherMock.setState\")\n\t}\n\n}", "func (m *IndexBucketModifierMock) CheckMocksCalled() {\n\tm.Finish()\n}", "func TestHandleNewValidator(t *testing.T) {\n\tapp := 
simapp.Setup(false)\n\tctx := app.BaseApp.NewContext(false, tmproto.Header{})\n\n\taddrDels := simapp.AddTestAddrsIncremental(app, ctx, 1, sdk.TokensFromConsensusPower(200, sdk.DefaultPowerReduction))\n\tvalAddrs := simapp.ConvertAddrsToValAddrs(addrDels)\n\tpks := simapp.CreateTestPubKeys(1)\n\taddr, val := valAddrs[0], pks[0]\n\ttstaking := teststaking.NewHelper(t, ctx, app.CustomStakingKeeper, app.CustomGovKeeper)\n\tctx = ctx.WithBlockHeight(1)\n\n\t// Validator created\n\ttstaking.CreateValidator(addr, val, true)\n\n\tstaking.EndBlocker(ctx, app.CustomStakingKeeper)\n\n\t// Now a validator, for two blocks\n\tapp.CustomSlashingKeeper.HandleValidatorSignature(ctx, val.Address(), 100, true)\n\tctx = ctx.WithBlockHeight(2)\n\tapp.CustomSlashingKeeper.HandleValidatorSignature(ctx, val.Address(), 100, false)\n\n\tinfo, found := app.CustomSlashingKeeper.GetValidatorSigningInfo(ctx, sdk.ConsAddress(val.Address()))\n\trequire.True(t, found)\n\trequire.Equal(t, int64(1), info.StartHeight)\n\trequire.Equal(t, int64(1), info.MischanceConfidence)\n\trequire.Equal(t, int64(0), info.Mischance)\n\trequire.Equal(t, int64(1), info.MissedBlocksCounter)\n\trequire.Equal(t, int64(1), info.ProducedBlocksCounter)\n\trequire.Equal(t, time.Unix(0, 0).UTC(), info.InactiveUntil)\n\n\t// validator should be active still, should not have been inactivated\n\tvalidator, _ := app.CustomStakingKeeper.GetValidatorByConsAddr(ctx, sdk.GetConsAddress(val))\n\trequire.Equal(t, stakingtypes.Active, validator.GetStatus())\n}", "func TestValid(t *testing.T) {\n\tinput := \"abcdefg\"\n\toutput := Invalid(input)\n\n\tif output != false {\n\t\tt.Error(\"Valid test failed\")\n\t}\n}", "func TestCommitConflictRepeat4A(t *testing.T) {\n}", "func TestWithContractAuthErrors(t *testing.T) {\n\tvar expSTType errors.StackTrace\n\n\targs := []string{mock.Anything}\n\n\ttests := []struct {\n\t\tcRef string\n\t\tc rbac.ContractFunc\n\t\texpSC int32\n\t\texpC int32\n\t\tmsg string\n\t\tcidRoles string\n\t\tcidFound 
bool\n\t\tcidErr error\n\t}{\n\t\t{\n\t\t\tcRef: contractCreateTransfer,\n\t\t\tc: mockContract,\n\t\t\texpSC: http.StatusUnauthorized,\n\t\t\texpC: rbac.CodeErrAuthentication,\n\t\t\tmsg: \"when an error is returned from the CID\",\n\t\t\tcidRoles: mock.Anything,\n\t\t\tcidFound: false,\n\t\t\tcidErr: errors.New(\"some err from cid\"),\n\t\t},\n\t\t{\n\t\t\tcRef: contractCreateTransfer,\n\t\t\tc: mockContract,\n\t\t\texpSC: http.StatusForbidden,\n\t\t\texpC: rbac.CodeErrRoles,\n\t\t\tmsg: \"when the roleAttr is not found in the identity\",\n\t\t\tcidRoles: mock.Anything,\n\t\t\tcidFound: false,\n\t\t\tcidErr: nil,\n\t\t},\n\t\t{\n\t\t\tcRef: contractCreateTransfer,\n\t\t\tc: mockContract,\n\t\t\texpSC: http.StatusForbidden,\n\t\t\texpC: rbac.CodeErrContract,\n\t\t\tmsg: \"when the role is not found in the permissions map\",\n\t\t\tcidRoles: \"anUnknownRole\",\n\t\t\tcidFound: true,\n\t\t\tcidErr: nil,\n\t\t},\n\t\t{\n\t\t\tcRef: contractCreateTransfer,\n\t\t\tc: mockContract,\n\t\t\texpSC: http.StatusForbidden,\n\t\t\texpC: rbac.CodeErrContract,\n\t\t\tmsg: \"when contract invocation is not allowed\",\n\t\t\tcidRoles: \"user\",\n\t\t\tcidFound: true,\n\t\t\tcidErr: nil,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tstub := initEmptyStub()\n\t\tcid := new(mockCID)\n\t\tcid.On(\"GetAttributeValue\", mock.Anything).Return(tt.cidRoles, tt.cidFound, tt.cidErr)\n\t\tcid.On(\"GetID\", mock.Anything).Return(mock.Anything)\n\n\t\tappAuth, err := rbac.New(stub, cid, getRolePerms(), \"roles\")\n\t\t// If the New constructor didn't fail\n\t\tif err == nil {\n\t\t\t_, err = appAuth.WithContractAuth(tt.cRef, args, tt.c)\n\t\t}\n\n\t\tassert.Implements(t, (*error)(nil), err)\n\t\tassert.Implements(t, (*rbac.AuthErrorInterface)(nil), err)\n\t\tassert.IsType(t, (string)(\"\"), err.Error())\n\n\t\tif assert.Error(t, err) {\n\t\t\tt.Logf(\"Should return an error with code %v and HTTP status code %v %v\\nmsg: %v\", tt.expC, tt.expSC, tt.msg, err)\n\n\t\t\tif e, ok := 
err.(rbac.AuthErrorInterface); ok {\n\t\t\t\tassert.Equal(t, tt.expC, e.Code())\n\t\t\t\tassert.Equal(t, tt.expSC, e.StatusCode())\n\t\t\t\tassert.IsType(t, expSTType, e.StackTrace())\n\t\t\t}\n\t\t}\n\t}\n}", "func TestValidatePending(t *testing.T) {\n\tsender, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)\n\tencodedSenderPublicKey, _ := publickey.Encode(&sender.PublicKey)\n\tsenderPKH := hashing.New(encodedSenderPublicKey)\n\trecipient, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)\n\tencodedRecipientPublicKey, _ := publickey.Encode(&recipient.PublicKey)\n\trecipientPKH := hashing.New(encodedRecipientPublicKey)\n\n\tzeroValueContract, _ := contracts.New(1, sender, recipientPKH, 0, 1)\n\tzeroValueContract.Sign(sender)\n\n\tnilSenderContract, _ := contracts.New(1, nil, senderPKH, 500, 1)\n\n\tsenderRecipContract, _ := contracts.New(1, sender, senderPKH, 500, 1)\n\tsenderRecipContract.Sign(sender)\n\n\tinvalidSignatureContract, _ := contracts.New(1, sender, recipientPKH, 500, 1)\n\tinvalidSignatureContract.Sign(recipient)\n\n\tinsufficentFundsContract, _ := contracts.New(1, sender, recipientPKH, 2000000, 1)\n\tinsufficentFundsContract.Sign(sender)\n\n\tinvalidNonceContract, _ := contracts.New(1, sender, recipientPKH, 20, 0)\n\tinvalidNonceContract.Sign(sender)\n\n\tinvalidNonceContract2, _ := contracts.New(1, sender, recipientPKH, 20, 2)\n\tinvalidNonceContract2.Sign(sender)\n\n\t// Start: pBalance = 100, pNonce = 0\n\tvalidFirstContract, _ := contracts.New(1, sender, recipientPKH, 50, 1)\n\tvalidFirstContract.Sign(sender)\n\n\t// pBalance = 50, pNonce = 1\n\tkeyNotInTable, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)\n\tencodedSenderPublicKey, err := publickey.Encode(&keyNotInTable.PublicKey)\n\tif err != nil {\n\t\tt.Errorf(\"failure to encode Sender Public Key: %v\", err)\n\t}\n\tkeyNotInTablePKH := hashing.New(encodedSenderPublicKey)\n\n\tInvalidBalanceContract, _ := contracts.New(1, sender, keyNotInTablePKH, 51, 
2)\n\tInvalidBalanceContract.Sign(sender)\n\n\tInvalidNonceContract, _ := contracts.New(1, sender, keyNotInTablePKH, 20, 3)\n\tInvalidNonceContract.Sign(sender)\n\n\tValidSecondContract, _ := contracts.New(1, sender, keyNotInTablePKH, 50, 2)\n\tValidSecondContract.Sign(sender)\n\n\ttests := []struct {\n\t\tname string\n\t\tc *contracts.Contract\n\t\tpBalance uint64\n\t\tpNonce uint64\n\t\twantErr bool\n\t}{\n\t\t{\n\t\t\tname: \"Zero value\",\n\t\t\tc: zeroValueContract,\n\t\t\tpBalance: 1000,\n\t\t\tpNonce: 0,\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tname: \"Nil sender\",\n\t\t\tc: nilSenderContract,\n\t\t\tpBalance: 1000,\n\t\t\tpNonce: 0,\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tname: \"Sender == Recipient\",\n\t\t\tc: senderRecipContract,\n\t\t\tpBalance: 1000,\n\t\t\tpNonce: 0,\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tname: \"Invalid signature\",\n\t\t\tc: invalidSignatureContract,\n\t\t\tpBalance: 1000,\n\t\t\tpNonce: 0,\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tname: \"Insufficient funds\",\n\t\t\tc: insufficentFundsContract,\n\t\t\tpBalance: 1000,\n\t\t\tpNonce: 0,\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tname: \"Invalid nonce\",\n\t\t\tc: invalidNonceContract,\n\t\t\tpBalance: 1000,\n\t\t\tpNonce: 0,\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tname: \"Invalid nonce 2\",\n\t\t\tc: invalidNonceContract2,\n\t\t\tpBalance: 1000,\n\t\t\tpNonce: 0,\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tname: \"Totally valid\",\n\t\t\tc: validFirstContract,\n\t\t\tpBalance: 100,\n\t\t\tpNonce: 0,\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"Invalid balance\",\n\t\t\tc: InvalidBalanceContract,\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tname: \"Invalid state nonce\",\n\t\t\tc: InvalidNonceContract,\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tname: \"Totally valid 2\",\n\t\t\tc: ValidSecondContract,\n\t\t\twantErr: false,\n\t\t},\n\t}\n\n\tvar updatedBal uint64\n\tvar updatedNonce uint64\n\tfor i, tt := range tests {\n\t\tt.Run(tt.name, func(t 
*testing.T) {\n\t\t\tif i > 7 {\n\t\t\t\ttt.pBalance = updatedBal\n\t\t\t\ttt.pNonce = updatedNonce\n\t\t\t}\n\n\t\t\tvar err error\n\t\t\terr = ValidatePending(tt.c, &tt.pBalance, &tt.pNonce)\n\t\t\tif (err != nil) != tt.wantErr {\n\t\t\t\tt.Errorf(\"ValidatePending() error = %v, wantErr %v\", err, tt.wantErr)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tupdatedBal = tt.pBalance\n\t\t\tupdatedNonce = tt.pNonce\n\t\t})\n\t}\n}", "func (m *ModifierMock) ValidateCallCounters() {\n\n\tif !m.SetFinished() {\n\t\tm.t.Fatal(\"Expected call to ModifierMock.Set\")\n\t}\n\n}", "func (sv *StubbedValidator) ExpectSuccessValidateRestart() {\n\tsv.expectRevalidate = true\n\tsv.StubSuccessValidateRestart()\n}", "func mockIllegalBlockTx(publicKey []byte) *types.Transaction {\n\treturn &types.Transaction{\n\t\tTxType: types.IllegalBlockEvidence,\n\t\tPayload: &payload.DPOSIllegalBlocks{\n\t\t\tEvidence: payload.BlockEvidence{\n\t\t\t\tSigners: [][]byte{publicKey},\n\t\t\t},\n\t\t\tCompareEvidence: payload.BlockEvidence{\n\t\t\t\tSigners: [][]byte{publicKey},\n\t\t\t},\n\t\t},\n\t}\n}", "func (m *MockUnsafeTodoServiceServer) mustEmbedUnimplementedTodoServiceServer() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"mustEmbedUnimplementedTodoServiceServer\")\n}", "func (m *MockMounter) canSafelySkipMountPointCheck() bool {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"canSafelySkipMountPointCheck\")\n\tret0, _ := ret[0].(bool)\n\treturn ret0\n}", "func TestMockCheckDetailAddendumC(t *testing.T) {\n\tcdAddendumC := mockCheckDetailAddendumC()\n\tif err := cdAddendumC.Validate(); err != nil {\n\t\tt.Error(\"mockCheckDetailAddendumC does not validate and will break other tests: \", err)\n\t}\n\tif cdAddendumC.recordType != \"28\" {\n\t\tt.Error(\"recordType does not validate\")\n\t}\n\tif cdAddendumC.RecordNumber != 1 {\n\t\tt.Error(\"RecordNumber does not validate\")\n\t}\n\tif cdAddendumC.EndorsingBankRoutingNumber != \"121042882\" {\n\t\tt.Error(\"EndorsingBankRoutingNumber does not 
validate\")\n\t}\n\tif cdAddendumC.EndorsingBankItemSequenceNumber != \"1 \" {\n\t\tt.Error(\"EndorsingBankItemSequenceNumber does not validate\")\n\t}\n\tif cdAddendumC.TruncationIndicator != \"Y\" {\n\t\tt.Error(\"TruncationIndicator does not validate\")\n\t}\n\tif cdAddendumC.ReturnReason != \"A\" {\n\t\tt.Error(\"ReturnReason does not validate\")\n\t}\n\tif cdAddendumC.EndorsingBankConversionIndicator != \"1\" {\n\t\tt.Error(\"EndorsingBankConversionIndicator does not validate\")\n\t}\n\tif cdAddendumC.EndorsingBankCorrectionIndicator != 0 {\n\t\tt.Error(\"EndorsingBankCorrectionIndicator does not validate\")\n\t}\n\tif cdAddendumC.UserField != \"\" {\n\t\tt.Error(\"UserField does not validate\")\n\t}\n\tif cdAddendumC.EndorsingBankIdentifier != 0 {\n\t\tt.Error(\"EndorsingBankIdentifier does not validate\")\n\t}\n}", "func (m *SignatureKeyHolderMock) MinimockFinish() {\n\n\tif !m.AsByteStringFinished() {\n\t\tm.t.Fatal(\"Expected call to SignatureKeyHolderMock.AsByteString\")\n\t}\n\n\tif !m.AsBytesFinished() {\n\t\tm.t.Fatal(\"Expected call to SignatureKeyHolderMock.AsBytes\")\n\t}\n\n\tif !m.EqualsFinished() {\n\t\tm.t.Fatal(\"Expected call to SignatureKeyHolderMock.Equals\")\n\t}\n\n\tif !m.FixedByteSizeFinished() {\n\t\tm.t.Fatal(\"Expected call to SignatureKeyHolderMock.FixedByteSize\")\n\t}\n\n\tif !m.FoldToUint64Finished() {\n\t\tm.t.Fatal(\"Expected call to SignatureKeyHolderMock.FoldToUint64\")\n\t}\n\n\tif !m.GetSignMethodFinished() {\n\t\tm.t.Fatal(\"Expected call to SignatureKeyHolderMock.GetSignMethod\")\n\t}\n\n\tif !m.GetSignatureKeyMethodFinished() {\n\t\tm.t.Fatal(\"Expected call to SignatureKeyHolderMock.GetSignatureKeyMethod\")\n\t}\n\n\tif !m.GetSignatureKeyTypeFinished() {\n\t\tm.t.Fatal(\"Expected call to SignatureKeyHolderMock.GetSignatureKeyType\")\n\t}\n\n\tif !m.ReadFinished() {\n\t\tm.t.Fatal(\"Expected call to SignatureKeyHolderMock.Read\")\n\t}\n\n\tif !m.WriteToFinished() {\n\t\tm.t.Fatal(\"Expected call to 
SignatureKeyHolderMock.WriteTo\")\n\t}\n\n}", "func ExpectationsWereMet(mock sqlmock.Sqlmock) error {\n\tif !IsEnd2EndTest() {\n\t\treturn mock.ExpectationsWereMet()\n\t}\n\treturn nil\n}", "func (m *MockCertificateManager) CanHandle(arg0 peer.ID) bool {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CanHandle\", arg0)\n\tret0, _ := ret[0].(bool)\n\treturn ret0\n}", "func testMockEntryDetail(t testing.TB) {\n\tentry := mockEntryDetail()\n\tif err := entry.Validate(); err != nil {\n\t\tt.Error(\"mockEntryDetail does not validate and will break other tests\")\n\t}\n\tif entry.TransactionCode != CheckingCredit {\n\t\tt.Error(\"TransactionCode dependent default value has changed\")\n\t}\n\tif entry.DFIAccountNumber != \"123456789\" {\n\t\tt.Error(\"DFIAccountNumber dependent default value has changed\")\n\t}\n\tif entry.Amount != 100000000 {\n\t\tt.Error(\"Amount dependent default value has changed\")\n\t}\n\tif entry.IndividualName != \"Wade Arnold\" {\n\t\tt.Error(\"IndividualName dependent default value has changed\")\n\t}\n\tif entry.TraceNumber != \"121042880000001\" {\n\t\tt.Errorf(\"TraceNumber dependent default value has changed %v\", entry.TraceNumber)\n\t}\n}", "func (m *HostNetworkMock) MinimockFinish() {\n\n\tif m.BuildResponseFunc != nil && atomic.LoadUint64(&m.BuildResponseCounter) == 0 {\n\t\tm.t.Fatal(\"Expected call to HostNetworkMock.BuildResponse\")\n\t}\n\n\tif m.GetNodeIDFunc != nil && atomic.LoadUint64(&m.GetNodeIDCounter) == 0 {\n\t\tm.t.Fatal(\"Expected call to HostNetworkMock.GetNodeID\")\n\t}\n\n\tif m.NewRequestBuilderFunc != nil && atomic.LoadUint64(&m.NewRequestBuilderCounter) == 0 {\n\t\tm.t.Fatal(\"Expected call to HostNetworkMock.NewRequestBuilder\")\n\t}\n\n\tif m.PublicAddressFunc != nil && atomic.LoadUint64(&m.PublicAddressCounter) == 0 {\n\t\tm.t.Fatal(\"Expected call to HostNetworkMock.PublicAddress\")\n\t}\n\n\tif m.RegisterRequestHandlerFunc != nil && atomic.LoadUint64(&m.RegisterRequestHandlerCounter) == 0 
{\n\t\tm.t.Fatal(\"Expected call to HostNetworkMock.RegisterRequestHandler\")\n\t}\n\n\tif m.SendRequestFunc != nil && atomic.LoadUint64(&m.SendRequestCounter) == 0 {\n\t\tm.t.Fatal(\"Expected call to HostNetworkMock.SendRequest\")\n\t}\n\n\tif m.StartFunc != nil && atomic.LoadUint64(&m.StartCounter) == 0 {\n\t\tm.t.Fatal(\"Expected call to HostNetworkMock.Start\")\n\t}\n\n\tif m.StopFunc != nil && atomic.LoadUint64(&m.StopCounter) == 0 {\n\t\tm.t.Fatal(\"Expected call to HostNetworkMock.Stop\")\n\t}\n\n}", "func TestCommitConflictRace4A(t *testing.T) {\n}", "func (m *ModifierMock) CheckMocksCalled() {\n\tm.Finish()\n}", "func testImmutableOrderField(t *testing.T, fldPath *field.Path, setter func(*cmacme.Order, testValue)) {\n\tt.Run(\"should reject updates to \"+fldPath.String(), func(t *testing.T) {\n\t\texpectedErrs := []*field.Error{\n\t\t\tfield.Forbidden(fldPath, \"field is immutable once set\"),\n\t\t}\n\t\tvar expectedWarnings []string\n\t\told := &cmacme.Order{}\n\t\tnew := &cmacme.Order{}\n\t\tsetter(old, testValueOptionOne)\n\t\tsetter(new, testValueOptionTwo)\n\t\terrs, warnings := ValidateOrderUpdate(someAdmissionRequest, old, new)\n\t\tif len(errs) != len(expectedErrs) {\n\t\t\tt.Errorf(\"Expected errors %v but got %v\", expectedErrs, errs)\n\t\t\treturn\n\t\t}\n\t\tfor i, e := range errs {\n\t\t\texpectedErr := expectedErrs[i]\n\t\t\tif !reflect.DeepEqual(e, expectedErr) {\n\t\t\t\tt.Errorf(\"Expected error %v but got %v\", expectedErr, e)\n\t\t\t}\n\t\t}\n\t\tif !reflect.DeepEqual(warnings, expectedWarnings) {\n\t\t\tt.Errorf(\"Expected warnings %+#v but got %+#v\", expectedWarnings, warnings)\n\t\t}\n\t})\n\tt.Run(\"should allow updates to \"+fldPath.String()+\" if not already set\", func(t *testing.T) {\n\t\texpectedErrs := []*field.Error{}\n\t\tvar expectedWarnings []string\n\t\told := &cmacme.Order{}\n\t\tnew := &cmacme.Order{}\n\t\tsetter(old, testValueNone)\n\t\tsetter(new, testValueOptionOne)\n\t\terrs, warnings := 
ValidateOrderUpdate(someAdmissionRequest, old, new)\n\t\tif len(errs) != len(expectedErrs) {\n\t\t\tt.Errorf(\"Expected errors %v but got %v\", expectedErrs, errs)\n\t\t\treturn\n\t\t}\n\t\tfor i, e := range errs {\n\t\t\texpectedErr := expectedErrs[i]\n\t\t\tif !reflect.DeepEqual(e, expectedErr) {\n\t\t\t\tt.Errorf(\"Expected error %v but got %v\", expectedErr, e)\n\t\t\t}\n\t\t}\n\t\tif !reflect.DeepEqual(warnings, expectedWarnings) {\n\t\t\tt.Errorf(\"Expected warnings %+#v but got %+#v\", expectedWarnings, warnings)\n\t\t}\n\t})\n}", "func MockInvalidTx() *Transaction {\n\ttx := NewTransaction()\n\n\treturn tx\n}", "func (m *mSignatureKeyHolderMockAsByteString) Expect() *mSignatureKeyHolderMockAsByteString {\n\tm.mock.AsByteStringFunc = nil\n\tm.expectationSeries = nil\n\n\tif m.mainExpectation == nil {\n\t\tm.mainExpectation = &SignatureKeyHolderMockAsByteStringExpectation{}\n\t}\n\n\treturn m\n}", "func TestAdjustmentReasonCodeValid(t *testing.T) {\n\tadj := mockAdjustment()\n\tadj.AdjustmentReasonCode = \"ZZ\"\n\n\terr := adj.Validate()\n\n\trequire.EqualError(t, err, fieldError(\"AdjustmentReasonCode\", ErrAdjustmentReasonCode, adj.AdjustmentReasonCode).Error())\n}", "func (m *TesterMock) CheckMocksCalled() {\n\tm.Finish()\n}", "func TestValidateHostFirmwareSettings(t *testing.T) {\n\n\ttestCases := []struct {\n\t\tScenario string\n\t\tSpecSettings metal3api.HostFirmwareSettingsSpec\n\t\tExpectedError string\n\t}{\n\t\t{\n\t\t\tScenario: \"valid spec changes with schema\",\n\t\t\tSpecSettings: metal3api.HostFirmwareSettingsSpec{\n\t\t\t\tSettings: metal3api.DesiredSettingsMap{\n\t\t\t\t\t\"CustomPostMessage\": intstr.FromString(\"All tests passed\"),\n\t\t\t\t\t\"ProcVirtualization\": intstr.FromString(\"Disabled\"),\n\t\t\t\t\t\"NetworkBootRetryCount\": intstr.FromInt(20),\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpectedError: \"\",\n\t\t},\n\t\t{\n\t\t\tScenario: \"invalid string\",\n\t\t\tSpecSettings: metal3api.HostFirmwareSettingsSpec{\n\t\t\t\tSettings: 
metal3api.DesiredSettingsMap{\n\t\t\t\t\t\"CustomPostMessage\": intstr.FromString(\"A really long POST message\"),\n\t\t\t\t\t\"ProcVirtualization\": intstr.FromString(\"Disabled\"),\n\t\t\t\t\t\"NetworkBootRetryCount\": intstr.FromInt(20),\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpectedError: \"Setting CustomPostMessage is invalid, string A really long POST message length is above maximum length 20\",\n\t\t},\n\t\t{\n\t\t\tScenario: \"invalid int\",\n\t\t\tSpecSettings: metal3api.HostFirmwareSettingsSpec{\n\t\t\t\tSettings: metal3api.DesiredSettingsMap{\n\t\t\t\t\t\"CustomPostMessage\": intstr.FromString(\"All tests passed\"),\n\t\t\t\t\t\"ProcVirtualization\": intstr.FromString(\"Disabled\"),\n\t\t\t\t\t\"NetworkBootRetryCount\": intstr.FromInt(2000),\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpectedError: \"Setting NetworkBootRetryCount is invalid, integer 2000 is above maximum value 20\",\n\t\t},\n\t\t{\n\t\t\tScenario: \"invalid enum\",\n\t\t\tSpecSettings: metal3api.HostFirmwareSettingsSpec{\n\t\t\t\tSettings: metal3api.DesiredSettingsMap{\n\t\t\t\t\t\"CustomPostMessage\": intstr.FromString(\"All tests passed\"),\n\t\t\t\t\t\"ProcVirtualization\": intstr.FromString(\"Not enabled\"),\n\t\t\t\t\t\"NetworkBootRetryCount\": intstr.FromString(\"20\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpectedError: \"Setting ProcVirtualization is invalid, unknown enumeration value - Not enabled\",\n\t\t},\n\t\t{\n\t\t\tScenario: \"invalid name\",\n\t\t\tSpecSettings: metal3api.HostFirmwareSettingsSpec{\n\t\t\t\tSettings: metal3api.DesiredSettingsMap{\n\t\t\t\t\t\"SomeNewSetting\": intstr.FromString(\"foo\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpectedError: \"Setting SomeNewSetting is not in the Status field\",\n\t\t},\n\t\t{\n\t\t\tScenario: \"invalid password in spec\",\n\t\t\tSpecSettings: metal3api.HostFirmwareSettingsSpec{\n\t\t\t\tSettings: metal3api.DesiredSettingsMap{\n\t\t\t\t\t\"CustomPostMessage\": intstr.FromString(\"All tests passed\"),\n\t\t\t\t\t\"ProcVirtualization\": 
intstr.FromString(\"Disabled\"),\n\t\t\t\t\t\"NetworkBootRetryCount\": intstr.FromString(\"20\"),\n\t\t\t\t\t\"SysPassword\": intstr.FromString(\"Pa%$word\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpectedError: \"Cannot set Password field\",\n\t\t},\n\t\t{\n\t\t\tScenario: \"string instead of int\",\n\t\t\tSpecSettings: metal3api.HostFirmwareSettingsSpec{\n\t\t\t\tSettings: metal3api.DesiredSettingsMap{\n\t\t\t\t\t\"CustomPostMessage\": intstr.FromString(\"All tests passed\"),\n\t\t\t\t\t\"ProcVirtualization\": intstr.FromString(\"Disabled\"),\n\t\t\t\t\t\"NetworkBootRetryCount\": intstr.FromString(\"foo\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpectedError: \"Setting NetworkBootRetryCount is invalid, String foo entered while integer expected\",\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.Scenario, func(t *testing.T) {\n\n\t\t\thfs := getHFS(tc.SpecSettings)\n\t\t\tr := getTestHFSReconciler(hfs)\n\t\t\tinfo := &rInfo{\n\t\t\t\tlog: logf.Log.WithName(\"controllers\").WithName(\"HostFirmwareSettings\"),\n\t\t\t\thfs: hfs,\n\t\t\t}\n\n\t\t\terrors := r.validateHostFirmwareSettings(info, &info.hfs.Status, getExpectedSchema())\n\t\t\tif len(errors) == 0 {\n\t\t\t\tassert.Equal(t, tc.ExpectedError, \"\")\n\t\t\t} else {\n\t\t\t\tfor _, error := range errors {\n\t\t\t\t\tassert.Equal(t, tc.ExpectedError, error.Error())\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}", "func (m *HeavySyncMock) ValidateCallCounters() {\n\n\tif !m.ResetFinished() {\n\t\tm.t.Fatal(\"Expected call to HeavySyncMock.Reset\")\n\t}\n\n\tif !m.StartFinished() {\n\t\tm.t.Fatal(\"Expected call to HeavySyncMock.Start\")\n\t}\n\n\tif !m.StopFinished() {\n\t\tm.t.Fatal(\"Expected call to HeavySyncMock.Stop\")\n\t}\n\n\tif !m.StoreBlobsFinished() {\n\t\tm.t.Fatal(\"Expected call to HeavySyncMock.StoreBlobs\")\n\t}\n\n\tif !m.StoreDropFinished() {\n\t\tm.t.Fatal(\"Expected call to HeavySyncMock.StoreDrop\")\n\t}\n\n\tif !m.StoreIndicesFinished() {\n\t\tm.t.Fatal(\"Expected call to 
HeavySyncMock.StoreIndices\")\n\t}\n\n\tif !m.StoreRecordsFinished() {\n\t\tm.t.Fatal(\"Expected call to HeavySyncMock.StoreRecords\")\n\t}\n\n}", "func (m *mCryptographyServiceMockVerify) Expect(p crypto.PublicKey, p1 insolar.Signature, p2 []byte) *mCryptographyServiceMockVerify {\n\tm.mock.VerifyFunc = nil\n\tm.expectationSeries = nil\n\n\tif m.mainExpectation == nil {\n\t\tm.mainExpectation = &CryptographyServiceMockVerifyExpectation{}\n\t}\n\tm.mainExpectation.input = &CryptographyServiceMockVerifyInput{p, p1, p2}\n\treturn m\n}", "func (suite *SubscriptionsTestSuite) assertMockExpectations() {\n\tsuite.oauthServiceMock.AssertExpectations(suite.T())\n\tsuite.accountsServiceMock.AssertExpectations(suite.T())\n\tsuite.resetMocks()\n}", "func TestUnitAcceptableVersion(t *testing.T) {\n\tinvalidVersions := []string{\n\t\t// ascii gibberish\n\t\t\"foobar\",\n\t\t\"foobar.0\",\n\t\t\"foobar.9\",\n\t\t\"0.foobar\",\n\t\t\"9.foobar\",\n\t\t\"foobar.0.0\",\n\t\t\"foobar.9.9\",\n\t\t\"0.foobar.0\",\n\t\t\"9.foobar.9\",\n\t\t\"0.0.foobar\",\n\t\t\"9.9.foobar\",\n\t\t// utf-8 gibberish\n\t\t\"世界\",\n\t\t\"世界.0\",\n\t\t\"世界.9\",\n\t\t\"0.世界\",\n\t\t\"9.世界\",\n\t\t\"世界.0.0\",\n\t\t\"世界.9.9\",\n\t\t\"0.世界.0\",\n\t\t\"9.世界.9\",\n\t\t\"0.0.世界\",\n\t\t\"9.9.世界\",\n\t\t// missing numbers\n\t\t\".\",\n\t\t\"..\",\n\t\t\"...\",\n\t\t\"0.\",\n\t\t\".1\",\n\t\t\"2..\",\n\t\t\".3.\",\n\t\t\"..4\",\n\t\t\"5.6.\",\n\t\t\".7.8\",\n\t\t\".9.0.\",\n\t}\n\tfor _, v := range invalidVersions {\n\t\terr := acceptableVersion(v)\n\t\tif _, ok := err.(invalidVersionError); err == nil || !ok {\n\t\t\tt.Errorf(\"acceptableVersion returned %q for version %q, but expected invalidVersionError\", err, v)\n\t\t}\n\t}\n\tinsufficientVersions := []string{\n\t\t// random small 
versions\n\t\t\"0\",\n\t\t\"00\",\n\t\t\"0000000000\",\n\t\t\"0.0\",\n\t\t\"0000000000.0\",\n\t\t\"0.0000000000\",\n\t\t\"0.0.0.0.0.0.0.0\",\n\t\t/*\n\t\t\t\"0.0.9\",\n\t\t\t\"0.0.999\",\n\t\t\t\"0.0.99999999999\",\n\t\t\t\"0.1.2\",\n\t\t\t\"0.1.2.3.4.5.6.7.8.9\",\n\t\t\t// pre-hardfork versions\n\t\t\t\"0.3.3\",\n\t\t\t\"0.3.9.9.9.9.9.9.9.9.9.9\",\n\t\t\t\"0.3.9999999999\",\n\t\t\t\"1.3.0\",\n\t\t*/\n\t}\n\tfor _, v := range insufficientVersions {\n\t\terr := acceptableVersion(v)\n\t\tif _, ok := err.(insufficientVersionError); err == nil || !ok {\n\t\t\tt.Errorf(\"acceptableVersion returned %q for version %q, but expected insufficientVersionError\", err, v)\n\t\t}\n\t}\n\tvalidVersions := []string{\n\t\tminimumAcceptablePeerVersion,\n\t\t\"1.3.7\",\n\t\t\"1.4.0\",\n\t\t\"1.6.0\",\n\t\t\"1.6.1\",\n\t\t\"1.9\",\n\t\t\"1.999\",\n\t\t\"1.9999999999\",\n\t\t\"2\",\n\t\t\"2.0\",\n\t\t\"2.0.0\",\n\t\t\"9\",\n\t\t\"9.0\",\n\t\t\"9.0.0\",\n\t\t\"9.9.9\",\n\t}\n\tfor _, v := range validVersions {\n\t\terr := acceptableVersion(v)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"acceptableVersion returned %q for version %q, but expected nil\", err, v)\n\t\t}\n\t}\n}", "func (m *ConsensusNetworkMock) MinimockFinish() {\n\n\tif m.GetNodeIDFunc != nil && atomic.LoadUint64(&m.GetNodeIDCounter) == 0 {\n\t\tm.t.Fatal(\"Expected call to ConsensusNetworkMock.GetNodeID\")\n\t}\n\n\tif m.NewRequestBuilderFunc != nil && atomic.LoadUint64(&m.NewRequestBuilderCounter) == 0 {\n\t\tm.t.Fatal(\"Expected call to ConsensusNetworkMock.NewRequestBuilder\")\n\t}\n\n\tif m.PublicAddressFunc != nil && atomic.LoadUint64(&m.PublicAddressCounter) == 0 {\n\t\tm.t.Fatal(\"Expected call to ConsensusNetworkMock.PublicAddress\")\n\t}\n\n\tif m.RegisterRequestHandlerFunc != nil && atomic.LoadUint64(&m.RegisterRequestHandlerCounter) == 0 {\n\t\tm.t.Fatal(\"Expected call to ConsensusNetworkMock.RegisterRequestHandler\")\n\t}\n\n\tif m.SendRequestFunc != nil && atomic.LoadUint64(&m.SendRequestCounter) == 0 
{\n\t\tm.t.Fatal(\"Expected call to ConsensusNetworkMock.SendRequest\")\n\t}\n\n\tif m.StartFunc != nil && atomic.LoadUint64(&m.StartCounter) == 0 {\n\t\tm.t.Fatal(\"Expected call to ConsensusNetworkMock.Start\")\n\t}\n\n\tif m.StopFunc != nil && atomic.LoadUint64(&m.StopCounter) == 0 {\n\t\tm.t.Fatal(\"Expected call to ConsensusNetworkMock.Stop\")\n\t}\n\n}", "func TestValidate2(t *testing.T) {\n\tcfg := &Config{Version: 0}\n\n\tif err := Validate(cfg); err == nil {\n\t\tt.Errorf(\"No Error Raised For Invalid Version Param: %f\", cfg.Version)\n\t}\n}", "func (m *mSignatureKeyHolderMockFixedByteSize) Expect() *mSignatureKeyHolderMockFixedByteSize {\n\tm.mock.FixedByteSizeFunc = nil\n\tm.expectationSeries = nil\n\n\tif m.mainExpectation == nil {\n\t\tm.mainExpectation = &SignatureKeyHolderMockFixedByteSizeExpectation{}\n\t}\n\n\treturn m\n}", "func TestVerifyForged(t *testing.T) {\n\toptions := iniVerifyOptions(t)\n\toptions.CurrentTime = time.Date(2017, 02, 03, 11, 00, 00, 0, gmt)\n\n\tp := loadProxy(\"test-samples/BadForgedProxy.pem\", t)\n\tif e := p.Verify(options); e == nil {\n\t\tt.Error(\"Must have failed\")\n\t} else {\n\t\tt.Log(e)\n\t}\n}", "func (m *MockAPIConfigFromFlags) Validate() error {\n\tret := m.ctrl.Call(m, \"Validate\")\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func TestStepTypeIsMemoizedValid(t *testing.T) {\n\t// A step with both Delay and TrackArrival set is invalid.\n\tinvalid := &Step{Delay: &Delay{}, TrackArrival: &TrackArrival{}}\n\tif got, want := invalid.StepType(), stepInvalid; got != want {\n\t\tt.Errorf(\"invalid.StepType()=%v, want %v\", got, want)\n\t}\n\n\t// A delay only is valid.\n\tdelay := &Step{Delay: &Delay{}}\n\tif got, want := delay.StepType(), StepDelay; got != want {\n\t\tt.Errorf(\"delay.StepType()=%v, want %v\", got, want)\n\t}\n\t// If we now add TrackArrival, the type shouldn't change, so it should stay as Delay.\n\tdelay.TrackArrival = &TrackArrival{}\n\tif got, want := delay.StepType(), StepDelay; got != 
want {\n\t\tt.Errorf(\"delay.StepType()=%v, want %v\", got, want)\n\t}\n}", "func (mmIsRelayForbidden *mPacketParserMockIsRelayForbidden) Expect() *mPacketParserMockIsRelayForbidden {\n\tif mmIsRelayForbidden.mock.funcIsRelayForbidden != nil {\n\t\tmmIsRelayForbidden.mock.t.Fatalf(\"PacketParserMock.IsRelayForbidden mock is already set by Set\")\n\t}\n\n\tif mmIsRelayForbidden.defaultExpectation == nil {\n\t\tmmIsRelayForbidden.defaultExpectation = &PacketParserMockIsRelayForbiddenExpectation{}\n\t}\n\n\treturn mmIsRelayForbidden\n}", "func (m *StateSwitcherMock) ValidateCallCounters() {\n\n\tif m.GetStateFunc != nil && atomic.LoadUint64(&m.GetStateCounter) == 0 {\n\t\tm.t.Fatal(\"Expected call to StateSwitcherMock.GetState\")\n\t}\n\n\tif m.SetPulsarFunc != nil && atomic.LoadUint64(&m.SetPulsarCounter) == 0 {\n\t\tm.t.Fatal(\"Expected call to StateSwitcherMock.SetPulsar\")\n\t}\n\n\tif m.SwitchToStateFunc != nil && atomic.LoadUint64(&m.SwitchToStateCounter) == 0 {\n\t\tm.t.Fatal(\"Expected call to StateSwitcherMock.SwitchToState\")\n\t}\n\n\tif m.setStateFunc != nil && atomic.LoadUint64(&m.setStateCounter) == 0 {\n\t\tm.t.Fatal(\"Expected call to StateSwitcherMock.setState\")\n\t}\n\n}", "func TestValidAuth(t *testing.T) {\n\tt.Parallel()\n\ta, err := getAuth()\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif !ValidAuth(a) {\n\t\tt.Error(ErrInvalidAuth)\n\t}\n}", "func Test_QualityOperation_IsValidFalse(t *testing.T) {\n\timg := MakeMockMutableImage()\n\top := &QualityOperation{\n\t\tImage: &img,\n\t\tNewQuality: -12,\n\t}\n\n\tassert.Equal(t, false, op.IsValid())\n\n\top2 := &QualityOperation{\n\t\tImage: &img,\n\t\tNewQuality: 500,\n\t}\n\n\tassert.Equal(t, false, op2.IsValid())\n}", "func TestValidateRoute(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\thost string\n\t\tallowNonCompliant string\n\t\texpectedErrors int\n\t}{\n\t\t{\n\t\t\tname: \"Non-DNS-compliant host with non-compliance allowed\",\n\t\t\thost: \"host\",\n\t\t\tallowNonCompliant: 
\"true\",\n\t\t\texpectedErrors: 0,\n\t\t},\n\t\t{\n\t\t\tname: \"Non-DNS-compliant host with non-compliance not allowed\",\n\t\t\thost: \"host\",\n\t\t\tallowNonCompliant: \"false\",\n\t\t\texpectedErrors: 1,\n\t\t},\n\t\t{\n\t\t\tname: \"Non-DNS-compliant host without non-compliance annotation\",\n\t\t\thost: \"host\",\n\t\t\texpectedErrors: 1,\n\t\t},\n\t\t{\n\t\t\tname: \"Specified label too long\",\n\t\t\thost: \"1234567890-1234567890-1234567890-1234567890-1234567890-123456789.host.com\",\n\t\t\tallowNonCompliant: \"\",\n\t\t\texpectedErrors: 1,\n\t\t},\n\t\t{\n\t\t\tname: \"Specified label too long, is not an error with non-compliance allowed\",\n\t\t\thost: \"1234567890-1234567890-1234567890-1234567890-1234567890-123456789.host.com\",\n\t\t\tallowNonCompliant: \"true\",\n\t\t\texpectedErrors: 0,\n\t\t},\n\t\t{\n\t\t\tname: \"Specified host name too long\",\n\t\t\thost: \"1234567890-1234567890-1234567890-1234567890-1234567890.\" +\n\t\t\t\t\"1234567890-1234567890-1234567890-1234567890-1234567890.\" +\n\t\t\t\t\"1234567890-1234567890-1234567890-1234567890-1234567890.\" +\n\t\t\t\t\"1234567890-1234567890-1234567890-1234567890-1234567890.\" +\n\t\t\t\t\"1234567890-1234567890-1234567890-1\",\n\t\t\tallowNonCompliant: \"\",\n\t\t\texpectedErrors: 1,\n\t\t},\n\t\t{\n\t\t\tname: \"Specified host name too long, is still an error even with non-compliance allowed\",\n\t\t\thost: \"1234567890-1234567890-1234567890-1234567890-1234567890.\" +\n\t\t\t\t\"1234567890-1234567890-1234567890-1234567890-1234567890.\" +\n\t\t\t\t\"1234567890-1234567890-1234567890-1234567890-1234567890.\" +\n\t\t\t\t\"1234567890-1234567890-1234567890-1234567890-1234567890.\" +\n\t\t\t\t\"1234567890-1234567890-1234567890-1\",\n\t\t\tallowNonCompliant: \"true\",\n\t\t\texpectedErrors: 1,\n\t\t},\n\t\t{\n\t\t\tname: \"No host\",\n\t\t\thost: \"\",\n\t\t\tallowNonCompliant: \"\",\n\t\t\texpectedErrors: 1,\n\t\t},\n\t\t{\n\t\t\tname: \"Invalid DNS 952 host\",\n\t\t\thost: 
\"**\",\n\t\t\tallowNonCompliant: \"\",\n\t\t\texpectedErrors: 1,\n\t\t},\n\t}\n\n\tfor _, tc := range tests {\n\t\terrs := ValidateHost(tc.host, tc.allowNonCompliant, field.NewPath(\"spec.host\"))\n\t\tif len(errs) != tc.expectedErrors {\n\t\t\tt.Errorf(\"Test case %q expected %d error(s), got %d: %v\", tc.name, tc.expectedErrors, len(errs), errs)\n\t\t}\n\t}\n}", "func (mmState *mClientMockState) Expect() *mClientMockState {\n\tif mmState.mock.funcState != nil {\n\t\tmmState.mock.t.Fatalf(\"ClientMock.State mock is already set by Set\")\n\t}\n\n\tif mmState.defaultExpectation == nil {\n\t\tmmState.defaultExpectation = &ClientMockStateExpectation{}\n\t}\n\n\treturn mmState\n}", "func SimulateValidation(cs *markets.ContractSet) {\n\tfor _, m := range cs.Markets {\n\t\tif m.GetRatioFloat64() > .98 {\n\t\t\tcs.Validate(m, false)\n\t\t\treturn\n\t\t}\n\t}\n}", "func (mmAsByteString *mDigestHolderMockAsByteString) Expect() *mDigestHolderMockAsByteString {\n\tif mmAsByteString.mock.funcAsByteString != nil {\n\t\tmmAsByteString.mock.t.Fatalf(\"DigestHolderMock.AsByteString mock is already set by Set\")\n\t}\n\n\tif mmAsByteString.defaultExpectation == nil {\n\t\tmmAsByteString.defaultExpectation = &DigestHolderMockAsByteStringExpectation{}\n\t}\n\n\treturn mmAsByteString\n}", "func fullBlockTestErrToLocalErr(t *testing.T, kind fullblocktests.ErrorKind) ErrorKind {\n\tt.Helper()\n\n\tswitch kind {\n\tcase fullblocktests.ErrDuplicateBlock:\n\t\treturn ErrDuplicateBlock\n\tcase fullblocktests.ErrBlockTooBig:\n\t\treturn ErrBlockTooBig\n\tcase fullblocktests.ErrWrongBlockSize:\n\t\treturn ErrWrongBlockSize\n\tcase fullblocktests.ErrInvalidTime:\n\t\treturn ErrInvalidTime\n\tcase fullblocktests.ErrTimeTooOld:\n\t\treturn ErrTimeTooOld\n\tcase fullblocktests.ErrTimeTooNew:\n\t\treturn ErrTimeTooNew\n\tcase fullblocktests.ErrUnexpectedDifficulty:\n\t\treturn ErrUnexpectedDifficulty\n\tcase fullblocktests.ErrHighHash:\n\t\treturn ErrHighHash\n\tcase 
fullblocktests.ErrBadMerkleRoot:\n\t\treturn ErrBadMerkleRoot\n\tcase fullblocktests.ErrNoTransactions:\n\t\treturn ErrNoTransactions\n\tcase fullblocktests.ErrNoTxInputs:\n\t\treturn ErrNoTxInputs\n\tcase fullblocktests.ErrNoTxOutputs:\n\t\treturn ErrNoTxOutputs\n\tcase fullblocktests.ErrBadTxOutValue:\n\t\treturn ErrBadTxOutValue\n\tcase fullblocktests.ErrDuplicateTxInputs:\n\t\treturn ErrDuplicateTxInputs\n\tcase fullblocktests.ErrBadTxInput:\n\t\treturn ErrBadTxInput\n\tcase fullblocktests.ErrMissingTxOut:\n\t\treturn ErrMissingTxOut\n\tcase fullblocktests.ErrUnfinalizedTx:\n\t\treturn ErrUnfinalizedTx\n\tcase fullblocktests.ErrDuplicateTx:\n\t\treturn ErrDuplicateTx\n\tcase fullblocktests.ErrImmatureSpend:\n\t\treturn ErrImmatureSpend\n\tcase fullblocktests.ErrSpendTooHigh:\n\t\treturn ErrSpendTooHigh\n\tcase fullblocktests.ErrTooManySigOps:\n\t\treturn ErrTooManySigOps\n\tcase fullblocktests.ErrFirstTxNotCoinbase:\n\t\treturn ErrFirstTxNotCoinbase\n\tcase fullblocktests.ErrCoinbaseHeight:\n\t\treturn ErrCoinbaseHeight\n\tcase fullblocktests.ErrMultipleCoinbases:\n\t\treturn ErrMultipleCoinbases\n\tcase fullblocktests.ErrStakeTxInRegularTree:\n\t\treturn ErrStakeTxInRegularTree\n\tcase fullblocktests.ErrRegTxInStakeTree:\n\t\treturn ErrRegTxInStakeTree\n\tcase fullblocktests.ErrBadCoinbaseScriptLen:\n\t\treturn ErrBadCoinbaseScriptLen\n\tcase fullblocktests.ErrBadCoinbaseValue:\n\t\treturn ErrBadCoinbaseValue\n\tcase fullblocktests.ErrBadCoinbaseFraudProof:\n\t\treturn ErrBadCoinbaseFraudProof\n\tcase fullblocktests.ErrBadCoinbaseAmountIn:\n\t\treturn ErrBadCoinbaseAmountIn\n\tcase fullblocktests.ErrBadStakebaseAmountIn:\n\t\treturn ErrBadStakebaseAmountIn\n\tcase fullblocktests.ErrBadStakebaseScriptLen:\n\t\treturn ErrBadStakebaseScriptLen\n\tcase fullblocktests.ErrBadStakebaseScrVal:\n\t\treturn ErrBadStakebaseScrVal\n\tcase fullblocktests.ErrScriptMalformed:\n\t\treturn ErrScriptMalformed\n\tcase fullblocktests.ErrScriptValidation:\n\t\treturn 
ErrScriptValidation\n\tcase fullblocktests.ErrNotEnoughStake:\n\t\treturn ErrNotEnoughStake\n\tcase fullblocktests.ErrStakeBelowMinimum:\n\t\treturn ErrStakeBelowMinimum\n\tcase fullblocktests.ErrNotEnoughVotes:\n\t\treturn ErrNotEnoughVotes\n\tcase fullblocktests.ErrTooManyVotes:\n\t\treturn ErrTooManyVotes\n\tcase fullblocktests.ErrFreshStakeMismatch:\n\t\treturn ErrFreshStakeMismatch\n\tcase fullblocktests.ErrInvalidEarlyStakeTx:\n\t\treturn ErrInvalidEarlyStakeTx\n\tcase fullblocktests.ErrTicketUnavailable:\n\t\treturn ErrTicketUnavailable\n\tcase fullblocktests.ErrVotesOnWrongBlock:\n\t\treturn ErrVotesOnWrongBlock\n\tcase fullblocktests.ErrVotesMismatch:\n\t\treturn ErrVotesMismatch\n\tcase fullblocktests.ErrIncongruentVotebit:\n\t\treturn ErrIncongruentVotebit\n\tcase fullblocktests.ErrInvalidSSRtx:\n\t\treturn ErrInvalidSSRtx\n\tcase fullblocktests.ErrRevocationsMismatch:\n\t\treturn ErrRevocationsMismatch\n\tcase fullblocktests.ErrTicketCommitment:\n\t\treturn ErrTicketCommitment\n\tcase fullblocktests.ErrBadNumPayees:\n\t\treturn ErrBadNumPayees\n\tcase fullblocktests.ErrMismatchedPayeeHash:\n\t\treturn ErrMismatchedPayeeHash\n\tcase fullblocktests.ErrBadPayeeValue:\n\t\treturn ErrBadPayeeValue\n\tcase fullblocktests.ErrTxSStxOutSpend:\n\t\treturn ErrTxSStxOutSpend\n\tcase fullblocktests.ErrRegTxCreateStakeOut:\n\t\treturn ErrRegTxCreateStakeOut\n\tcase fullblocktests.ErrInvalidFinalState:\n\t\treturn ErrInvalidFinalState\n\tcase fullblocktests.ErrPoolSize:\n\t\treturn ErrPoolSize\n\tcase fullblocktests.ErrBadBlockHeight:\n\t\treturn ErrBadBlockHeight\n\tcase fullblocktests.ErrBlockOneOutputs:\n\t\treturn ErrBlockOneOutputs\n\tcase fullblocktests.ErrNoTreasury:\n\t\treturn ErrNoTreasury\n\tcase fullblocktests.ErrExpiredTx:\n\t\treturn ErrExpiredTx\n\tcase fullblocktests.ErrFraudAmountIn:\n\t\treturn ErrFraudAmountIn\n\tcase fullblocktests.ErrFraudBlockHeight:\n\t\treturn ErrFraudBlockHeight\n\tcase fullblocktests.ErrFraudBlockIndex:\n\t\treturn 
ErrFraudBlockIndex\n\tcase fullblocktests.ErrInvalidEarlyVoteBits:\n\t\treturn ErrInvalidEarlyVoteBits\n\tcase fullblocktests.ErrInvalidEarlyFinalState:\n\t\treturn ErrInvalidEarlyFinalState\n\tdefault:\n\t\tt.Fatalf(\"unconverted fullblocktest error kind %v\", kind)\n\t}\n\n\tpanic(\"unreachable\")\n}", "func TestGetPriceFor_DoesNotReturnOldResults(t *testing.T) {\n\tmockStorage := &mockStorage{\n\t\tmockResults: map[string]mockResult{\n\t\t\t\"p1\": {price: 5, err: nil},\n\t\t\t\"p2\": {price: 7, err: nil},\n\t\t},\n\t}\n\tmockCache := &mockCache{\n\t\tmaxAge: time.Millisecond * 200,\n\t}\n\tmaxAge70Pct := time.Millisecond * 140\n\tservice := NewService(mockStorage, mockCache)\n\n\t// get price for \"p1\" twice (one external service call)\n\tassertFloat(t, 5, getPriceWithNoErr(t, service, \"p1\"), \"wrong price returned\")\n\tassertFloat(t, 5, getPriceWithNoErr(t, service, \"p1\"), \"wrong price returned\")\n\tassertInt(t, 1, mockStorage.getNumCalls(), \"wrong number of service calls\")\n\t// sleep 0.7 the maxAge\n\ttime.Sleep(maxAge70Pct)\n\t// get price for \"p1\" and \"p2\", only \"p2\" should be retrieved from the external service (one more external call)\n\tassertFloat(t, 5, getPriceWithNoErr(t, service, \"p1\"), \"wrong price returned\")\n\tassertFloat(t, 5, getPriceWithNoErr(t, service, \"p1\"), \"wrong price returned\")\n\tassertFloat(t, 7, getPriceWithNoErr(t, service, \"p2\"), \"wrong price returned\")\n\tassertFloat(t, 7, getPriceWithNoErr(t, service, \"p2\"), \"wrong price returned\")\n\tassertInt(t, 2, mockStorage.getNumCalls(), \"wrong number of service calls\")\n\t// sleep 0.7 the maxAge\n\ttime.Sleep(maxAge70Pct)\n\t// get price for \"p1\" and \"p2\", only \"p1\" should be retrieved from the cache (\"p2\" is still valid)\n\tassertFloat(t, 5, getPriceWithNoErr(t, service, \"p1\"), \"wrong price returned\")\n\tassertFloat(t, 5, getPriceWithNoErr(t, service, \"p1\"), \"wrong price returned\")\n\tassertFloat(t, 7, getPriceWithNoErr(t, service, 
\"p2\"), \"wrong price returned\")\n\tassertInt(t, 3, mockStorage.getNumCalls(), \"wrong number of service calls\")\n}", "func checkValidatorSetup(t *testing.T, pool Pool, initialTotalTokens, initialBondedTokens, initialUnbondedTokens int64) {\n\tassert.Equal(t, initialTotalTokens, pool.TokenSupply().Int64())\n\tassert.Equal(t, initialBondedTokens, pool.BondedTokens.Int64())\n\tassert.Equal(t, initialUnbondedTokens, pool.UnbondedTokens.Int64())\n\n\t// test initial bonded ratio\n\tassert.True(t, pool.bondedRatio().Equal(sdk.NewRat(initialBondedTokens, initialTotalTokens)), \"%v\", pool.bondedRatio())\n\t// test the value of validator shares\n\tassert.True(t, pool.bondedShareExRate().Equal(sdk.OneRat()), \"%v\", pool.bondedShareExRate())\n}", "func (m *SignatureKeyHolderMock) CheckMocksCalled() {\n\tm.Finish()\n}", "func TestValidateSuccessfulRootRotation(t *testing.T) {\n\ttestValidateSuccessfulRootRotation(t, data.ECDSAKey, data.ECDSAx509Key)\n\tif !testing.Short() {\n\t\ttestValidateSuccessfulRootRotation(t, data.RSAKey, data.RSAx509Key)\n\t}\n}", "func TestInputMessageAccountabilityDataInputSequenceNumberRequired(t *testing.T) {\n\timad := mockInputMessageAccountabilityData()\n\timad.InputSequenceNumber = \"\"\n\n\trequire.EqualError(t, imad.Validate(), fieldError(\"InputSequenceNumber\", ErrFieldRequired, imad.InputSequenceNumber).Error())\n}", "func (m *StateSwitcherMock) MinimockWait(timeout time.Duration) {\n\ttimeoutCh := time.After(timeout)\n\tfor {\n\t\tok := true\n\t\tok = ok && (m.GetStateFunc == nil || atomic.LoadUint64(&m.GetStateCounter) > 0)\n\t\tok = ok && (m.SetPulsarFunc == nil || atomic.LoadUint64(&m.SetPulsarCounter) > 0)\n\t\tok = ok && (m.SwitchToStateFunc == nil || atomic.LoadUint64(&m.SwitchToStateCounter) > 0)\n\t\tok = ok && (m.setStateFunc == nil || atomic.LoadUint64(&m.setStateCounter) > 0)\n\n\t\tif ok {\n\t\t\treturn\n\t\t}\n\n\t\tselect {\n\t\tcase <-timeoutCh:\n\n\t\t\tif m.GetStateFunc != nil && 
atomic.LoadUint64(&m.GetStateCounter) == 0 {\n\t\t\t\tm.t.Error(\"Expected call to StateSwitcherMock.GetState\")\n\t\t\t}\n\n\t\t\tif m.SetPulsarFunc != nil && atomic.LoadUint64(&m.SetPulsarCounter) == 0 {\n\t\t\t\tm.t.Error(\"Expected call to StateSwitcherMock.SetPulsar\")\n\t\t\t}\n\n\t\t\tif m.SwitchToStateFunc != nil && atomic.LoadUint64(&m.SwitchToStateCounter) == 0 {\n\t\t\t\tm.t.Error(\"Expected call to StateSwitcherMock.SwitchToState\")\n\t\t\t}\n\n\t\t\tif m.setStateFunc != nil && atomic.LoadUint64(&m.setStateCounter) == 0 {\n\t\t\t\tm.t.Error(\"Expected call to StateSwitcherMock.setState\")\n\t\t\t}\n\n\t\t\tm.t.Fatalf(\"Some mocks were not called on time: %s\", timeout)\n\t\t\treturn\n\t\tdefault:\n\t\t\ttime.Sleep(time.Millisecond)\n\t\t}\n\t}\n}", "func MockFee(randomized bool) *Fee {\n\treturn &Fee{\n\t\tGasLimit: 50000,\n\t\tGasPrice: 100,\n\t}\n}", "func (m *MockASOResourceSpecGetter) WasManaged(arg0 genruntime.MetaObject) bool {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"WasManaged\", arg0)\n\tret0, _ := ret[0].(bool)\n\treturn ret0\n}", "func (m *UnsyncListMock) MinimockFinish() {\n\n\tif !m.AddClaimsFinished() {\n\t\tm.t.Fatal(\"Expected call to UnsyncListMock.AddClaims\")\n\t}\n\n\tif !m.CalculateHashFinished() {\n\t\tm.t.Fatal(\"Expected call to UnsyncListMock.CalculateHash\")\n\t}\n\n\tif !m.GetActiveNodeFinished() {\n\t\tm.t.Fatal(\"Expected call to UnsyncListMock.GetActiveNode\")\n\t}\n\n\tif !m.GetActiveNodesFinished() {\n\t\tm.t.Fatal(\"Expected call to UnsyncListMock.GetActiveNodes\")\n\t}\n\n\tif !m.IndexToRefFinished() {\n\t\tm.t.Fatal(\"Expected call to UnsyncListMock.IndexToRef\")\n\t}\n\n\tif !m.LengthFinished() {\n\t\tm.t.Fatal(\"Expected call to UnsyncListMock.Length\")\n\t}\n\n\tif !m.RefToIndexFinished() {\n\t\tm.t.Fatal(\"Expected call to UnsyncListMock.RefToIndex\")\n\t}\n\n\tif !m.RemoveClaimsFinished() {\n\t\tm.t.Fatal(\"Expected call to UnsyncListMock.RemoveClaims\")\n\t}\n\n}", "func (m *HostNetworkMock) 
MinimockWait(timeout time.Duration) {\n\ttimeoutCh := time.After(timeout)\n\tfor {\n\t\tok := true\n\t\tok = ok && (m.BuildResponseFunc == nil || atomic.LoadUint64(&m.BuildResponseCounter) > 0)\n\t\tok = ok && (m.GetNodeIDFunc == nil || atomic.LoadUint64(&m.GetNodeIDCounter) > 0)\n\t\tok = ok && (m.NewRequestBuilderFunc == nil || atomic.LoadUint64(&m.NewRequestBuilderCounter) > 0)\n\t\tok = ok && (m.PublicAddressFunc == nil || atomic.LoadUint64(&m.PublicAddressCounter) > 0)\n\t\tok = ok && (m.RegisterRequestHandlerFunc == nil || atomic.LoadUint64(&m.RegisterRequestHandlerCounter) > 0)\n\t\tok = ok && (m.SendRequestFunc == nil || atomic.LoadUint64(&m.SendRequestCounter) > 0)\n\t\tok = ok && (m.StartFunc == nil || atomic.LoadUint64(&m.StartCounter) > 0)\n\t\tok = ok && (m.StopFunc == nil || atomic.LoadUint64(&m.StopCounter) > 0)\n\n\t\tif ok {\n\t\t\treturn\n\t\t}\n\n\t\tselect {\n\t\tcase <-timeoutCh:\n\n\t\t\tif m.BuildResponseFunc != nil && atomic.LoadUint64(&m.BuildResponseCounter) == 0 {\n\t\t\t\tm.t.Error(\"Expected call to HostNetworkMock.BuildResponse\")\n\t\t\t}\n\n\t\t\tif m.GetNodeIDFunc != nil && atomic.LoadUint64(&m.GetNodeIDCounter) == 0 {\n\t\t\t\tm.t.Error(\"Expected call to HostNetworkMock.GetNodeID\")\n\t\t\t}\n\n\t\t\tif m.NewRequestBuilderFunc != nil && atomic.LoadUint64(&m.NewRequestBuilderCounter) == 0 {\n\t\t\t\tm.t.Error(\"Expected call to HostNetworkMock.NewRequestBuilder\")\n\t\t\t}\n\n\t\t\tif m.PublicAddressFunc != nil && atomic.LoadUint64(&m.PublicAddressCounter) == 0 {\n\t\t\t\tm.t.Error(\"Expected call to HostNetworkMock.PublicAddress\")\n\t\t\t}\n\n\t\t\tif m.RegisterRequestHandlerFunc != nil && atomic.LoadUint64(&m.RegisterRequestHandlerCounter) == 0 {\n\t\t\t\tm.t.Error(\"Expected call to HostNetworkMock.RegisterRequestHandler\")\n\t\t\t}\n\n\t\t\tif m.SendRequestFunc != nil && atomic.LoadUint64(&m.SendRequestCounter) == 0 {\n\t\t\t\tm.t.Error(\"Expected call to HostNetworkMock.SendRequest\")\n\t\t\t}\n\n\t\t\tif m.StartFunc != nil 
&& atomic.LoadUint64(&m.StartCounter) == 0 {\n\t\t\t\tm.t.Error(\"Expected call to HostNetworkMock.Start\")\n\t\t\t}\n\n\t\t\tif m.StopFunc != nil && atomic.LoadUint64(&m.StopCounter) == 0 {\n\t\t\t\tm.t.Error(\"Expected call to HostNetworkMock.Stop\")\n\t\t\t}\n\n\t\t\tm.t.Fatalf(\"Some mocks were not called on time: %s\", timeout)\n\t\t\treturn\n\t\tdefault:\n\t\t\ttime.Sleep(time.Millisecond)\n\t\t}\n\t}\n}", "func TestValidatorSMDestroyValidatorUnbonding2Removed(t *testing.T) {\n\n\t_, _, mk := CreateTestInput(t, false, SufficientInitPower)\n\tparams := DefaultParams()\n\n\toriginVaSet := addrVals[1:]\n\tparams.MaxValidators = uint16(len(originVaSet))\n\tparams.Epoch = 1\n\tparams.UnbondingTime = time.Millisecond * 300\n\n\tstartUpValidator := NewValidator(StartUpValidatorAddr, StartUpValidatorPubkey, Description{}, types.DefaultMinSelfDelegation)\n\n\tstartUpStatus := baseValidatorStatus{startUpValidator}\n\n\torgValsLen := len(originVaSet)\n\tfullVaSet := make([]sdk.ValAddress, orgValsLen+1)\n\tcopy(fullVaSet, originVaSet)\n\tcopy(fullVaSet[orgValsLen:], []sdk.ValAddress{startUpStatus.getValidator().GetOperator()})\n\n\tbAction := baseAction{mk}\n\tinputActions := []IAction{\n\t\tcreateValidatorAction{bAction, nil},\n\t\tendBlockAction{bAction},\n\t\tdelegatorsAddSharesAction{bAction, false, true, 0, nil},\n\t\tendBlockAction{bAction},\n\t\tdestroyValidatorAction{bAction},\n\t\tendBlockAction{bAction},\n\n\t\t// delegators unbond all tokens back, validator has no msd & delegator shares now, delegator removed\n\t\tdelegatorsWithdrawAction{bAction, true, true},\n\n\t\t// second unbonding time pass, no delegator shares left, unbonding --> validator removed\n\t\twaitUntilUnbondingTimeExpired{bAction},\n\t\tendBlockAction{bAction},\n\t}\n\n\t//expZeroInt := sdk.ZeroInt()\n\texpZeroDec := sdk.ZeroDec()\n\tdlgAddSharesCheck1 := 
andChecker{[]actResChecker{\n\t\tvalidatorDelegatorShareIncreased(true),\n\t\tvalidatorRemoved(false),\n\t\tvalidatorDelegatorShareLeft(true),\n\t\tvalidatorStatusChecker(sdk.Bonded.String()),\n\t}}\n\n\tdlgUnbondCheck2 := andChecker{[]actResChecker{\n\t\tnoErrorInHandlerResult(true),\n\t\tvalidatorStatusChecker(sdk.Unbonding.String()),\n\t\tvalidatorRemoved(false),\n\t\tqueryDelegatorCheck(ValidDelegator1, false, nil, nil, &expZeroDec, nil),\n\t}}\n\n\tafterUnbondingTimeExpiredCheck1 := andChecker{[]actResChecker{\n\t\tvalidatorRemoved(true),\n\t}}\n\n\tactionsAndChecker := []actResChecker{\n\t\tvalidatorStatusChecker(sdk.Unbonded.String()),\n\t\tqueryValidatorCheck(sdk.Bonded, false, &SharesFromDefaultMSD, &startUpValidator.MinSelfDelegation, nil),\n\t\tdlgAddSharesCheck1.GetChecker(),\n\t\tnil,\n\t\tqueryValidatorCheck(sdk.Bonded, true, nil, &expZeroDec, nil),\n\t\tvalidatorStatusChecker(sdk.Unbonding.String()),\n\t\tdlgUnbondCheck2.GetChecker(),\n\t\tqueryValidatorCheck(sdk.Unbonding, true, nil, &expZeroDec, nil),\n\t\tafterUnbondingTimeExpiredCheck1.GetChecker(),\n\t}\n\n\tsmTestCase := newValidatorSMTestCase(mk, params, startUpStatus, inputActions, actionsAndChecker, t)\n\tsmTestCase.SetupValidatorSetAndDelegatorSet(int(params.MaxValidators))\n\tsmTestCase.printParticipantSnapshot(t)\n\tsmTestCase.Run(t)\n}", "func TestPrewriteLocked4A(t *testing.T) {\n}" ]
[ "0.6337174", "0.56319845", "0.56032586", "0.5550087", "0.5531006", "0.5520939", "0.55201274", "0.551606", "0.5490635", "0.54042244", "0.5372254", "0.53221875", "0.5322081", "0.53210413", "0.53143036", "0.5292445", "0.5291869", "0.5279467", "0.5277037", "0.525805", "0.5255182", "0.52538717", "0.5251778", "0.52503496", "0.5248709", "0.5236565", "0.52346516", "0.5225843", "0.51860404", "0.51835763", "0.5173358", "0.51627797", "0.51580197", "0.51572347", "0.51536524", "0.5152643", "0.5151703", "0.51482505", "0.514623", "0.5142601", "0.51374996", "0.5134617", "0.5125944", "0.512106", "0.51199436", "0.511332", "0.51108545", "0.5107093", "0.5106342", "0.51017255", "0.5091507", "0.5087375", "0.50872755", "0.50868124", "0.50800955", "0.5065959", "0.506556", "0.5061578", "0.5059755", "0.5057489", "0.505028", "0.50498784", "0.5049657", "0.5038609", "0.5037298", "0.503582", "0.5032545", "0.5031675", "0.50310534", "0.503068", "0.5023812", "0.5022308", "0.50190485", "0.5018187", "0.5018182", "0.50169164", "0.501377", "0.5013553", "0.5011543", "0.5009036", "0.50043833", "0.50023264", "0.49971825", "0.4992826", "0.49914372", "0.49879023", "0.4979987", "0.49775466", "0.49772823", "0.49734867", "0.497078", "0.49687323", "0.49674812", "0.4966798", "0.49664357", "0.49607885", "0.4960173", "0.4959948", "0.49573225", "0.4956844" ]
0.6817638
0
Test the accumulation of agreement events. It should result in the agreement component publishing a round update. TODO: trap eventual errors
func TestAgreement(t *testing.T) { nr := 50 _, hlp := agreement.WireAgreement(nr) hash, _ := crypto.RandEntropy(32) for i := 0; i < nr; i++ { a := message.MockAgreement(hash, 1, 3, hlp.Keys, hlp.P, i) msg := message.New(topics.Agreement, a) hlp.Bus.Publish(topics.Agreement, msg) } res := <-hlp.CertificateChan cert := res.Payload().(message.Agreement) assert.Equal(t, hash, cert.State().BlockHash) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestSendAgreement(t *testing.T) {\n\tcommitteeMock, _ := agreement.MockCommittee(3, true, 3)\n\teb, _ := initAgreement(committeeMock)\n\n\tstreamer := helper.NewSimpleStreamer()\n\teb.SubscribeStream(string(topics.Gossip), streamer)\n\teb.RegisterPreprocessor(string(topics.Gossip), processing.NewGossip(protocol.TestNet))\n\n\t// Initiate the sending of an agreement message\n\thash, _ := crypto.RandEntropy(32)\n\tbuf := new(bytes.Buffer)\n\tif err := encoding.WriteUint64(buf, binary.LittleEndian, 1); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif _, err := buf.ReadFrom(reduction.MockVoteSetBuffer(hash, 1, 2, 10)); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\teb.Publish(msg.ReductionResultTopic, buf)\n\n\t// There should now be an agreement message in the streamer\n\t_, err := streamer.Read()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tseenTopics := streamer.SeenTopics()\n\tif seenTopics[0] != topics.Agreement {\n\t\tt.Fail()\n\t}\n}", "func MockAgreementEvent(hash []byte, round uint64, step uint8, keys []key.ConsensusKeys, p *user.Provisioners, iterativeIdx ...int) *Agreement {\n\t// Make sure we create an event made by an actual voting committee member\n\tc := p.CreateVotingCommittee(round, step, len(keys))\n\tcKeys := createCommitteeKeySet(c, keys)\n\n\tidx := 0\n\tif len(iterativeIdx) != 0 {\n\t\tidx = iterativeIdx[0]\n\t}\n\n\tif idx > len(keys) {\n\t\tpanic(\"wrong iterative index: cannot iterate more than there are keys\")\n\t}\n\n\ta := New(header.Header{Round: round, Step: step, BlockHash: hash, PubKeyBLS: cKeys[idx].BLSPubKeyBytes})\n\t// generating reduction events (votes) and signing them\n\tsteps := GenVotes(hash, round, step, keys, p)\n\n\twhole := new(bytes.Buffer)\n\tif err := header.MarshalSignableVote(whole, a.Header); err != nil {\n\t\tpanic(err)\n\t}\n\n\tsig, _ := bls.Sign(cKeys[idx].BLSSecretKey, cKeys[idx].BLSPubKey, whole.Bytes())\n\ta.VotesPerStep = steps\n\ta.SetSignature(sig.Compress())\n\treturn a\n}", "func initAgreement(c 
committee.Foldable) (wire.EventBroker, <-chan uint64) {\n\tbus := wire.NewEventBus()\n\troundChan := consensus.InitRoundUpdate(bus)\n\tk, _ := user.NewRandKeys()\n\tgo agreement.Launch(bus, c, k)\n\ttime.Sleep(200 * time.Millisecond)\n\tinit := make([]byte, 8)\n\tbinary.LittleEndian.PutUint64(init, 1)\n\tbus.Publish(msg.InitializationTopic, bytes.NewBuffer(init))\n\n\t// we remove the pre-processors here that the Launch function adds, so the mocked\n\t// buffers can be deserialized properly\n\tbus.RemoveAllPreprocessors(string(topics.Agreement))\n\t// we need to discard the first update since it is triggered directly as it is supposed to update the round to all other consensus compoenents\n\t<-roundChan\n\treturn bus, roundChan\n}", "func TestBroker(t *testing.T) {\n\tcommitteeMock, keys := agreement.MockCommittee(2, true, 2)\n\teb, roundChan := initAgreement(committeeMock)\n\n\thash, _ := crypto.RandEntropy(32)\n\teb.Publish(string(topics.Agreement), agreement.MockAgreement(hash, 1, 1, keys))\n\teb.Publish(string(topics.Agreement), agreement.MockAgreement(hash, 1, 1, keys))\n\n\tround := <-roundChan\n\tassert.Equal(t, uint64(2), round)\n}", "func MockAgreement(hash []byte, round uint64, step uint8, keys []key.ConsensusKeys, p *user.Provisioners, i ...int) *bytes.Buffer {\n\tbuf := new(bytes.Buffer)\n\tev := MockAgreementEvent(hash, round, step, keys, p, i...)\n\t_ = Marshal(buf, *ev)\n\treturn buf\n}", "func TestReduction(t *testing.T) {\n\teventBus, rpcBus, streamer, _, k := launchReductionTest(true, 2)\n\tgo launchCandidateVerifier(rpcBus, false)\n\n\t// Because round updates are asynchronous (sent through a channel), we wait\n\t// for a bit to let the broker update its round.\n\ttime.Sleep(200 * time.Millisecond)\n\t// send a hash to start reduction\n\thash, _ := crypto.RandEntropy(32)\n\tsendSelection(1, hash, eventBus)\n\n\t// send mocked events until we get a result from the outgoingAgreement channel\n\tsendReductionBuffers(k, hash, 1, 1, 
eventBus)\n\tsendReductionBuffers(k, hash, 1, 2, eventBus)\n\n\ttimer := time.AfterFunc(1*time.Second, func() {\n\t\tt.Fatal(\"\")\n\t})\n\n\tfor i := 0; i < 2; i++ {\n\t\tif _, err := streamer.Read(); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\n\ttimer.Stop()\n}", "func TestNoQuorum(t *testing.T) {\n\tcommitteeMock, keys := agreement.MockCommittee(3, true, 3)\n\teb, roundChan := initAgreement(committeeMock)\n\thash, _ := crypto.RandEntropy(32)\n\teb.Publish(string(topics.Agreement), agreement.MockAgreement(hash, 1, 1, keys))\n\teb.Publish(string(topics.Agreement), agreement.MockAgreement(hash, 1, 1, keys))\n\n\tselect {\n\tcase <-roundChan:\n\t\tassert.FailNow(t, \"not supposed to get a round update without reaching quorum\")\n\tcase <-time.After(100 * time.Millisecond):\n\t\t// all good\n\t}\n}", "func format_ag_event(ix int, ev rpcFilterChanges, tr *TestResults) {\n // These event strings correspond to event codes from the agreements contract\n ag_cr8 := \"0x0000000000000000000000000000000000000000000000000000000000000000\"\n ag_cr8_detail := \"0x0000000000000000000000000000000000000000000000000000000000000001\"\n ag_cr8_fraud := \"0x0000000000000000000000000000000000000000000000000000000000000002\"\n ag_con_term := \"0x0000000000000000000000000000000000000000000000000000000000000003\"\n ag_pub_term := \"0x0000000000000000000000000000000000000000000000000000000000000004\"\n ag_fraud_term := \"0x0000000000000000000000000000000000000000000000000000000000000005\"\n ag_adm_term := \"0x0000000000000000000000000000000000000000000000000000000000000006\"\n\n if ev.Topics[0] == ag_cr8 {\n log.Printf(\"|%03d| Agreement created %v\\n\",ix,ev.Topics[1]);\n log.Printf(\"Data: %v\\n\",ev.Data);\n log.Printf(\"Block: %v\\n\\n\",ev.BlockNumber);\n tr.Successful += 1\n } else if ev.Topics[0] == ag_cr8_detail {\n log.Printf(\"|%03d| Agreement created detail %v\\n\",ix,ev.Topics[1]);\n log.Printf(\"Data: %v\\n\",ev.Data);\n log.Printf(\"Block: %v\\n\\n\",ev.BlockNumber);\n } 
else if ev.Topics[0] == ag_cr8_fraud {\n log.Printf(\"|%03d| Agreement creation fraud %v\\n\",ix,ev.Topics[1]);\n log.Printf(\"Data: %v\\n\",ev.Data);\n log.Printf(\"Block: %v\\n\\n\",ev.BlockNumber);\n tr.Fraud += 1\n } else if ev.Topics[0] == ag_con_term {\n log.Printf(\"|%03d| Consumer Terminated %v\\n\",ix,ev.Topics[1]);\n log.Printf(\"Data: %v\\n\",ev.Data);\n log.Printf(\"Block: %v\\n\\n\",ev.BlockNumber);\n tr.Delete += 1\n } else if ev.Topics[0] == ag_pub_term {\n log.Printf(\"|%03d| Publisher Terminated %v\\n\",ix,ev.Topics[1]);\n log.Printf(\"Data: %v\\n\",ev.Data);\n log.Printf(\"Block: %v\\n\\n\",ev.BlockNumber);\n tr.Delete += 1\n } else if ev.Topics[0] == ag_fraud_term {\n log.Printf(\"|%03d| Fraudulent Termination %v\\n\",ix,ev.Topics[1]);\n log.Printf(\"Data: %v\\n\",ev.Data);\n log.Printf(\"Block: %v\\n\\n\",ev.BlockNumber);\n tr.Fraud += 1\n } else if ev.Topics[0] == ag_adm_term {\n log.Printf(\"|%03d| Admin Termination %v\\n\",ix,ev.Topics[1]);\n log.Printf(\"Data: %v\\n\",ev.Data);\n log.Printf(\"Block: %v\\n\\n\",ev.BlockNumber);\n tr.Delete += 1\n } else {\n log.Printf(\"|%03d| Unknown event code in first topic slot.\\n\")\n log.Printf(\"Raw log entry:\\n%v\\n\\n\",ev)\n }\n}", "func TestUpdateSubscriptions(t *testing.T) {\n\n\tlogger := logtesting.TestLogger(t)\n\tctx := logging.WithLogger(context.TODO(), logger)\n\n\t// Test Data\n\tbrokers := []string{configtesting.DefaultKafkaBroker}\n\tconfig, err := commonclient.NewConfigBuilder().WithDefaults().FromYaml(clienttesting.DefaultSaramaConfigYaml).Build(ctx)\n\tassert.Nil(t, err)\n\n\tdispatcherConfig := DispatcherConfig{\n\t\tLogger: logger.Desugar(),\n\t\tBrokers: brokers,\n\t\tSaramaConfig: config,\n\t}\n\n\t// Define The TestCase Struct\n\ttype fields struct {\n\t\tDispatcherConfig DispatcherConfig\n\t\tsubscribers map[types.UID]*SubscriberWrapper\n\t}\n\ttype args struct {\n\t\tsubscriberSpecs []eventingduck.SubscriberSpec\n\t}\n\n\t// Define The TestCase Struct\n\ttype TestCase struct 
{\n\t\tonly bool\n\t\tname string\n\t\tfields fields\n\t\targs args\n\t\twant map[eventingduck.SubscriberSpec]error\n\t}\n\n\t// Create The Test Cases\n\ttestCases := []TestCase{\n\t\t{\n\t\t\tname: \"Add First Subscription\",\n\t\t\tfields: fields{\n\t\t\t\tDispatcherConfig: dispatcherConfig,\n\t\t\t\tsubscribers: map[types.UID]*SubscriberWrapper{},\n\t\t\t},\n\t\t\targs: args{\n\t\t\t\tsubscriberSpecs: []eventingduck.SubscriberSpec{\n\t\t\t\t\t{UID: uid123},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: map[eventingduck.SubscriberSpec]error{},\n\t\t},\n\t\t{\n\t\t\tname: \"Add Second Subscription\",\n\t\t\tfields: fields{\n\t\t\t\tDispatcherConfig: dispatcherConfig,\n\t\t\t\tsubscribers: map[types.UID]*SubscriberWrapper{\n\t\t\t\t\tuid123: createSubscriberWrapper(uid123),\n\t\t\t\t},\n\t\t\t},\n\t\t\targs: args{\n\t\t\t\tsubscriberSpecs: []eventingduck.SubscriberSpec{\n\t\t\t\t\t{UID: uid123},\n\t\t\t\t\t{UID: uid456},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: map[eventingduck.SubscriberSpec]error{},\n\t\t},\n\t\t{\n\t\t\tname: \"Add And Remove Subscriptions\",\n\t\t\tfields: fields{\n\t\t\t\tDispatcherConfig: dispatcherConfig,\n\t\t\t\tsubscribers: map[types.UID]*SubscriberWrapper{\n\t\t\t\t\tuid123: createSubscriberWrapper(uid123),\n\t\t\t\t\tuid456: createSubscriberWrapper(uid456),\n\t\t\t\t},\n\t\t\t},\n\t\t\targs: args{\n\t\t\t\tsubscriberSpecs: []eventingduck.SubscriberSpec{\n\t\t\t\t\t{UID: uid456},\n\t\t\t\t\t{UID: uid789},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: map[eventingduck.SubscriberSpec]error{},\n\t\t},\n\t\t{\n\t\t\tname: \"Remove Penultimate Subscription\",\n\t\t\tfields: fields{\n\t\t\t\tDispatcherConfig: dispatcherConfig,\n\t\t\t\tsubscribers: map[types.UID]*SubscriberWrapper{\n\t\t\t\t\tuid123: createSubscriberWrapper(uid123),\n\t\t\t\t\tuid456: createSubscriberWrapper(uid456),\n\t\t\t\t},\n\t\t\t},\n\t\t\targs: args{\n\t\t\t\tsubscriberSpecs: []eventingduck.SubscriberSpec{\n\t\t\t\t\t{UID: uid123},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: 
map[eventingduck.SubscriberSpec]error{},\n\t\t},\n\t\t{\n\t\t\tname: \"Remove Last Subscription\",\n\t\t\tfields: fields{\n\t\t\t\tDispatcherConfig: dispatcherConfig,\n\t\t\t\tsubscribers: map[types.UID]*SubscriberWrapper{\n\t\t\t\t\tuid123: createSubscriberWrapper(uid123),\n\t\t\t\t},\n\t\t\t},\n\t\t\targs: args{\n\t\t\t\tsubscriberSpecs: []eventingduck.SubscriberSpec{},\n\t\t\t},\n\t\t\twant: map[eventingduck.SubscriberSpec]error{},\n\t\t},\n\t}\n\n\t// Filter To Those With \"only\" Flag (If Any Specified)\n\tfilteredTestCases := make([]TestCase, 0)\n\tfor _, testCase := range testCases {\n\t\tif testCase.only {\n\t\t\tfilteredTestCases = append(filteredTestCases, testCase)\n\t\t}\n\t}\n\tif len(filteredTestCases) == 0 {\n\t\tfilteredTestCases = testCases\n\t}\n\n\t// Execute The Test Cases (Create A DispatcherImpl & UpdateSubscriptions() :)\n\tfor _, testCase := range filteredTestCases {\n\t\tt.Run(testCase.name, func(t *testing.T) {\n\n\t\t\t// Create A New DispatcherImpl To Test\n\t\t\tdispatcher := &DispatcherImpl{\n\t\t\t\tDispatcherConfig: testCase.fields.DispatcherConfig,\n\t\t\t\tsubscribers: testCase.fields.subscribers,\n\t\t\t\tconsumerGroupFactory: &consumertesting.MockKafkaConsumerGroupFactory{},\n\t\t\t}\n\n\t\t\t// Perform The Test\n\t\t\tgot := dispatcher.UpdateSubscriptions(testCase.args.subscriberSpecs)\n\n\t\t\t// Verify Results\n\t\t\tassert.Equal(t, testCase.want, got)\n\n\t\t\t// Verify The Dispatcher's Tracking Of Subscribers Matches Specified State\n\t\t\tassert.Len(t, dispatcher.subscribers, len(testCase.args.subscriberSpecs))\n\t\t\tfor _, subscriber := range testCase.args.subscriberSpecs {\n\t\t\t\tassert.NotNil(t, dispatcher.subscribers[subscriber.UID])\n\t\t\t}\n\n\t\t\t// Shutdown The Dispatcher to Cleanup Resources\n\t\t\tdispatcher.Shutdown()\n\t\t\tassert.Len(t, dispatcher.subscribers, 0)\n\n\t\t\t// Pause Briefly To Let Any Async Shutdown Finish (Lame But Only For Visual Confirmation Of Logging ;)\n\t\t\ttime.Sleep(500 * 
time.Millisecond)\n\t\t})\n\t}\n}", "func TestComboBehavior(t *testing.T) {\n\tassert := audit.NewTestingAssertion(t, true)\n\tgenerator := audit.NewGenerator(audit.FixedRand())\n\tsigc := audit.MakeSigChan()\n\tenv := cells.NewEnvironment(\"combo-behavior\")\n\tdefer env.Stop()\n\n\tmatcher := func(accessor cells.EventSinkAccessor) (cells.CriterionMatch, cells.Payload) {\n\t\tanalyzer := cells.NewEventSinkAnalyzer(accessor)\n\t\tcombo := map[string]int{\n\t\t\t\"a\": 0,\n\t\t\t\"b\": 0,\n\t\t\t\"c\": 0,\n\t\t\t\"d\": 0,\n\t\t}\n\t\tmatches, err := analyzer.Match(func(index int, event cells.Event) (bool, error) {\n\t\t\t_, ok := combo[event.Topic()]\n\t\t\tif ok {\n\t\t\t\tcombo[event.Topic()]++\n\t\t\t}\n\t\t\treturn ok, nil\n\t\t})\n\t\tif err != nil || !matches {\n\t\t\treturn cells.CriterionDropLast, nil\n\t\t}\n\t\tfor _, count := range combo {\n\t\t\tif count == 0 {\n\t\t\t\treturn cells.CriterionKeep, nil\n\t\t\t}\n\t\t}\n\t\tpayload, err := cells.NewPayload(combo)\n\t\tassert.Nil(err)\n\t\treturn cells.CriterionDone, payload\n\t}\n\tprocessor := func(accessor cells.EventSinkAccessor) (cells.Payload, error) {\n\t\tanalyzer := cells.NewEventSinkAnalyzer(accessor)\n\t\tok, err := analyzer.Match(func(index int, event cells.Event) (bool, error) {\n\t\t\tvar payload map[string]int\n\t\t\tif err := event.Payload().Unmarshal(&payload); err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tif len(payload) != 4 {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t\tfor key := range payload {\n\t\t\t\tif payload[key] == 0 {\n\t\t\t\t\treturn false, nil\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn true, nil\n\t\t})\n\t\tsigc <- ok\n\t\treturn nil, err\n\t}\n\n\ttopics := []string{\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"now\"}\n\n\tenv.StartCell(\"combiner\", behaviors.NewComboBehavior(matcher))\n\tenv.StartCell(\"collector\", behaviors.NewCollectorBehavior(100, processor))\n\tenv.Subscribe(\"combiner\", \"collector\")\n\n\tfor i := 0; i < 1000; i++ {\n\t\ttopic 
:= generator.OneStringOf(topics...)\n\t\tenv.EmitNew(\"combiner\", topic, nil)\n\t}\n\n\tenv.EmitNew(\"collector\", cells.TopicProcess, nil)\n\tassert.Wait(sigc, true, time.Minute)\n}", "func make_agreement(ag *contract_api.SolidityContract, agID []byte, sig_hash string, sig string, counterparty string, shouldWork bool) {\n tx_delay_toleration := 120\n err := error(nil)\n\n log.Printf(\"Make an agreement with ID:%v\\n\", agID)\n p := make([]interface{},0,10)\n p = append(p, agID)\n p = append(p, sig_hash[2:])\n p = append(p, sig[2:])\n p = append(p, counterparty)\n if _, err = ag.Invoke_method(\"create_agreement\", p); err != nil {\n log.Printf(\"...terminating, could not invoke create_agreement: %v\\n\", err)\n os.Exit(1)\n }\n log.Printf(\"Create agreement %v invoked.\\n\", agID)\n\n var res interface{}\n p = make([]interface{},0,10)\n p = append(p, counterparty)\n p = append(p, agID)\n byte_hash, _ := hex.DecodeString(sig_hash[2:])\n log.Printf(\"Binary Hash is: %v\\n\", byte_hash)\n start_timer := time.Now()\n for {\n if shouldWork {\n fmt.Printf(\"There should be a recorded contract hash, but it might be in a block we can't read yet.\\n\")\n } else {\n fmt.Printf(\"There should NOT be a recorded contract hash.\\n\")\n }\n if res, err = ag.Invoke_method(\"get_contract_hash\", p); err == nil {\n fmt.Printf(\"Received contract hash:%v.\\n\",res)\n if bytes.Compare([]byte(res.(string)), byte_hash) != 0 {\n if int(time.Now().Sub(start_timer).Seconds()) < tx_delay_toleration {\n fmt.Printf(\"Sleeping, waiting for the block with the Update.\\n\")\n time.Sleep(15 * time.Second)\n } else {\n if shouldWork {\n fmt.Printf(\"Timeout waiting for the Update.\\n\")\n os.Exit(1)\n } else {\n fmt.Printf(\"Timeout waiting for the Update. This is expected.\\n\")\n break\n }\n }\n } else {\n if shouldWork {\n log.Printf(\"Created agreement %v.\\n\", agID)\n break\n } else {\n fmt.Printf(\"Received contract hash. 
This is NOT expected: %v\\n\", res.(string))\n os.Exit(2)\n }\n }\n } else {\n fmt.Printf(\"Error on get_contract_hash: %v\\n\",err)\n os.Exit(1)\n }\n }\n}", "func (suite *InterestTestSuite) TestSynchronizeInterest() {\n\ttype args struct {\n\t\tctype string\n\t\tinitialTime time.Time\n\t\tinitialCollateral sdk.Coin\n\t\tinitialPrincipal sdk.Coin\n\t\ttimeElapsed int\n\t\texpectedFees sdk.Coin\n\t\texpectedFeesUpdatedTime time.Time\n\t}\n\n\ttype test struct {\n\t\tname string\n\t\targs args\n\t}\n\n\toneYearInSeconds := 31536000\n\ttestCases := []test{\n\t\t{\n\t\t\t\"1 year\",\n\t\t\targs{\n\t\t\t\tctype: \"bnb-a\",\n\t\t\t\tinitialTime: time.Date(2020, 12, 15, 14, 0, 0, 0, time.UTC),\n\t\t\t\tinitialCollateral: c(\"bnb\", 1000000000000),\n\t\t\t\tinitialPrincipal: c(\"usdx\", 100000000000),\n\t\t\t\ttimeElapsed: oneYearInSeconds,\n\t\t\t\texpectedFees: c(\"usdx\", 5000000000),\n\t\t\t\texpectedFeesUpdatedTime: time.Date(2020, 12, 15, 14, 0, 0, 0, time.UTC).Add(time.Duration(int(time.Second) * oneYearInSeconds)),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"1 month\",\n\t\t\targs{\n\t\t\t\tctype: \"bnb-a\",\n\t\t\t\tinitialTime: time.Date(2020, 12, 15, 14, 0, 0, 0, time.UTC),\n\t\t\t\tinitialCollateral: c(\"bnb\", 1000000000000),\n\t\t\t\tinitialPrincipal: c(\"usdx\", 100000000000),\n\t\t\t\ttimeElapsed: 86400 * 30,\n\t\t\t\texpectedFees: c(\"usdx\", 401820189),\n\t\t\t\texpectedFeesUpdatedTime: time.Date(2020, 12, 15, 14, 0, 0, 0, time.UTC).Add(time.Duration(int(time.Second) * 86400 * 30)),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"7 seconds\",\n\t\t\targs{\n\t\t\t\tctype: \"bnb-a\",\n\t\t\t\tinitialTime: time.Date(2020, 12, 15, 14, 0, 0, 0, time.UTC),\n\t\t\t\tinitialCollateral: c(\"bnb\", 1000000000000),\n\t\t\t\tinitialPrincipal: c(\"usdx\", 100000000000),\n\t\t\t\ttimeElapsed: 7,\n\t\t\t\texpectedFees: c(\"usdx\", 1083),\n\t\t\t\texpectedFeesUpdatedTime: time.Date(2020, 12, 15, 14, 0, 0, 0, time.UTC).Add(time.Duration(int(time.Second) * 
7)),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"7 seconds - zero apy\",\n\t\t\targs{\n\t\t\t\tctype: \"busd-a\",\n\t\t\t\tinitialTime: time.Date(2020, 12, 15, 14, 0, 0, 0, time.UTC),\n\t\t\t\tinitialCollateral: c(\"busd\", 10000000000000),\n\t\t\t\tinitialPrincipal: c(\"usdx\", 10000000000),\n\t\t\t\ttimeElapsed: 7,\n\t\t\t\texpectedFees: c(\"usdx\", 0),\n\t\t\t\texpectedFeesUpdatedTime: time.Date(2020, 12, 15, 14, 0, 0, 0, time.UTC).Add(time.Duration(int(time.Second) * 7)),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"7 seconds - fees round to zero\",\n\t\t\targs{\n\t\t\t\tctype: \"bnb-a\",\n\t\t\t\tinitialTime: time.Date(2020, 12, 15, 14, 0, 0, 0, time.UTC),\n\t\t\t\tinitialCollateral: c(\"bnb\", 1000000000),\n\t\t\t\tinitialPrincipal: c(\"usdx\", 10000000),\n\t\t\t\ttimeElapsed: 7,\n\t\t\t\texpectedFees: c(\"usdx\", 0),\n\t\t\t\texpectedFeesUpdatedTime: time.Date(2020, 12, 15, 14, 0, 0, 0, time.UTC),\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tsuite.Run(tc.name, func() {\n\t\t\tsuite.SetupTest()\n\t\t\tsuite.ctx = suite.ctx.WithBlockTime(tc.args.initialTime)\n\n\t\t\t// setup account state\n\t\t\t_, addrs := app.GeneratePrivKeyAddressPairs(1)\n\t\t\tak := suite.app.GetAccountKeeper()\n\t\t\t// setup the first account\n\t\t\tacc := ak.NewAccountWithAddress(suite.ctx, addrs[0])\n\t\t\tak.SetAccount(suite.ctx, acc)\n\t\t\tbk := suite.app.GetBankKeeper()\n\n\t\t\terr := bk.MintCoins(suite.ctx, types.ModuleName, cs(tc.args.initialCollateral))\n\t\t\tsuite.Require().NoError(err)\n\t\t\terr = bk.SendCoinsFromModuleToAccount(suite.ctx, types.ModuleName, addrs[0], cs(tc.args.initialCollateral))\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\t// setup pricefeed\n\t\t\tpk := suite.app.GetPriceFeedKeeper()\n\t\t\t_, err = pk.SetPrice(suite.ctx, sdk.AccAddress{}, \"bnb:usd\", d(\"17.25\"), tc.args.expectedFeesUpdatedTime.Add(time.Second))\n\t\t\tsuite.NoError(err)\n\t\t\t_, err = pk.SetPrice(suite.ctx, sdk.AccAddress{}, \"busd:usd\", d(\"1\"), 
tc.args.expectedFeesUpdatedTime.Add(time.Second))\n\t\t\tsuite.NoError(err)\n\n\t\t\t// setup cdp state\n\t\t\tsuite.keeper.SetPreviousAccrualTime(suite.ctx, tc.args.ctype, suite.ctx.BlockTime())\n\t\t\tsuite.keeper.SetInterestFactor(suite.ctx, tc.args.ctype, sdk.OneDec())\n\t\t\terr = suite.keeper.AddCdp(suite.ctx, addrs[0], tc.args.initialCollateral, tc.args.initialPrincipal, tc.args.ctype)\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\tupdatedBlockTime := suite.ctx.BlockTime().Add(time.Duration(int(time.Second) * tc.args.timeElapsed))\n\t\t\tsuite.ctx = suite.ctx.WithBlockTime(updatedBlockTime)\n\t\t\terr = suite.keeper.AccumulateInterest(suite.ctx, tc.args.ctype)\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\tcdp, found := suite.keeper.GetCDP(suite.ctx, tc.args.ctype, 1)\n\t\t\tsuite.Require().True(found)\n\n\t\t\tcdp = suite.keeper.SynchronizeInterest(suite.ctx, cdp)\n\n\t\t\tsuite.Require().Equal(tc.args.expectedFees, cdp.AccumulatedFees)\n\t\t\tsuite.Require().Equal(tc.args.expectedFeesUpdatedTime, cdp.FeesUpdated)\n\t\t})\n\t}\n}", "func TestReconcileSubscription(t *testing.T) {\n\tnewReconciledAdapter := mustNewReconciledAdapter(t)\n\tnewReconciledSource := mustNewReconciledSource(t)\n\n\ttestCases := rt.TableTest{\n\t\t// Regular lifecycle\n\n\t\t{\n\t\t\tName: \"Not yet subscribed\",\n\t\t\tKey: tKey,\n\t\t\tOtherTestData: mergeTableRowData(\n\t\t\t\tmakeMockTopics(true),\n\t\t\t\tmakeMockSubscriptionsPages(false),\n\t\t\t),\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tnewReconciledSource(),\n\t\t\t\tnewReconciledServiceAccount(),\n\t\t\t\tnewReconciledConfigWatchRoleBinding(),\n\t\t\t\tnewReconciledMTAdapterRoleBinding(),\n\t\t\t\tnewReconciledAdapter(),\n\t\t\t},\n\t\t\tWantStatusUpdates: []clientgotesting.UpdateActionImpl{{\n\t\t\t\tObject: newReconciledSource(subscribed),\n\t\t\t}},\n\t\t\tWantEvents: []string{\n\t\t\t\tsubscribedEvent(),\n\t\t\t},\n\t\t\tPostConditions: []func(*testing.T, 
*rt.TableRow){\n\t\t\t\tcalledSubscribe(true),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Already subscribed\",\n\t\t\tKey: tKey,\n\t\t\tOtherTestData: mergeTableRowData(\n\t\t\t\tmakeMockTopics(true),\n\t\t\t\tmakeMockSubscriptionsPages(true),\n\t\t\t),\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tnewReconciledSource(subscribed),\n\t\t\t\tnewReconciledServiceAccount(),\n\t\t\t\tnewReconciledConfigWatchRoleBinding(),\n\t\t\t\tnewReconciledMTAdapterRoleBinding(),\n\t\t\t\tnewReconciledAdapter(),\n\t\t\t},\n\t\t\tPostConditions: []func(*testing.T, *rt.TableRow){\n\t\t\t\tcalledSubscribe(false),\n\t\t\t},\n\t\t},\n\n\t\t// Finalization\n\n\t\t{\n\t\t\tName: \"Deletion while subscribed\",\n\t\t\tKey: tKey,\n\t\t\tOtherTestData: mergeTableRowData(\n\t\t\t\tmakeMockTopics(true),\n\t\t\t\tmakeMockSubscriptionsPages(true),\n\t\t\t),\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tnewReconciledSource(subscribed, deleted),\n\t\t\t\tnewReconciledServiceAccount(),\n\t\t\t\tnewReconciledConfigWatchRoleBinding(),\n\t\t\t\tnewReconciledMTAdapterRoleBinding(),\n\t\t\t\tnewReconciledAdapter(),\n\t\t\t},\n\t\t\tWantPatches: []clientgotesting.PatchActionImpl{\n\t\t\t\tunsetFinalizerPatch(),\n\t\t\t},\n\t\t\tWantEvents: []string{\n\t\t\t\tfinalizedEvent(),\n\t\t\t\tunsubscribedEvent(),\n\t\t\t},\n\t\t\tPostConditions: []func(*testing.T, *rt.TableRow){\n\t\t\t\tcalledUnsubscribe(true),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Deletion while subscribed and topic is gone\",\n\t\t\tKey: tKey,\n\t\t\tOtherTestData: mergeTableRowData(\n\t\t\t\tmakeMockTopics(false),\n\t\t\t\tmakeMockSubscriptionsPages(true),\n\t\t\t),\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tnewReconciledSource(subscribed, deleted),\n\t\t\t\tnewReconciledServiceAccount(),\n\t\t\t\tnewReconciledConfigWatchRoleBinding(),\n\t\t\t\tnewReconciledMTAdapterRoleBinding(),\n\t\t\t\tnewReconciledAdapter(),\n\t\t\t},\n\t\t\tWantPatches: []clientgotesting.PatchActionImpl{\n\t\t\t\tunsetFinalizerPatch(),\n\t\t\t},\n\t\t\tWantEvents: 
[]string{\n\t\t\t\tfinalizedEvent(),\n\t\t\t\tunsubscribedEvent(),\n\t\t\t},\n\t\t\tPostConditions: []func(*testing.T, *rt.TableRow){\n\t\t\t\tcalledUnsubscribe(true),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Deletion while not subscribed\",\n\t\t\tKey: tKey,\n\t\t\tOtherTestData: mergeTableRowData(\n\t\t\t\tmakeMockTopics(true),\n\t\t\t\tmakeMockSubscriptionsPages(false),\n\t\t\t),\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tnewReconciledSource(deleted),\n\t\t\t\tnewReconciledServiceAccount(),\n\t\t\t\tnewReconciledConfigWatchRoleBinding(),\n\t\t\t\tnewReconciledMTAdapterRoleBinding(),\n\t\t\t\tnewReconciledAdapter(),\n\t\t\t},\n\t\t\tWantPatches: []clientgotesting.PatchActionImpl{\n\t\t\t\tunsetFinalizerPatch(),\n\t\t\t},\n\t\t\tWantEvents: []string{\n\t\t\t\tfinalizedEvent(),\n\t\t\t\tnoopUnsubscribeEvent(),\n\t\t\t},\n\t\t\tPostConditions: []func(*testing.T, *rt.TableRow){\n\t\t\t\tcalledUnsubscribe(false),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Deletion while not subscribed and topic is gone\",\n\t\t\tKey: tKey,\n\t\t\tOtherTestData: mergeTableRowData(\n\t\t\t\tmakeMockTopics(false),\n\t\t\t\tmakeMockSubscriptionsPages(false),\n\t\t\t),\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tnewReconciledSource(deleted),\n\t\t\t\tnewReconciledServiceAccount(),\n\t\t\t\tnewReconciledConfigWatchRoleBinding(),\n\t\t\t\tnewReconciledMTAdapterRoleBinding(),\n\t\t\t\tnewReconciledAdapter(),\n\t\t\t},\n\t\t\tWantPatches: []clientgotesting.PatchActionImpl{\n\t\t\t\tunsetFinalizerPatch(),\n\t\t\t},\n\t\t\tWantEvents: []string{\n\t\t\t\tfinalizedEvent(),\n\t\t\t},\n\t\t\tPostConditions: []func(*testing.T, *rt.TableRow){\n\t\t\t\tcalledUnsubscribe(false),\n\t\t\t},\n\t\t},\n\n\t\t// Error cases\n\n\t\t{\n\t\t\tName: \"Topic not found while subscribing\",\n\t\t\tKey: tKey,\n\t\t\tWantErr: true,\n\t\t\tOtherTestData: mergeTableRowData(\n\t\t\t\tmakeMockTopics(false),\n\t\t\t\tmakeMockSubscriptionsPages(false),\n\t\t\t),\n\t\t\tObjects: 
[]runtime.Object{\n\t\t\t\tnewReconciledSource(),\n\t\t\t\tnewReconciledServiceAccount(),\n\t\t\t\tnewReconciledConfigWatchRoleBinding(),\n\t\t\t\tnewReconciledMTAdapterRoleBinding(),\n\t\t\t\tnewReconciledAdapter(),\n\t\t\t},\n\t\t\tWantStatusUpdates: []clientgotesting.UpdateActionImpl{{\n\t\t\t\tObject: newReconciledSource(notSubscribedTopicNotFound),\n\t\t\t}},\n\t\t\tWantEvents: []string{\n\t\t\t\ttopicNotFoundSubscribeEvent(),\n\t\t\t},\n\t\t\tPostConditions: []func(*testing.T, *rt.TableRow){\n\t\t\t\tcalledSubscribe(false),\n\t\t\t},\n\t\t},\n\t}\n\n\tctor := reconcilerCtor(adapterCfg)\n\n\ttestCases.Test(t, MakeFactory(ctor))\n}", "func TestV3ElectionObserve(t *testing.T) {\n\tintegration.BeforeTest(t)\n\tclus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})\n\tdefer clus.Terminate(t)\n\n\tlc := integration.ToGRPC(clus.Client(0)).Election\n\n\t// observe leadership events\n\tobservec := make(chan struct{}, 1)\n\tgo func() {\n\t\tdefer close(observec)\n\t\ts, err := lc.Observe(context.Background(), &epb.LeaderRequest{Name: []byte(\"foo\")})\n\t\tobservec <- struct{}{}\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t\tfor i := 0; i < 10; i++ {\n\t\t\tresp, rerr := s.Recv()\n\t\t\tif rerr != nil {\n\t\t\t\tt.Error(rerr)\n\t\t\t}\n\t\t\trespV := 0\n\t\t\tfmt.Sscanf(string(resp.Kv.Value), \"%d\", &respV)\n\t\t\t// leader transitions should not go backwards\n\t\t\tif respV < i {\n\t\t\t\tt.Errorf(`got observe value %q, expected >= \"%d\"`, string(resp.Kv.Value), i)\n\t\t\t}\n\t\t\ti = respV\n\t\t}\n\t}()\n\n\tselect {\n\tcase <-observec:\n\tcase <-time.After(time.Second):\n\t\tt.Fatalf(\"observe stream took too long to start\")\n\t}\n\n\tlease1, err1 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})\n\tif err1 != nil {\n\t\tt.Fatal(err1)\n\t}\n\tc1, cerr1 := lc.Campaign(context.TODO(), &epb.CampaignRequest{Name: []byte(\"foo\"), Lease: lease1.ID, Value: []byte(\"0\")})\n\tif cerr1 != nil 
{\n\t\tt.Fatal(cerr1)\n\t}\n\n\t// overlap other leader so it waits on resign\n\tleader2c := make(chan struct{})\n\tgo func() {\n\t\tdefer close(leader2c)\n\n\t\tlease2, err2 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})\n\t\tif err2 != nil {\n\t\t\tt.Error(err2)\n\t\t}\n\t\tc2, cerr2 := lc.Campaign(context.TODO(), &epb.CampaignRequest{Name: []byte(\"foo\"), Lease: lease2.ID, Value: []byte(\"5\")})\n\t\tif cerr2 != nil {\n\t\t\tt.Error(cerr2)\n\t\t}\n\t\tfor i := 6; i < 10; i++ {\n\t\t\tv := []byte(fmt.Sprintf(\"%d\", i))\n\t\t\treq := &epb.ProclaimRequest{Leader: c2.Leader, Value: v}\n\t\t\tif _, err := lc.Proclaim(context.TODO(), req); err != nil {\n\t\t\t\tt.Error(err)\n\t\t\t}\n\t\t}\n\t}()\n\n\tfor i := 1; i < 5; i++ {\n\t\tv := []byte(fmt.Sprintf(\"%d\", i))\n\t\treq := &epb.ProclaimRequest{Leader: c1.Leader, Value: v}\n\t\tif _, err := lc.Proclaim(context.TODO(), req); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\t// start second leader\n\tlc.Resign(context.TODO(), &epb.ResignRequest{Leader: c1.Leader})\n\n\tselect {\n\tcase <-observec:\n\tcase <-time.After(time.Second):\n\t\tt.Fatalf(\"observe did not observe all events in time\")\n\t}\n\n\t<-leader2c\n}", "func terminate_agreement(ag *contract_api.SolidityContract, agID []byte, counterParty string, shouldWork bool) {\n log.Printf(\"Terminating agreement %v.\\n\", agID)\n tx_delay_toleration := 120\n err := error(nil)\n\n p := make([]interface{},0,10)\n p = append(p, counterParty)\n p = append(p, agID)\n p = append(p, 1)\n if _, err = ag.Invoke_method(\"terminate_agreement\", p); err != nil {\n log.Printf(\"...terminating, could not invoke terminate_agreement: %v\\n\", err)\n os.Exit(1)\n }\n log.Printf(\"Terminate agreement %v invoked.\\n\", agID)\n\n p = make([]interface{},0,10)\n p = append(p, counterParty)\n p = append(p, agID)\n empty_bytes := make([]byte, 32)\n var res interface{}\n start_timer := time.Now()\n for {\n fmt.Printf(\"There 
should NOT be a recorded contract hash, but it might still be visible for a few blocks.\\n\")\n if res, err = ag.Invoke_method(\"get_contract_hash\", p); err == nil {\n fmt.Printf(\"Received contract hash:%v.\\n\",res)\n if shouldWork {\n if bytes.Compare([]byte(res.(string)), empty_bytes) != 0 {\n if int(time.Now().Sub(start_timer).Seconds()) < tx_delay_toleration {\n fmt.Printf(\"Sleeping, waiting for the block with the Update.\\n\")\n time.Sleep(15 * time.Second)\n } else {\n fmt.Printf(\"Timeout waiting for the Update.\\n\")\n os.Exit(1)\n }\n } else {\n log.Printf(\"Terminated agreement %v.\\n\", agID)\n break\n }\n } else {\n if bytes.Compare([]byte(res.(string)), empty_bytes) == 0 {\n if int(time.Now().Sub(start_timer).Seconds()) < tx_delay_toleration {\n fmt.Printf(\"Sleeping, waiting for the block with the Update.\\n\")\n time.Sleep(15 * time.Second)\n } else {\n fmt.Printf(\"Timeout waiting for the Update. This is expected\\n\")\n break\n }\n } else {\n fmt.Printf(\"Received contract hash. 
This is NOT expected: %v\\n\", res.(string))\n os.Exit(2)\n }\n }\n } else {\n fmt.Printf(\"Error on get_contract_hash: %v\\n\",err)\n os.Exit(1)\n }\n }\n}", "func TestMultiClientCasSingleUpdate(t *testing.T) {\n\n\tdataChannel := make(chan int)\n\n\tconn := CreateConnections(t,10)\n\tname := \"hi.txt\"\n contents := \"hello 1\"\n scanner := bufio.NewScanner(conn[0])\n\t_,err := fmt.Fprintf(conn[0], \"write %v %v\\r\\n%v\\r\\n\", name, len(contents) ,contents)\n\tif err !=nil {\n\t\tt.Error(err.Error())\n\t}\n\tscanner.Scan()\n\tversion := VerifyWriteSucess(t,scanner.Text())\n\n\tfor i:=0;i<10;i++ {\n\t\tcontents = \"hello \" + strconv.Itoa(i+2);\n\t\tgo PerformCas(version,name,conn[i],dataChannel)\n\t}\n\t// verify with read\n\tvar b int = 0\n for i := 0; i < 10; i++ {\n b = b + <-dataChannel\n }\n if b!=1 {\n \tt.Error(fmt.Sprintf(\"More than one client has chabged version %v of file %v\",version,name)) // t.Error is visible when running `go test -verbose`\n }\n}", "func TestCompliancetestDemo(t *testing.T) {\n\t// Register new Vendor account\n\tvendor := utils.CreateNewAccount(auth.AccountRoles{auth.Vendor}, testconstants.VID)\n\n\t// Register new TestHouse account\n\ttestHouse := utils.CreateNewAccount(auth.AccountRoles{auth.TestHouse}, 0)\n\n\t// Register new TestHouse account\n\tsecondTestHouse := utils.CreateNewAccount(auth.AccountRoles{auth.TestHouse}, 0)\n\n\t// Publish model info\n\tmodel := utils.NewMsgAddModel(vendor.Address, testconstants.VID)\n\t_, _ = utils.AddModel(model, vendor)\n\t// Publish modelVersion\n\tmodelVersion := utils.NewMsgAddModelVersion(model.VID, model.PID,\n\t\ttestconstants.SoftwareVersion, testconstants.SoftwareVersionString, vendor.Address)\n\t_, _ = utils.AddModelVersion(modelVersion, vendor)\n\n\t// Publish first testing result using Sign and Broadcast AddTestingResult message\n\tfirstTestingResult := utils.NewMsgAddTestingResult(model.VID, model.PID,\n\t\tmodelVersion.SoftwareVersion, modelVersion.SoftwareVersionString, 
testHouse.Address)\n\tutils.SignAndBroadcastMessage(testHouse, firstTestingResult)\n\n\t// Check testing result is created\n\treceivedTestingResult, _ := utils.GetTestingResult(firstTestingResult.VID,\n\t\tfirstTestingResult.PID, firstTestingResult.SoftwareVersion)\n\trequire.Equal(t, receivedTestingResult.VID, firstTestingResult.VID)\n\trequire.Equal(t, receivedTestingResult.PID, firstTestingResult.PID)\n\trequire.Equal(t, receivedTestingResult.SoftwareVersion, firstTestingResult.SoftwareVersion)\n\trequire.Equal(t, 1, len(receivedTestingResult.Results))\n\trequire.Equal(t, receivedTestingResult.Results[0].TestResult, firstTestingResult.TestResult)\n\trequire.Equal(t, receivedTestingResult.Results[0].TestDate, firstTestingResult.TestDate)\n\trequire.Equal(t, receivedTestingResult.Results[0].Owner, firstTestingResult.Signer)\n\n\t// Publish second model info\n\tsecondModel := utils.NewMsgAddModel(vendor.Address, testconstants.VID)\n\t_, _ = utils.AddModel(secondModel, vendor)\n\t// Publish second modelVersion\n\tsecondModelVersion := utils.NewMsgAddModelVersion(secondModel.VID, secondModel.PID,\n\t\ttestconstants.SoftwareVersion, testconstants.SoftwareVersionString, vendor.Address)\n\t_, _ = utils.AddModelVersion(secondModelVersion, vendor)\n\n\t// Publish second testing result using POST\n\tsecondTestingResult := utils.NewMsgAddTestingResult(secondModel.VID, secondModel.PID,\n\t\tsecondModelVersion.SoftwareVersion, secondModelVersion.SoftwareVersionString, testHouse.Address)\n\t_, _ = utils.PublishTestingResult(secondTestingResult, testHouse)\n\n\t// Check testing result is created\n\treceivedTestingResult, _ = utils.GetTestingResult(secondTestingResult.VID,\n\t\tsecondTestingResult.PID, secondTestingResult.SoftwareVersion)\n\trequire.Equal(t, receivedTestingResult.VID, secondTestingResult.VID)\n\trequire.Equal(t, receivedTestingResult.PID, secondTestingResult.PID)\n\trequire.Equal(t, receivedTestingResult.SoftwareVersion, 
secondTestingResult.SoftwareVersion)\n\trequire.Equal(t, 1, len(receivedTestingResult.Results))\n\trequire.Equal(t, receivedTestingResult.Results[0].TestResult, secondTestingResult.TestResult)\n\trequire.Equal(t, receivedTestingResult.Results[0].TestDate, secondTestingResult.TestDate)\n\trequire.Equal(t, receivedTestingResult.Results[0].Owner, secondTestingResult.Signer)\n\n\t// Publish new testing result for second model\n\tthirdTestingResult := utils.NewMsgAddTestingResult(secondModel.VID, secondModel.PID,\n\t\tsecondModelVersion.SoftwareVersion, secondModelVersion.SoftwareVersionString, secondTestHouse.Address)\n\t_, _ = utils.PublishTestingResult(thirdTestingResult, secondTestHouse)\n\n\t// Check testing result is created\n\treceivedTestingResult, _ = utils.GetTestingResult(secondTestingResult.VID,\n\t\tsecondTestingResult.PID, secondTestingResult.SoftwareVersion)\n\trequire.Equal(t, 2, len(receivedTestingResult.Results))\n\trequire.Equal(t, receivedTestingResult.Results[0].Owner, secondTestingResult.Signer)\n\trequire.Equal(t, receivedTestingResult.Results[0].TestResult, secondTestingResult.TestResult)\n\trequire.Equal(t, receivedTestingResult.Results[1].Owner, thirdTestingResult.Signer)\n\trequire.Equal(t, receivedTestingResult.Results[1].TestResult, thirdTestingResult.TestResult)\n}", "func TestEventCreate(t *testing.T) {\n\ta := assert.New(t)\n\tctx := context.Background()\n\tpostgresC, db, err := setupPostgresContainer(ctx)\n\tif postgresC != nil {\n\t\tdefer postgresC.Terminate(ctx)\n\t}\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\teventsStorage := NewEvents(db)\n\tvar wg sync.WaitGroup\n\n\tassumeTotal := 0.\n\tvar tl sync.Mutex\n\n\tfor i := 0; i < 100; i++ {\n\t\twg.Add(1)\n\t\tgo func(i int) {\n\t\t\tdefer wg.Done()\n\t\t\tamount := float64(rand.Intn(100))\n\t\t\tvar e models.Event\n\t\t\tif i%5 == 0 { // StateWin\n\t\t\t\te = genTestEvent(amount)\n\t\t\t} else { // StateLoss\n\t\t\t\tamount = amount * -1\n\t\t\t\te = 
genTestEvent(amount)\n\t\t\t}\n\t\t\terr = eventsStorage.Create(ctx, e)\n\t\t\tif err != nil && errors.Cause(err) != errNegativeBalance {\n\t\t\t\tt.Error(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err == nil {\n\t\t\t\ttl.Lock()\n\t\t\t\tassumeTotal += e.Amount\n\t\t\t\ttl.Unlock()\n\t\t\t}\n\t\t}(i)\n\t\tt := time.Duration(rand.Int63n(5))\n\t\ttime.Sleep(t * time.Millisecond)\n\t}\n\n\twg.Wait()\n\n\tbal, err := getBalanceWithLock(ctx, db)\n\ta.NoError(err)\n\n\tt.Logf(\"Total balance: %f\", bal)\n\n\ta.Equal(assumeTotal, bal)\n\n\tif bal < 0 {\n\t\tt.Error(\"Negative balance\")\n\t}\n}", "func Test_Complete_Flow(t *testing.T) {\n\tassert := assert.New(t)\n\n\tdao := dao.NewMetricDaoMemoryImpl(3) // Setting the TTL in 3 seconds for testing purpose\n\tsrv := service.NewMetricsServiceImpl(dao)\n\n\t// T0\n\terr := srv.AddMetric(\"metric1\", 1)\n\tassert.Nil(err)\n\n\terr = srv.AddMetric(\"metric2\", 3)\n\tassert.Nil(err)\n\n\terr = srv.AddMetric(\"metric2\", 1)\n\tassert.Nil(err)\n\n\terr = srv.AddMetric(\"metric3\", -2)\n\tassert.Nil(err)\n\n\t// Checking the metrics\n\tval, err := srv.SumMetric(\"metric1\")\n\tassert.Nil(err)\n\tassert.Equal(1, val)\n\n\tval, err = srv.SumMetric(\"metric2\")\n\tassert.Nil(err)\n\tassert.Equal(4, val)\n\n\tval, err = srv.SumMetric(\"metric3\")\n\tassert.Nil(err)\n\tassert.Equal(-2, val)\n\n\t// sleeping 3 secs\n\ttime.Sleep(time.Second * 2)\n\n\t// T1 - adding more values to the metrics\n\terr = srv.AddMetric(\"metric1\", 10)\n\tassert.Nil(err)\n\n\terr = srv.AddMetric(\"metric2\", -2)\n\tassert.Nil(err)\n\n\terr = srv.AddMetric(\"metric3\", 10)\n\tassert.Nil(err)\n\n\terr = srv.AddMetric(\"metric3\", 22)\n\tassert.Nil(err)\n\n\t// Checking the metrics again\n\tval, err = srv.SumMetric(\"metric1\")\n\tassert.Nil(err)\n\tassert.Equal(11, val)\n\n\tval, err = srv.SumMetric(\"metric2\")\n\tassert.Nil(err)\n\tassert.Equal(2, val)\n\n\tval, err = srv.SumMetric(\"metric3\")\n\tassert.Nil(err)\n\tassert.Equal(30, val)\n\n\t// sleeping 3 
more seconds and the metrics added on T0 should be removed\n\ttime.Sleep(time.Second * 2)\n\n\t// T2 - Checking the metrics again\n\tval, err = srv.SumMetric(\"metric1\")\n\tassert.Nil(err)\n\tassert.Equal(10, val)\n\n\tval, err = srv.SumMetric(\"metric2\")\n\tassert.Nil(err)\n\tassert.Equal(-2, val)\n\n\tval, err = srv.SumMetric(\"metric3\")\n\tassert.Nil(err)\n\tassert.Equal(32, val)\n\n\t// sleeping 5 more seconds and there shouldn't be more metrics\n\ttime.Sleep(time.Second * 3)\n\n\tval, err = srv.SumMetric(\"metric1\")\n\tassert.Nil(err)\n\tassert.Equal(0, val)\n\n\tval, err = srv.SumMetric(\"metric2\")\n\tassert.Nil(err)\n\tassert.Equal(0, val)\n\n\tval, err = srv.SumMetric(\"metric3\")\n\tassert.Nil(err)\n\tassert.Equal(0, val)\n}", "func TestCmdConfigChangeEvents(t *testing.T) {\n\tdefer cleanTestArtifacts(t)\n\tvar err error\n\terr = testcert.GenerateCert(\"mail2.guerrillamail.com\", \"\", 365*24*time.Hour, false, 2048, \"P256\", \"../../tests/\")\n\tif err != nil {\n\t\tt.Error(\"failed to generate a test certificate\", err)\n\t\tt.FailNow()\n\t}\n\n\toldconf := &guerrilla.AppConfig{}\n\tif err := oldconf.Load([]byte(configJsonA)); err != nil {\n\t\tt.Error(\"configJsonA is invalid\", err)\n\t}\n\n\tnewconf := &guerrilla.AppConfig{}\n\tif err := newconf.Load([]byte(configJsonB)); err != nil {\n\t\tt.Error(\"configJsonB is invalid\", err)\n\t}\n\n\tnewerconf := &guerrilla.AppConfig{}\n\tif err := newerconf.Load([]byte(configJsonC)); err != nil {\n\t\tt.Error(\"configJsonC is invalid\", err)\n\t}\n\n\texpectedEvents := map[guerrilla.Event]bool{\n\t\tguerrilla.EventConfigBackendConfig: false,\n\t\tguerrilla.EventConfigServerNew: false,\n\t}\n\tmainlog, err = getTestLog()\n\tif err != nil {\n\t\tt.Error(\"could not get logger,\", err)\n\t\tt.FailNow()\n\t}\n\n\tbcfg := backends.BackendConfig{\"log_received_mails\": true}\n\tbackend, err := backends.New(bcfg, mainlog)\n\tapp, err := guerrilla.New(oldconf, backend, mainlog)\n\tif err != nil 
{\n\t\tt.Error(\"Failed to create new app\", err)\n\t}\n\ttoUnsubscribe := map[guerrilla.Event]func(c *guerrilla.AppConfig){}\n\ttoUnsubscribeS := map[guerrilla.Event]func(c *guerrilla.ServerConfig){}\n\n\tfor event := range expectedEvents {\n\t\t// Put in anon func since range is overwriting event\n\t\tfunc(e guerrilla.Event) {\n\t\t\tif strings.Index(e.String(), \"server_change\") == 0 {\n\t\t\t\tf := func(c *guerrilla.ServerConfig) {\n\t\t\t\t\texpectedEvents[e] = true\n\t\t\t\t}\n\t\t\t\t_ = app.Subscribe(e, f)\n\t\t\t\ttoUnsubscribeS[e] = f\n\t\t\t} else {\n\t\t\t\tf := func(c *guerrilla.AppConfig) {\n\t\t\t\t\texpectedEvents[e] = true\n\t\t\t\t}\n\t\t\t\t_ = app.Subscribe(e, f)\n\t\t\t\ttoUnsubscribe[e] = f\n\t\t\t}\n\n\t\t}(event)\n\t}\n\n\t// emit events\n\tnewconf.EmitChangeEvents(oldconf, app)\n\tnewerconf.EmitChangeEvents(newconf, app)\n\t// unsubscribe\n\tfor unevent, unfun := range toUnsubscribe {\n\t\t_ = app.Unsubscribe(unevent, unfun)\n\t}\n\tfor unevent, unfun := range toUnsubscribeS {\n\t\t_ = app.Unsubscribe(unevent, unfun)\n\t}\n\n\tfor event, val := range expectedEvents {\n\t\tif val == false {\n\t\t\tt.Error(\"Did not fire config change event:\", event)\n\t\t\tt.FailNow()\n\t\t}\n\t}\n\n}", "func TestMockValidity(t *testing.T) {\n\tnr := 50\n\t_, hlp := agreement.WireAgreement(nr)\n\thash, _ := crypto.RandEntropy(32)\n\thandler := agreement.NewHandler(hlp.Keys[0], *hlp.P)\n\n\tfor i := 0; i < nr; i++ {\n\t\ta := message.MockAgreement(hash, 1, 3, hlp.Keys, hlp.P, i)\n\t\tif !assert.NoError(t, handler.Verify(a)) {\n\t\t\tt.FailNow()\n\t\t}\n\t}\n}", "func TestViewChangeProofReceiveMessages(t *testing.T) {\n\tp := newPaxos(&Config{ID: 1, NodeCount: 5})\n\n\tif exp, a := uint64(1), p.lastAttempted; a != exp {\n\t\tt.Fatalf(\"expected last attempted view %d; found %d\", exp, a)\n\t}\n\tif exp, a := uint64(0), p.lastInstalled; a != exp {\n\t\tt.Fatalf(\"expected last installed view %d; found %d\", exp, a)\n\t}\n\n\t// View change proofs from current 
or previous views are discarded.\n\tvcp := &pb.ViewChangeProof{NodeId: 0, InstalledView: 0}\n\tp.onViewChangeProof(vcp)\n\tif exp, a := uint64(0), p.lastInstalled; a != exp {\n\t\tt.Fatalf(\"expected last installed view %d; found %d\", exp, a)\n\t}\n\n\t// View change proofs from self are discarded.\n\tvcp = &pb.ViewChangeProof{NodeId: 1, InstalledView: 4}\n\tp.onViewChangeProof(vcp)\n\tif exp, a := uint64(0), p.lastInstalled; a != exp {\n\t\tt.Fatalf(\"expected last installed view %d; found %d\", exp, a)\n\t}\n\n\t// View change proofs from node 0 with later view should be applied successfully.\n\tvcp = &pb.ViewChangeProof{NodeId: 0, InstalledView: 4}\n\tp.onViewChangeProof(vcp)\n\tif exp, a := uint64(4), p.lastInstalled; a != exp {\n\t\tt.Fatalf(\"expected last installed view %d after view change proof; found %d\", exp, a)\n\t}\n}", "func checkReconcileEventsOccur() {\n\t// These events are fired when the reconcile loop makes a change\n\tgomega.Eventually(func() string {\n\t\tout, err := RunKubeCtlCommand(\"describe\", \"hostpathprovisioner\", \"hostpath-provisioner\")\n\t\tgomega.Expect(err).ToNot(gomega.HaveOccurred())\n\t\treturn out\n\t}, 90*time.Second, 1*time.Second).Should(gomega.ContainSubstring(\"UpdateResourceStart\"))\n\n\tgomega.Eventually(func() string {\n\t\tout, err := RunKubeCtlCommand(\"describe\", \"hostpathprovisioner\", \"hostpath-provisioner\")\n\t\tgomega.Expect(err).ToNot(gomega.HaveOccurred())\n\t\treturn out\n\t}, 90*time.Second, 1*time.Second).Should(gomega.ContainSubstring(\"UpdateResourceSuccess\"))\n}", "func TestEvents(t *testing.T) {\n\tti := tInfo{}\n\tAssertOk(t, ti.setup(t), \"failed to setup test\")\n\tdefer ti.teardown()\n\n\t// uuid to make each source unique\n\tcomponentID := uuid.NewV4().String()\n\n\t// create recorder events directory\n\trecorderEventsDir, err := ioutil.TempDir(\"\", \"\")\n\tAssertOk(t, err, \"failed to create recorder events directory\")\n\tdefer os.RemoveAll(recorderEventsDir)\n\n\t// create 
recorder\n\tevtsRecorder, err := recorder.NewRecorder(&recorder.Config{\n\t\tComponent: componentID,\n\t\tEvtsProxyURL: ti.evtProxyServices.EvtsProxy.RPCServer.GetListenURL(),\n\t\tBackupDir: recorderEventsDir}, ti.logger)\n\tAssertOk(t, err, \"failed to create events recorder\")\n\tdefer evtsRecorder.Close()\n\n\t// send events (recorder -> proxy -> dispatcher -> writer -> evtsmgr -> elastic)\n\tevtsRecorder.Event(eventtypes.SERVICE_STARTED, \"test event - 1\", nil)\n\tevtsRecorder.Event(eventtypes.SERVICE_RUNNING, \"test event - 2\", nil)\n\n\ttime.Sleep(1 * time.Second)\n\n\t// verify that it has reached elasticsearch; these are the first occurrences of an event\n\t// so it should have reached elasticsearch without being de-duped.\n\tquery := es.NewBoolQuery().Must(es.NewMatchQuery(\"source.component\", componentID),\n\t\tes.NewTermQuery(\"type.keyword\", eventtypes.SERVICE_STARTED.String()))\n\tti.assertElasticUniqueEvents(t, query, true, 1, \"4s\") // unique == 1\n\tti.assertElasticTotalEvents(t, query, true, 1, \"4s\") // total == 1\n\tquery = es.NewBoolQuery().Must(es.NewMatchQuery(\"source.component\", componentID),\n\t\tes.NewMatchQuery(\"message\", \"test event -2\").Operator(\"and\"))\n\tti.assertElasticUniqueEvents(t, query, true, 1, \"4s\") // unique == 1\n\tti.assertElasticTotalEvents(t, query, true, 1, \"4s\") // total == 1\n\n\t// send duplicates and check whether they're compressed\n\tnumDuplicates := 25\n\tfor i := 0; i < numDuplicates; i++ {\n\t\tevtsRecorder.Event(eventtypes.SERVICE_STARTED, \"test dup event - 1\", nil)\n\t\tevtsRecorder.Event(eventtypes.SERVICE_RUNNING, \"test dup event - 2\", nil)\n\t}\n\n\t// ensure the de-duped events reached elasticsearch\n\t// test duplicate event - 1\n\tquery = es.NewBoolQuery().Must(es.NewMatchQuery(\"source.component\", componentID),\n\t\tes.NewMatchQuery(\"message\", \"test dup event - 1\").Operator(\"and\"))\n\tti.assertElasticUniqueEvents(t, query, true, 1, \"4s\") // unique == 
1\n\tti.assertElasticTotalEvents(t, query, true, numDuplicates, \"2s\") // total == numDuplicates\n\n\t// test duplicate event - 2\n\tquery = es.NewBoolQuery().Must(es.NewMatchQuery(\"source.component\", componentID),\n\t\tes.NewMatchQuery(\"message\", \"test dup event - 2\").Operator(\"and\"))\n\tti.assertElasticUniqueEvents(t, query, true, 1, \"4s\") // unique == 1\n\tti.assertElasticTotalEvents(t, query, true, numDuplicates, \"2s\") // total == numDuplicates\n\n\t// create test NIC object\n\ttestNIC := policygen.CreateSmartNIC(\"00-14-22-01-23-45\",\n\t\tcluster.DistributedServiceCardStatus_ADMITTED.String(),\n\t\t\"esx-1\",\n\t\t&cluster.DSCCondition{\n\t\t\tType: cluster.DSCCondition_HEALTHY.String(),\n\t\t\tStatus: cluster.ConditionStatus_FALSE.String(),\n\t\t})\n\n\t// record events with reference object\n\tfor i := 0; i < numDuplicates; i++ {\n\t\tevtsRecorder.Event(eventtypes.SERVICE_STARTED, \"test dup event - 1\", testNIC)\n\t\tevtsRecorder.Event(eventtypes.SERVICE_RUNNING, \"test dup event - 2\", testNIC)\n\t}\n\n\t// query by kind\n\tqueryByKind := es.NewTermQuery(\"object-ref.kind.keyword\", testNIC.GetKind())\n\tti.assertElasticUniqueEvents(t, queryByKind, true, 2, \"4s\") // unique == 2 (eventType1 and eventType2)\n\tti.assertElasticTotalEvents(t, queryByKind, true, numDuplicates*2, \"4s\") // total == numDuplicates\n}", "func subscribeAndAssertSubscriptionCreatedEvent(\n\tt *testing.T,\n\tvrfConsumerHandle vrfConsumerContract,\n\tconsumerOwner *bind.TransactOpts,\n\tconsumerContractAddress common.Address,\n\tfundingJuels *big.Int,\n\tuni coordinatorV2Universe,\n) uint64 {\n\t// Create a subscription and fund with LINK.\n\tsub, subID := subscribeVRF(t, consumerOwner, vrfConsumerHandle, uni.rootContract, uni.backend, fundingJuels)\n\trequire.Equal(t, uint64(1), subID)\n\trequire.Equal(t, fundingJuels.String(), sub.Balance.String())\n\n\t// Assert the subscription event in the coordinator contract.\n\titer, err := 
uni.rootContract.FilterSubscriptionCreated(nil, []uint64{subID})\n\trequire.NoError(t, err)\n\tfound := false\n\tfor iter.Next() {\n\t\tif iter.Event.Owner != consumerContractAddress {\n\t\t\trequire.FailNowf(t, \"SubscriptionCreated event contains wrong owner address\", \"expected: %+v, actual: %+v\", consumerContractAddress, iter.Event.Owner)\n\t\t} else {\n\t\t\tfound = true\n\t\t}\n\t}\n\trequire.True(t, found, \"could not find SubscriptionCreated event for subID %d\", subID)\n\n\treturn subID\n}", "func DigestEvents (roomChannel chan bson.M, roomId string) {\n q := bson.M{\"roomid\": roomId}\n // isBusy := false\n // TODO: ensure we always make the change if it's the last one in the channel (intermediates arent as important)\n for {\n select {\n case up := <- roomChannel:\n if err := Rooms.Update(q, up); err != nil {\n log.Println(err.Error())\n }\n }\n }\n}", "func TestV3ElectionCampaign(t *testing.T) {\n\tintegration.BeforeTest(t)\n\tclus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})\n\tdefer clus.Terminate(t)\n\n\tlease1, err1 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})\n\tif err1 != nil {\n\t\tt.Fatal(err1)\n\t}\n\tlease2, err2 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30})\n\tif err2 != nil {\n\t\tt.Fatal(err2)\n\t}\n\n\tlc := integration.ToGRPC(clus.Client(0)).Election\n\treq1 := &epb.CampaignRequest{Name: []byte(\"foo\"), Lease: lease1.ID, Value: []byte(\"abc\")}\n\tl1, lerr1 := lc.Campaign(context.TODO(), req1)\n\tif lerr1 != nil {\n\t\tt.Fatal(lerr1)\n\t}\n\n\tcampaignc := make(chan struct{})\n\tgo func() {\n\t\tdefer close(campaignc)\n\t\treq2 := &epb.CampaignRequest{Name: []byte(\"foo\"), Lease: lease2.ID, Value: []byte(\"def\")}\n\t\tl2, lerr2 := lc.Campaign(context.TODO(), req2)\n\t\tif lerr2 != nil {\n\t\t\tt.Error(lerr2)\n\t\t}\n\t\tif l1.Header.Revision >= l2.Header.Revision {\n\t\t\tt.Errorf(\"expected l1 
revision < l2 revision, got %d >= %d\", l1.Header.Revision, l2.Header.Revision)\n\t\t}\n\t}()\n\n\tselect {\n\tcase <-time.After(200 * time.Millisecond):\n\tcase <-campaignc:\n\t\tt.Fatalf(\"got leadership before resign\")\n\t}\n\n\tif _, uerr := lc.Resign(context.TODO(), &epb.ResignRequest{Leader: l1.Leader}); uerr != nil {\n\t\tt.Fatal(uerr)\n\t}\n\n\tselect {\n\tcase <-time.After(200 * time.Millisecond):\n\t\tt.Fatalf(\"campaigner unelected after resign\")\n\tcase <-campaignc:\n\t}\n\n\tlval, lverr := lc.Leader(context.TODO(), &epb.LeaderRequest{Name: []byte(\"foo\")})\n\tif lverr != nil {\n\t\tt.Fatal(lverr)\n\t}\n\n\tif string(lval.Kv.Value) != \"def\" {\n\t\tt.Fatalf(\"got election value %q, expected %q\", string(lval.Kv.Value), \"def\")\n\t}\n}", "func TestLeaderAcknowledgeCommit(t *testing.T) {\n\ttests := []struct {\n\t\tsize int\n\t\tacceptors map[uint64]bool\n\t\twack bool\n\t}{\n\t\t{1, nil, true},\n\t\t{3, nil, false},\n\t\t{3, map[uint64]bool{2: true}, true},\n\t\t{3, map[uint64]bool{2: true, 3: true}, true},\n\t\t{5, nil, false},\n\t\t{5, map[uint64]bool{2: true}, false},\n\t\t{5, map[uint64]bool{2: true, 3: true}, true},\n\t\t{5, map[uint64]bool{2: true, 3: true, 4: true}, true},\n\t\t{5, map[uint64]bool{2: true, 3: true, 4: true, 5: true}, true},\n\t}\n\tfor i, tt := range tests {\n\t\ts := NewMemoryStorage()\n\t\tr := newTestRaft(1, idsBySize(tt.size), 10, 1, s)\n\t\tdefer closeAndFreeRaft(r)\n\t\tr.becomeCandidate()\n\t\tr.becomeLeader()\n\t\tcommitNoopEntry(r, s)\n\t\tli := r.raftLog.lastIndex()\n\t\tr.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte(\"some data\")}}})\n\n\t\tfor _, m := range r.readMessages() {\n\t\t\tif tt.acceptors[m.To] {\n\t\t\t\tr.Step(acceptAndReply(m))\n\t\t\t}\n\t\t}\n\n\t\tif g := r.raftLog.committed > li; g != tt.wack {\n\t\t\tt.Errorf(\"#%d: ack commit = %v, want %v\", i, g, tt.wack)\n\t\t}\n\t}\n}", "func TestEventualConsistency(t *testing.T) {\n\ttestCases := []struct 
{\n\t\tdescription string\n\t\tpods []v1.Pod\n\t\tprevPod *v1.Pod\n\t\tnewPod *v1.Pod\n\t\toldNode *v1.Node\n\t\tnewNode *v1.Node\n\t\texpectPatch bool\n\t\texpectDelete bool\n\t}{\n\t\t{\n\t\t\tdescription: \"existing pod2 scheduled onto tainted Node\",\n\t\t\tpods: []v1.Pod{\n\t\t\t\t*testutil.NewPod(\"pod1\", \"node1\"),\n\t\t\t},\n\t\t\tprevPod: testutil.NewPod(\"pod2\", \"\"),\n\t\t\tnewPod: testutil.NewPod(\"pod2\", \"node1\"),\n\t\t\toldNode: testutil.NewNode(\"node1\"),\n\t\t\tnewNode: addTaintsToNode(testutil.NewNode(\"node1\"), \"testTaint1\", \"taint1\", []int{1}),\n\t\t\texpectPatch: true,\n\t\t\texpectDelete: true,\n\t\t},\n\t\t{\n\t\t\tdescription: \"existing pod2 with taint toleration scheduled onto tainted Node\",\n\t\t\tpods: []v1.Pod{\n\t\t\t\t*testutil.NewPod(\"pod1\", \"node1\"),\n\t\t\t},\n\t\t\tprevPod: addToleration(testutil.NewPod(\"pod2\", \"\"), 1, 100),\n\t\t\tnewPod: addToleration(testutil.NewPod(\"pod2\", \"node1\"), 1, 100),\n\t\t\toldNode: testutil.NewNode(\"node1\"),\n\t\t\tnewNode: addTaintsToNode(testutil.NewNode(\"node1\"), \"testTaint1\", \"taint1\", []int{1}),\n\t\t\texpectPatch: true,\n\t\t\texpectDelete: true,\n\t\t},\n\t\t{\n\t\t\tdescription: \"new pod2 created on tainted Node\",\n\t\t\tpods: []v1.Pod{\n\t\t\t\t*testutil.NewPod(\"pod1\", \"node1\"),\n\t\t\t},\n\t\t\tprevPod: nil,\n\t\t\tnewPod: testutil.NewPod(\"pod2\", \"node1\"),\n\t\t\toldNode: testutil.NewNode(\"node1\"),\n\t\t\tnewNode: addTaintsToNode(testutil.NewNode(\"node1\"), \"testTaint1\", \"taint1\", []int{1}),\n\t\t\texpectPatch: true,\n\t\t\texpectDelete: true,\n\t\t},\n\t\t{\n\t\t\tdescription: \"new pod2 with tait toleration created on tainted Node\",\n\t\t\tpods: []v1.Pod{\n\t\t\t\t*testutil.NewPod(\"pod1\", \"node1\"),\n\t\t\t},\n\t\t\tprevPod: nil,\n\t\t\tnewPod: addToleration(testutil.NewPod(\"pod2\", \"node1\"), 1, 100),\n\t\t\toldNode: testutil.NewNode(\"node1\"),\n\t\t\tnewNode: addTaintsToNode(testutil.NewNode(\"node1\"), \"testTaint1\", \"taint1\", 
[]int{1}),\n\t\t\texpectPatch: true,\n\t\t\texpectDelete: true,\n\t\t},\n\t}\n\n\tfor _, item := range testCases {\n\t\tt.Run(item.description, func(t *testing.T) {\n\t\t\tctx, cancel := context.WithCancel(context.Background())\n\t\t\tdefer cancel()\n\n\t\t\tfakeClientset := fake.NewSimpleClientset(&v1.PodList{Items: item.pods})\n\t\t\tcontroller, podIndexer, nodeIndexer := setupNewNoExecuteTaintManager(ctx, fakeClientset)\n\t\t\tnodeIndexer.Add(item.newNode)\n\t\t\tcontroller.recorder = testutil.NewFakeRecorder()\n\t\t\tgo controller.Run(ctx)\n\n\t\t\tif item.prevPod != nil {\n\t\t\t\tpodIndexer.Add(item.prevPod)\n\t\t\t\tcontroller.PodUpdated(nil, item.prevPod)\n\t\t\t}\n\n\t\t\t// First we simulate NodeUpdate that should delete 'pod1'. It doesn't know about 'pod2' yet.\n\t\t\tcontroller.NodeUpdated(item.oldNode, item.newNode)\n\n\t\t\tverifyPodActions(t, item.description, fakeClientset, item.expectPatch, item.expectDelete)\n\t\t\tfakeClientset.ClearActions()\n\n\t\t\t// And now the delayed update of 'pod2' comes to the TaintManager. 
We should delete it as well.\n\t\t\tpodIndexer.Update(item.newPod)\n\t\t\tcontroller.PodUpdated(item.prevPod, item.newPod)\n\t\t\t// wait a bit\n\t\t\ttime.Sleep(timeForControllerToProgressForSanityCheck)\n\t\t})\n\t}\n}", "func (suite *EventsTestSuite) TestUpdateEventAllInstance() {\n\tstart := time.Date(2019, 12, 1, 12, 0, 0, 0, time.UTC)\n\tend := time.Date(2019, 12, 1, 13, 0, 0, 0, time.UTC)\n\tdur := end.Sub(start)\n\n\t// create an rrule\n\tr, _ := rrule.NewRRule(rrule.ROption{\n\t\tFreq: rrule.DAILY,\n\t\tCount: 7,\n\t\tDtstart: start,\n\t})\n\n\treq := &events.Event{\n\t\tTitle: \"Some Event\",\n\t\tStart: start,\n\t\tEnd: end,\n\t\tDuration: dur,\n\t\tAllday: false,\n\t\tRecurring: true,\n\t\tRrule: r.String(),\n\t}\n\n\trsp := &events.EventResponse{}\n\terr := suite.service.CreateEvent(context.TODO(), req, rsp)\n\tassert.Nil(suite.T(), err)\n\n\t// change the start time\n\t// rrule updates are handled on client side\n\tstart = time.Date(2019, 12, 1, 13, 0, 0, 0, time.UTC)\n\tend = time.Date(2019, 12, 1, 14, 0, 0, 0, time.UTC)\n\trsp.Event.Start = start\n\trsp.Event.End = end\n\tr.OrigOptions.Dtstart = start\n\trsp.Event.Rrule = r.String()\n\n\t// update all instance of recurring\n\tupdatereq := &events.EventUpdateRequest{}\n\tupdatereq.Updatetype = events.AllInstances\n\tupdatereq.Event = rsp.Event\n\n\terr = suite.service.UpdateEvent(context.TODO(), updatereq, rsp)\n\tassert.Nil(suite.T(), err)\n\tassert.Equal(suite.T(), start, rsp.Event.Start)\n\tassert.Equal(suite.T(), end, rsp.Event.End)\n\tassert.Equal(suite.T(), r.String(), rsp.Event.Rrule)\n}", "func TestCheckEvents(t *testing.T) {\n\ttestNamespace := \"test_namespace\"\n\tcha := make(chan *events.Envelope)\n\terrorsCh := make(chan error)\n\tme := &mockEvt{\n\t\tmockSubscribe: func(ctx context.Context, filter ...string) (ch <-chan *events.Envelope, errs <-chan error) {\n\t\t\treturn cha, errorsCh\n\t\t},\n\t}\n\titf := &fake.MockedContainerdClient{\n\t\tMockEvents: func() containerd.EventService 
{\n\t\t\treturn containerd.EventService(me)\n\t\t},\n\t\tMockNamespaces: func(ctx context.Context) ([]string, error) {\n\t\t\treturn []string{testNamespace}, nil\n\t\t},\n\t\tMockContainers: func(namespace string) ([]containerd.Container, error) {\n\t\t\treturn nil, nil\n\t\t},\n\t}\n\t// Test the basic listener\n\tsub := createEventSubscriber(\"subscriberTest1\", containerdutil.ContainerdItf(itf), nil)\n\tsub.CheckEvents()\n\n\ttp := &containerdevents.TaskPaused{\n\t\tContainerID: \"42\",\n\t}\n\n\tvp, err := typeurl.MarshalAny(tp)\n\tassert.NoError(t, err)\n\n\ten := events.Envelope{\n\t\tTimestamp: time.Now(),\n\t\tTopic: \"/tasks/paused\",\n\t\tEvent: vp,\n\t}\n\tcha <- &en\n\n\ttimeout := time.NewTimer(2 * time.Second)\n\tticker := time.NewTicker(5 * time.Millisecond)\n\tcondition := false\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tif !sub.isRunning() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcondition = true\n\t\tcase <-timeout.C:\n\t\t\trequire.FailNow(t, \"Timeout waiting event listener to be healthy\")\n\t\t}\n\t\tif condition {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tev := sub.Flush(time.Now().Unix())\n\tassert.Len(t, ev, 1)\n\tassert.Equal(t, ev[0].Topic, \"/tasks/paused\")\n\terrorsCh <- fmt.Errorf(\"chan breaker\")\n\tcondition = false\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tif sub.isRunning() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcondition = true\n\t\tcase <-timeout.C:\n\t\t\trequire.FailNow(t, \"Timeout waiting for error\")\n\t\t}\n\t\tif condition {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Test the multiple events one unsupported\n\tsub = createEventSubscriber(\"subscriberTest2\", containerdutil.ContainerdItf(itf), nil)\n\tsub.CheckEvents()\n\n\ttk := &containerdevents.TaskOOM{\n\t\tContainerID: \"42\",\n\t}\n\tvk, err := typeurl.MarshalAny(tk)\n\tassert.NoError(t, err)\n\n\tek := events.Envelope{\n\t\tTimestamp: time.Now(),\n\t\tTopic: \"/tasks/oom\",\n\t\tEvent: vk,\n\t}\n\n\tnd := &containerdevents.NamespaceDelete{\n\t\tName: \"k10s.io\",\n\t}\n\tvnd, 
err := typeurl.MarshalAny(nd)\n\tassert.NoError(t, err)\n\n\tevnd := events.Envelope{\n\t\tTimestamp: time.Now(),\n\t\tTopic: \"/namespaces/delete\",\n\t\tEvent: vnd,\n\t}\n\n\tcha <- &ek\n\tcha <- &evnd\n\n\tcondition = false\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tif !sub.isRunning() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcondition = true\n\t\tcase <-timeout.C:\n\t\t\trequire.FailNow(t, \"Timeout waiting event listener to be healthy\")\n\t\t}\n\t\tif condition {\n\t\t\tbreak\n\t\t}\n\t}\n\tev2 := sub.Flush(time.Now().Unix())\n\tfmt.Printf(\"\\n\\n 2/ Flush %v\\n\\n\", ev2)\n\tassert.Len(t, ev2, 1)\n\tassert.Equal(t, ev2[0].Topic, \"/tasks/oom\")\n}", "func TestBackend(ctx context.Context, t *testing.T, a *assertions.Assertion, backend events.PubSub) {\n\tnow := time.Now()\n\tcorrelationID := fmt.Sprintf(\"%s@%s\", t.Name(), now)\n\n\tctx = events.ContextWithCorrelationID(ctx, correlationID)\n\n\ttimeout := test.Delay\n\tif deadline, ok := ctx.Deadline(); ok {\n\t\ttimeout = time.Until(deadline) / 10\n\t}\n\n\teui := types.EUI64{1, 2, 3, 4, 5, 6, 7, 8}\n\tdevAddr := types.DevAddr{1, 2, 3, 4}\n\tappID := ttnpb.ApplicationIdentifiers{\n\t\tApplicationId: \"test-app\",\n\t}\n\tdevID := ttnpb.EndDeviceIdentifiers{\n\t\tApplicationIds: &appID,\n\t\tDeviceId: \"test-dev\",\n\t\tDevEui: eui.Bytes(),\n\t\tJoinEui: eui.Bytes(),\n\t\tDevAddr: devAddr.Bytes(),\n\t}\n\tgtwID := ttnpb.GatewayIdentifiers{\n\t\tGatewayId: \"test-gtw\",\n\t\tEui: eui.Bytes(),\n\t}\n\n\tch0 := make(events.Channel, 10)\n\tch1 := make(events.Channel, 10)\n\tch2 := make(events.Channel, 10)\n\n\tcheckEvent := func(e events.Event) {\n\t\ta.So(e.Time().IsZero(), should.BeFalse)\n\t\ta.So(e.Context(), should.NotBeNil)\n\t}\n\n\tsubCtx, unsubscribe := context.WithCancel(ctx)\n\tdefer unsubscribe()\n\n\tbackend.Publish(events.New(ctx, \"test.some.evt1\", \"test event 1\", events.WithIdentifiers(&appID)))\n\n\truntime.Gosched()\n\ttime.Sleep(timeout)\n\n\tif store, ok := backend.(events.Store); ok 
{\n\t\tchx := make(events.Channel, 10)\n\t\thistSubCtx, cancel := context.WithCancel(subCtx)\n\t\tvar g errgroup.Group\n\t\tg.Go(func() error {\n\t\t\tafter := now.Add(-1 * time.Second)\n\t\t\treturn store.SubscribeWithHistory(\n\t\t\t\thistSubCtx,\n\t\t\t\t[]string{\"test.some.evt1\"},\n\t\t\t\t[]*ttnpb.EntityIdentifiers{appID.GetEntityIdentifiers()},\n\t\t\t\t&after, 1, chx,\n\t\t\t)\n\t\t})\n\t\tdefer func() {\n\t\t\tcancel()\n\t\t\tif err := g.Wait(); err != nil && !errors.IsCanceled(err) {\n\t\t\t\tt.Error(err)\n\t\t\t}\n\t\t\ta.So(chx, should.HaveLength, 2)\n\t\t}()\n\t}\n\n\ta.So(backend.Subscribe(\n\t\tsubCtx,\n\t\t[]string{\"test.some.evt0\", \"test.some.evt1\"},\n\t\tnil,\n\t\tch0,\n\t), should.BeNil)\n\n\ta.So(backend.Subscribe(\n\t\tsubCtx,\n\t\t[]string{\"test.some.evt0\", \"test.some.evt1\"},\n\t\t[]*ttnpb.EntityIdentifiers{appID.GetEntityIdentifiers()},\n\t\tch1,\n\t), should.BeNil)\n\n\ta.So(backend.Subscribe(\n\t\tsubCtx,\n\t\t[]string{\"test.other.evt2\"},\n\t\t[]*ttnpb.EntityIdentifiers{gtwID.GetEntityIdentifiers()},\n\t\tch2,\n\t), should.BeNil)\n\n\truntime.Gosched()\n\ttime.Sleep(timeout)\n\n\tbackend.Publish(events.New(ctx, \"test.some.evt0\", \"test event 0\"))\n\tcheckEvt0 := func(e events.Event) {\n\t\tcheckEvent(e)\n\t\ta.So(e.Name(), should.Equal, \"test.some.evt0\")\n\t\ta.So(e.Identifiers(), should.BeNil)\n\t}\n\n\tbackend.Publish(events.New(ctx, \"test.some.evt1\", \"test event 1\", events.WithIdentifiers(&appID)))\n\tcheckEvt1 := func(e events.Event) {\n\t\tcheckEvent(e)\n\t\ta.So(e.Name(), should.Equal, \"test.some.evt1\")\n\t\tif a.So(e.Identifiers(), should.NotBeNil) && a.So(e.Identifiers(), should.HaveLength, 1) {\n\t\t\ta.So(e.Identifiers()[0].GetApplicationIds(), should.Resemble, &appID)\n\t\t}\n\t}\n\n\tbackend.Publish(events.New(ctx, \"test.other.evt2\", \"test event 2\", events.WithIdentifiers(&devID, &gtwID)))\n\tcheckEvt2 := func(e events.Event) {\n\t\tcheckEvent(e)\n\t\ta.So(e.Name(), should.Equal, 
\"test.other.evt2\")\n\t\tif a.So(e.Identifiers(), should.NotBeNil) && a.So(e.Identifiers(), should.HaveLength, 2) {\n\t\t\ta.So(e.Identifiers()[0].GetDeviceIds(), should.Resemble, &devID)\n\t\t\ta.So(e.Identifiers()[1].GetGatewayIds(), should.Resemble, &gtwID)\n\t\t}\n\t}\n\n\truntime.Gosched()\n\ttime.Sleep(timeout)\n\n\tif a.So(ch0, should.HaveLength, 2) {\n\t\tevt := <-ch0\n\t\tif evt.Name() == \"test.some.evt0\" { // Events may arrive out-of-order.\n\t\t\tcheckEvt0(evt)\n\t\t\tcheckEvt1(<-ch0)\n\t\t} else {\n\t\t\tcheckEvt1(evt)\n\t\t\tcheckEvt0(<-ch0)\n\t\t}\n\t}\n\n\tif a.So(ch1, should.HaveLength, 1) {\n\t\tcheckEvt1(<-ch1)\n\t}\n\n\tif a.So(ch2, should.HaveLength, 1) {\n\t\tcheckEvt2(<-ch2)\n\t}\n\n\tif store, ok := backend.(events.Store); ok {\n\t\tafter := now.Add(-1 * time.Second)\n\n\t\tevts, err := store.FetchHistory(ctx, []string{\n\t\t\t\"test.some.evt1\",\n\t\t}, []*ttnpb.EntityIdentifiers{\n\t\t\tappID.GetEntityIdentifiers(),\n\t\t\tdevID.GetEntityIdentifiers(),\n\t\t\tgtwID.GetEntityIdentifiers(),\n\t\t}, &after, 0)\n\t\ta.So(err, should.BeNil)\n\t\ta.So(evts, should.HaveLength, 2)\n\n\t\tevts, err = store.FetchHistory(ctx, nil, []*ttnpb.EntityIdentifiers{\n\t\t\tappID.GetEntityIdentifiers(),\n\t\t}, &after, 1)\n\t\ta.So(err, should.BeNil)\n\t\ta.So(evts, should.HaveLength, 1)\n\n\t\tevts, err = store.FindRelated(ctx, correlationID)\n\t\ta.So(err, should.BeNil)\n\t\ta.So(evts, should.HaveLength, 4)\n\t}\n}", "func TestInitBroker(t *testing.T) {\n\tcommitteeMock, k := agreement.MockCommittee(2, true, 2)\n\tbus := wire.NewEventBus()\n\troundChan := consensus.InitRoundUpdate(bus)\n\n\tgo agreement.Launch(bus, committeeMock, k[0])\n\ttime.Sleep(200 * time.Millisecond)\n\tinit := make([]byte, 8)\n\tbinary.LittleEndian.PutUint64(init, 1)\n\tbus.Publish(msg.InitializationTopic, bytes.NewBuffer(init))\n\n\tround := <-roundChan\n\tassert.Equal(t, uint64(1), round)\n}", "func TestEventServiceUpdate(t *testing.T) {\n\tvar result EventService\n\terr := 
json.NewDecoder(strings.NewReader(eventServiceBody)).Decode(&result)\n\n\tif err != nil {\n\t\tt.Errorf(\"Error decoding JSON: %s\", err)\n\t}\n\n\ttestClient := &common.TestClient{}\n\tresult.SetClient(testClient)\n\n\tresult.DeliveryRetryAttempts = 20\n\tresult.DeliveryRetryIntervalSeconds = 60\n\tresult.ServiceEnabled = true\n\terr = result.Update(context.Background())\n\n\tif err != nil {\n\t\tt.Errorf(\"Error making Update call: %s\", err)\n\t}\n\n\tcalls := testClient.CapturedCalls()\n\n\tif !strings.Contains(calls[0].Payload, \"DeliveryRetryAttempts:20\") {\n\t\tt.Errorf(\"Unexpected DeliveryRetryAttempts update payload: %s\", calls[0].Payload)\n\t}\n\n\tif !strings.Contains(calls[0].Payload, \"DeliveryRetryIntervalSeconds:60\") {\n\t\tt.Errorf(\"Unexpected DeliveryRetryIntervalSeconds update payload: %s\", calls[0].Payload)\n\t}\n\n\tif strings.Contains(calls[0].Payload, \"ServiceEnabled\") {\n\t\tt.Errorf(\"Unexpected DeliveryRetryIntervalSeconds update payload: %s\", calls[0].Payload)\n\t}\n}", "func TestComputeEvents(t *testing.T) {\n\tcontainerdCheck := &ContainerdCheck{\n\t\tinstance: &ContainerdConfig{},\n\t\tCheckBase: corechecks.NewCheckBase(\"containerd\"),\n\t}\n\tmocked := mocksender.NewMockSender(containerdCheck.ID())\n\tvar err error\n\tdefer containers.ResetSharedFilter()\n\tcontainerdCheck.containerFilter, err = containers.GetSharedMetricFilter()\n\trequire.NoError(t, err)\n\n\ttests := []struct {\n\t\tname string\n\t\tevents []containerdEvent\n\t\texpectedTitle string\n\t\texpectedTags []string\n\t\tnumberEvents int\n\t}{\n\t\t{\n\t\t\tname: \"No events\",\n\t\t\tevents: []containerdEvent{},\n\t\t\texpectedTitle: \"\",\n\t\t\tnumberEvents: 0,\n\t\t},\n\t\t{\n\t\t\tname: \"Events on wrong type\",\n\t\t\tevents: []containerdEvent{\n\t\t\t\t{\n\t\t\t\t\tTopic: \"/containers/delete/extra\",\n\t\t\t\t}, {\n\t\t\t\t\tTopic: \"containers/delete\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedTitle: \"\",\n\t\t\tnumberEvents: 0,\n\t\t},\n\t\t{\n\t\t\tname: 
\"High cardinality Events with one invalid\",\n\t\t\tevents: []containerdEvent{\n\t\t\t\t{\n\t\t\t\t\tTopic: \"/containers/delete\",\n\t\t\t\t\tTimestamp: time.Now(),\n\t\t\t\t\tExtra: map[string]string{\"foo\": \"bar\"},\n\t\t\t\t\tMessage: \"Container xxx deleted\",\n\t\t\t\t\tID: \"xxx\",\n\t\t\t\t}, {\n\t\t\t\t\tTopic: \"containers/delete\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedTitle: \"Event on containers from Containerd\",\n\t\t\texpectedTags: []string{\"foo:bar\", \"event_type:destroy\"},\n\t\t\tnumberEvents: 1,\n\t\t},\n\t\t{\n\t\t\tname: \"Low cardinality Event\",\n\t\t\tevents: []containerdEvent{\n\t\t\t\t{\n\t\t\t\t\tTopic: \"/images/update\",\n\t\t\t\t\tTimestamp: time.Now(),\n\t\t\t\t\tExtra: map[string]string{\"foo\": \"baz\"},\n\t\t\t\t\tMessage: \"Image yyy updated\",\n\t\t\t\t\tID: \"yyy\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedTitle: \"Event on images from Containerd\",\n\t\t\texpectedTags: []string{\"foo:baz\"},\n\t\t\tnumberEvents: 1,\n\t\t},\n\t\t{\n\t\t\tname: \"Filtered event\",\n\t\t\tevents: []containerdEvent{\n\t\t\t\t{\n\t\t\t\t\tTopic: \"/images/create\",\n\t\t\t\t\tTimestamp: time.Now(),\n\t\t\t\t\tExtra: map[string]string{},\n\t\t\t\t\tMessage: \"Image kubernetes/pause created\",\n\t\t\t\t\tID: \"kubernetes/pause\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedTitle: \"Event on images from Containerd\",\n\t\t\texpectedTags: nil,\n\t\t\tnumberEvents: 0,\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tcomputeEvents(test.events, mocked, containerdCheck.containerFilter)\n\t\t\tmocked.On(\"Event\", mock.AnythingOfType(\"event.Event\"))\n\t\t\tif len(mocked.Calls) > 0 {\n\t\t\t\tres := (mocked.Calls[0].Arguments.Get(0)).(event.Event)\n\t\t\t\tassert.Contains(t, res.Title, test.expectedTitle)\n\t\t\t\tassert.ElementsMatch(t, res.Tags, test.expectedTags)\n\t\t\t}\n\t\t\tmocked.AssertNumberOfCalls(t, \"Event\", test.numberEvents)\n\t\t\tmocked.ResetCalls()\n\t\t})\n\t}\n}", "func TestAsyncEvents(t 
*testing.T) {\n\tif !haveArchive {\n\t\treturn\n\t}\n\n\tif testing.Verbose() && DEBUG {\n\t\tlogging.SetLevel(logging.DEBUG, \"archive\")\n\t}\n\n\tarchive.Listeners.RecordingSignalListener = RecordingSignalListener\n\tarchive.Listeners.RecordingEventStartedListener = RecordingEventStartedListener\n\tarchive.Listeners.RecordingEventProgressListener = RecordingEventProgressListener\n\tarchive.Listeners.RecordingEventStoppedListener = RecordingEventStoppedListener\n\n\ttestCounters = TestCounters{0, 0, 0, 0}\n\tif !CounterValuesMatch(testCounters, 0, 0, 0, 0, t) {\n\t\tt.Log(\"Async event counters mismatch\")\n\t\tt.FailNow()\n\t}\n\n\tarchive.EnableRecordingEvents()\n\tarchive.RecordingEventsPoll()\n\n\tif !CounterValuesMatch(testCounters, 0, 0, 0, 0, t) {\n\t\tt.Log(\"Async event counters mismatch\")\n\t\tt.FailNow()\n\t}\n\n\tpublication, err := archive.AddRecordedPublication(testCases[0].sampleChannel, testCases[0].sampleStream)\n\tif err != nil {\n\t\tt.Log(err)\n\t\tt.FailNow()\n\t}\n\n\t// Delay a little to get the publication is established\n\tidler := idlestrategy.Sleeping{SleepFor: time.Millisecond * 100}\n\tidler.Idle(0)\n\n\tarchive.RecordingEventsPoll()\n\tif !CounterValuesMatch(testCounters, 1, 1, 0, 0, t) {\n\t\tt.Log(\"Async event counters mismatch\")\n\t\tt.FailNow()\n\t}\n\n\tif err := archive.StopRecordingByPublication(*publication); err != nil {\n\t\tt.Log(err)\n\t\tt.FailNow()\n\t}\n\n\tif !CounterValuesMatch(testCounters, 2, 1, 0, 0, t) {\n\t\tt.Log(\"Async event counters mismatch\")\n\t\tt.FailNow()\n\t}\n\n\tarchive.RecordingEventsPoll()\n\tif !CounterValuesMatch(testCounters, 2, 1, 0, 1, t) {\n\t\tt.Log(\"Async event counters mismatch\")\n\t\tt.FailNow()\n\t}\n\n\t// Cleanup\n\tarchive.DisableRecordingEvents()\n\tarchive.Listeners.RecordingSignalListener = nil\n\tarchive.Listeners.RecordingEventStartedListener = nil\n\tarchive.Listeners.RecordingEventProgressListener = nil\n\tarchive.Listeners.RecordingEventStoppedListener = 
nil\n\ttestCounters = TestCounters{0, 0, 0, 0}\n\tif !CounterValuesMatch(testCounters, 0, 0, 0, 0, t) {\n\t\tt.Log(\"Async event counters mismatch\")\n\t\tt.FailNow()\n\t}\n\n\tpublication.Close()\n}", "func TestSettleInvoice(t *testing.T) {\n\tcdb, cleanup, err := newDB()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer cleanup()\n\n\t// Instantiate and start the invoice registry.\n\tregistry := NewRegistry(cdb, &chaincfg.MainNetParams)\n\n\terr = registry.Start()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer registry.Stop()\n\n\tallSubscriptions := registry.SubscribeNotifications(0, 0)\n\tdefer allSubscriptions.Cancel()\n\n\t// Subscribe to the not yet existing invoice.\n\tsubscription := registry.SubscribeSingleInvoice(hash)\n\tdefer subscription.Cancel()\n\n\tif subscription.hash != hash {\n\t\tt.Fatalf(\"expected subscription for provided hash\")\n\t}\n\n\t// Add the invoice.\n\tinvoice := &channeldb.Invoice{\n\t\tTerms: channeldb.ContractTerm{\n\t\t\tPaymentPreimage: preimage,\n\t\t\tValue: lnwire.MilliSatoshi(100000),\n\t\t},\n\t\tPaymentRequest: []byte(testPayReq),\n\t}\n\n\taddIdx, err := registry.AddInvoice(invoice, hash)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif addIdx != 1 {\n\t\tt.Fatalf(\"expected addIndex to start with 1, but got %v\",\n\t\t\taddIdx)\n\t}\n\n\t// We expect the open state to be sent to the single invoice subscriber.\n\tselect {\n\tcase update := <-subscription.Updates:\n\t\tif update.Terms.State != channeldb.ContractOpen {\n\t\t\tt.Fatalf(\"expected state ContractOpen, but got %v\",\n\t\t\t\tupdate.Terms.State)\n\t\t}\n\tcase <-time.After(testTimeout):\n\t\tt.Fatal(\"no update received\")\n\t}\n\n\t// We expect a new invoice notification to be sent out.\n\tselect {\n\tcase newInvoice := <-allSubscriptions.NewInvoices:\n\t\tif newInvoice.Terms.State != channeldb.ContractOpen {\n\t\t\tt.Fatalf(\"expected state ContractOpen, but got %v\",\n\t\t\t\tnewInvoice.Terms.State)\n\t\t}\n\tcase 
<-time.After(testTimeout):\n\t\tt.Fatal(\"no update received\")\n\t}\n\n\t// Settle invoice with a slightly higher amount.\n\tamtPaid := lnwire.MilliSatoshi(100500)\n\terr = registry.SettleInvoice(hash, amtPaid)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// We expect the settled state to be sent to the single invoice\n\t// subscriber.\n\tselect {\n\tcase update := <-subscription.Updates:\n\t\tif update.Terms.State != channeldb.ContractSettled {\n\t\t\tt.Fatalf(\"expected state ContractOpen, but got %v\",\n\t\t\t\tupdate.Terms.State)\n\t\t}\n\t\tif update.AmtPaid != amtPaid {\n\t\t\tt.Fatal(\"invoice AmtPaid incorrect\")\n\t\t}\n\tcase <-time.After(testTimeout):\n\t\tt.Fatal(\"no update received\")\n\t}\n\n\t// We expect a settled notification to be sent out.\n\tselect {\n\tcase settledInvoice := <-allSubscriptions.SettledInvoices:\n\t\tif settledInvoice.Terms.State != channeldb.ContractSettled {\n\t\t\tt.Fatalf(\"expected state ContractOpen, but got %v\",\n\t\t\t\tsettledInvoice.Terms.State)\n\t\t}\n\tcase <-time.After(testTimeout):\n\t\tt.Fatal(\"no update received\")\n\t}\n\n\t// Try to settle again.\n\terr = registry.SettleInvoice(hash, amtPaid)\n\tif err != nil {\n\t\tt.Fatal(\"expected duplicate settle to succeed\")\n\t}\n\n\t// Try to settle again with a different amount.\n\terr = registry.SettleInvoice(hash, amtPaid+600)\n\tif err != nil {\n\t\tt.Fatal(\"expected duplicate settle to succeed\")\n\t}\n\n\t// Check that settled amount remains unchanged.\n\tinv, _, err := registry.LookupInvoice(hash)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif inv.AmtPaid != amtPaid {\n\t\tt.Fatal(\"expected amount to be unchanged\")\n\t}\n\n\t// Try to cancel.\n\terr = registry.CancelInvoice(hash)\n\tif err != channeldb.ErrInvoiceAlreadySettled {\n\t\tt.Fatal(\"expected cancelation of a settled invoice to fail\")\n\t}\n}", "func TestOrchestratorCheckSafeReSchedule(t *testing.T) {\n\tvar wg sync.WaitGroup\n\n\tclient := fake.NewSimpleClientset()\n\tinformerFactory := 
informers.NewSharedInformerFactory(client, 0)\n\tcl := &apiserver.APIClient{Cl: client, InformerFactory: informerFactory, UnassignedPodInformerFactory: informerFactory}\n\torchCheck := OrchestratorFactory().(*OrchestratorCheck)\n\torchCheck.apiClient = cl\n\n\tbundle := NewCollectorBundle(orchCheck)\n\terr := bundle.Initialize()\n\tassert.NoError(t, err)\n\n\twg.Add(2)\n\n\tnodeInformer := informerFactory.Core().V1().Nodes().Informer()\n\tnodeInformer.AddEventHandler(&cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj interface{}) {\n\t\t\twg.Done()\n\t\t},\n\t})\n\n\twriteNode(t, client, \"1\")\n\n\t// getting rescheduled.\n\torchCheck.Cancel()\n\t// This part is not optimal as the cancel closes a channel which gets propagated everywhere that might take some time.\n\t// If things are too fast the close is not getting propagated fast enough.\n\t// But even if we are too fast and don't catch that part it will not lead to a false positive\n\ttime.Sleep(1 * time.Millisecond)\n\terr = bundle.Initialize()\n\tassert.NoError(t, err)\n\twriteNode(t, client, \"2\")\n\n\twg.Wait()\n}", "func TestSubscriberV3NewClaimSameResult(t *testing.T) {\n\tdb := make(map[string]Fact)\n\tclaim(&db, Fact{[]Term{Term{\"text\", \"Sensor\"}, Term{\"text\", \"is\"}, Term{\"text\", \"low\"}}})\n\n\tsource := \"1234\"\n\tsubscriptionId := \"21dcca0a-ed5e-4593-b92e-fc9f16499cc8\"\n\tquery_part1 := []Term{\n\t\tTerm{\"variable\", \"A\"},\n\t\tTerm{\"variable\", \"\"},\n\t\tTerm{\"variable\", \"B\"},\n\t}\n\tquery := [][]Term{query_part1}\n\tsubscription := Subscription{source, subscriptionId, query, make(chan []BatchMessage, 1000), &sync.WaitGroup{}, &sync.WaitGroup{}}\n\tsubscription.dead.Add(1)\n\tsubscription.warmed.Add(1)\n\tnotifications := make(chan Notification, 1000)\n\tgo startSubscriberV3(subscription, notifications, db)\n\n\ttime.Sleep(CHANNEL_MESSAGE_DELIVERY_TEST_WAIT)\n\n\tif len(notifications) != 1 {\n\t\tt.Error(\"Wrong count of notifications\", 
len(notifications))\n\t\treturn\n\t}\n\n\t// Make a claim a unique claim that matches the subscription query, but does not product a unique result\n\tmessages := make([]BatchMessage, 1)\n\tmessages[0] = BatchMessage{\"claim\", [][]string{[]string{\"text\", \"Sensor\"}, []string{\"text\", \"was\"}, []string{\"text\", \"low\"}}}\n\tsubscription.batch_messages <- messages\n\n\ttime.Sleep(CHANNEL_MESSAGE_DELIVERY_TEST_WAIT)\n\n\t// If last result cache added to subscriber worker: change this to 1\n\tEXPECT_COUNT := 2\n\tif len(notifications) != EXPECT_COUNT {\n\t\tt.Error(\"Notification count is not correct\", EXPECT_COUNT, len(notifications))\n\t\treturn\n\t}\n\n\t// check the original notification result\n\texpectedResult := make(map[string][]string)\n\texpectedResult[\"A\"] = []string{\"text\", \"Sensor\"}\n\texpectedResult[\"B\"] = []string{\"text\", \"low\"}\n\n\t// All notifications should contain the same results\n\tfor i := 0; i < EXPECT_COUNT; i++ {\n\t\tnotification := <-notifications\n\t\tencoded_results := parseNotificationResult(notification, t)\n\t\tfor _, encoded_result := range encoded_results {\n\t\t\tif !reflect.DeepEqual(expectedResult, encoded_result) {\n\t\t\t\tt.Error(\"Wrong notification result\", expectedResult, encoded_result, i)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\t\n\t}\n}", "func TestReductionTimeout(t *testing.T) {\n\teb, _, streamer, _, _ := launchReductionTest(true, 2)\n\n\t// send a hash to start reduction\n\thash, _ := crypto.RandEntropy(32)\n\n\t// Because round updates are asynchronous (sent through a channel), we wait\n\t// for a bit to let the broker update its round.\n\ttime.Sleep(200 * time.Millisecond)\n\tsendSelection(1, hash, eb)\n\n\ttimer := time.After(1 * time.Second)\n\t<-timer\n\n\tstopChan := make(chan struct{})\n\ttime.AfterFunc(1*time.Second, func() {\n\t\tseenTopics := streamer.SeenTopics()\n\t\tfor _, topic := range seenTopics {\n\t\t\tif topic == topics.Agreement 
{\n\t\t\t\tt.Fatal(\"\")\n\t\t\t}\n\t\t}\n\n\t\tstopChan <- struct{}{}\n\t})\n\n\t<-stopChan\n}", "func checkEvents(t *testing.T, ctx context.Context, expectedEvents []string, ctrl *PersistentVolumeController) error {\n\tvar err error\n\n\t// Read recorded events - wait up to 1 minute to get all the expected ones\n\t// (just in case some goroutines are slower with writing)\n\ttimer := time.NewTimer(time.Minute)\n\tdefer timer.Stop()\n\tlogger := klog.FromContext(ctx)\n\tfakeRecorder := ctrl.eventRecorder.(*record.FakeRecorder)\n\tgotEvents := []string{}\n\tfinished := false\n\tfor len(gotEvents) < len(expectedEvents) && !finished {\n\t\tselect {\n\t\tcase event, ok := <-fakeRecorder.Events:\n\t\t\tif ok {\n\t\t\t\tlogger.V(5).Info(\"Event recorder got event\", \"event\", event)\n\t\t\t\tgotEvents = append(gotEvents, event)\n\t\t\t} else {\n\t\t\t\tlogger.V(5).Info(\"Event recorder finished\")\n\t\t\t\tfinished = true\n\t\t\t}\n\t\tcase _, _ = <-timer.C:\n\t\t\tlogger.V(5).Info(\"Event recorder timeout\")\n\t\t\tfinished = true\n\t\t}\n\t}\n\n\t// Evaluate the events\n\tfor i, expected := range expectedEvents {\n\t\tif len(gotEvents) <= i {\n\t\t\tt.Errorf(\"Event %q not emitted\", expected)\n\t\t\terr = fmt.Errorf(\"events do not match\")\n\t\t\tcontinue\n\t\t}\n\t\treceived := gotEvents[i]\n\t\tif !strings.HasPrefix(received, expected) {\n\t\t\tt.Errorf(\"Unexpected event received, expected %q, got %q\", expected, received)\n\t\t\terr = fmt.Errorf(\"events do not match\")\n\t\t}\n\t}\n\tfor i := len(expectedEvents); i < len(gotEvents); i++ {\n\t\tt.Errorf(\"Unexpected event received: %q\", gotEvents[i])\n\t\terr = fmt.Errorf(\"events do not match\")\n\t}\n\treturn err\n}", "func TestAgentClientEventNotify(t *testing.T) {\n\tstate := &ssntpTestState{}\n\tac := agentClient{conn: state}\n\tac.EventNotify(ssntp.TenantAdded, nil)\n}", "func Test_releaseLock_Update(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\tf func(t *testing.T, internalClient 
*kubefake.Clientset, isLeader *isLeaderTracker, cancel context.CancelFunc)\n\t}{\n\t\t{\n\t\t\tname: \"renewal fails on update\",\n\t\t\tf: func(t *testing.T, internalClient *kubefake.Clientset, isLeader *isLeaderTracker, cancel context.CancelFunc) {\n\t\t\t\tinternalClient.PrependReactor(\"update\", \"*\", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {\n\t\t\t\t\tlease := action.(kubetesting.UpdateAction).GetObject().(*coordinationv1.Lease)\n\t\t\t\t\tif len(ptr.Deref(lease.Spec.HolderIdentity, \"\")) == 0 {\n\t\t\t\t\t\trequire.False(t, isLeader.canWrite(), \"client must release in-memory leader status before Kube API call\")\n\t\t\t\t\t}\n\t\t\t\t\treturn true, nil, errors.New(\"cannot renew\")\n\t\t\t\t})\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"renewal fails due to context\",\n\t\t\tf: func(t *testing.T, internalClient *kubefake.Clientset, isLeader *isLeaderTracker, cancel context.CancelFunc) {\n\t\t\t\tt.Cleanup(func() {\n\t\t\t\t\trequire.False(t, isLeader.canWrite(), \"client must release in-memory leader status when context is canceled\")\n\t\t\t\t})\n\t\t\t\tstart := time.Now()\n\t\t\t\tinternalClient.PrependReactor(\"update\", \"*\", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {\n\t\t\t\t\t// keep going for a bit\n\t\t\t\t\tif time.Since(start) < 5*time.Second {\n\t\t\t\t\t\treturn false, nil, nil\n\t\t\t\t\t}\n\n\t\t\t\t\tcancel()\n\t\t\t\t\treturn false, nil, nil\n\t\t\t\t})\n\t\t\t},\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\ttt := tt\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tinternalClient := kubefake.NewSimpleClientset()\n\t\t\tisLeader := &isLeaderTracker{tracker: &atomic.Bool{}}\n\n\t\t\tleaderElectorCtx, cancel := context.WithCancel(context.Background())\n\n\t\t\ttt.f(t, internalClient, isLeader, cancel)\n\n\t\t\tleaderElectionConfig := newLeaderElectionConfig(\"ns-001\", \"lease-001\", \"foo-001\", internalClient, isLeader)\n\n\t\t\t// make the 
tests run quicker\n\t\t\tleaderElectionConfig.LeaseDuration = 2 * time.Second\n\t\t\tleaderElectionConfig.RenewDeadline = 1 * time.Second\n\t\t\tleaderElectionConfig.RetryPeriod = 250 * time.Millisecond\n\n\t\t\t// note that this will block until it exits on its own or tt.f calls cancel()\n\t\t\tleaderelection.RunOrDie(leaderElectorCtx, leaderElectionConfig)\n\t\t})\n\t}\n}", "func checkEvents(t *testing.T, expectedEvents []string, ctrl *PersistentVolumeController) error {\n\tvar err error\n\n\t// Read recorded events - wait up to 1 minute to get all the expected ones\n\t// (just in case some goroutines are slower with writing)\n\ttimer := time.NewTimer(time.Minute)\n\tdefer timer.Stop()\n\n\tfakeRecorder := ctrl.eventRecorder.(*record.FakeRecorder)\n\tgotEvents := []string{}\n\tfinished := false\n\tfor len(gotEvents) < len(expectedEvents) && !finished {\n\t\tselect {\n\t\tcase event, ok := <-fakeRecorder.Events:\n\t\t\tif ok {\n\t\t\t\tglog.V(5).Infof(\"event recorder got event %s\", event)\n\t\t\t\tgotEvents = append(gotEvents, event)\n\t\t\t} else {\n\t\t\t\tglog.V(5).Infof(\"event recorder finished\")\n\t\t\t\tfinished = true\n\t\t\t}\n\t\tcase _, _ = <-timer.C:\n\t\t\tglog.V(5).Infof(\"event recorder timeout\")\n\t\t\tfinished = true\n\t\t}\n\t}\n\n\t// Evaluate the events\n\tfor i, expected := range expectedEvents {\n\t\tif len(gotEvents) <= i {\n\t\t\tt.Errorf(\"Event %q not emitted\", expected)\n\t\t\terr = fmt.Errorf(\"Events do not match\")\n\t\t\tcontinue\n\t\t}\n\t\treceived := gotEvents[i]\n\t\tif !strings.HasPrefix(received, expected) {\n\t\t\tt.Errorf(\"Unexpected event received, expected %q, got %q\", expected, received)\n\t\t\terr = fmt.Errorf(\"Events do not match\")\n\t\t}\n\t}\n\tfor i := len(expectedEvents); i < len(gotEvents); i++ {\n\t\tt.Errorf(\"Unexpected event received: %q\", gotEvents[i])\n\t\terr = fmt.Errorf(\"Events do not match\")\n\t}\n\treturn err\n}", "func TestEventsAlertEngine(t *testing.T) {\n\t// setup events pipeline to 
record and distribute events\n\tti := tInfo{batchInterval: 100 * time.Millisecond, dedupInterval: 100 * time.Second}\n\tAssertOk(t, ti.setup(t), \"failed to setup test\")\n\tdefer ti.teardown()\n\n\t// start spyglass (backend service for events)\n\tfdrTemp, fdrAddr, err := testutils.StartSpyglass(\"finder\", \"\", ti.mockResolver, nil, ti.logger, ti.esClient)\n\tAssertOk(t, err, \"failed to start spyglass finder, err: %v\", err)\n\tfdr := fdrTemp.(finder.Interface)\n\tdefer fdr.Stop()\n\tti.updateResolver(globals.Spyglass, fdrAddr)\n\n\t// API gateway\n\tapiGw, apiGwAddr, err := testutils.StartAPIGateway(\":0\", false,\n\t\tmap[string]string{}, []string{\"telemetry_query\", \"objstore\", \"tokenauth\", \"routing\"}, []string{}, ti.mockResolver, ti.logger)\n\tAssertOk(t, err, \"failed to start API gateway, err: %v\", err)\n\tdefer apiGw.Stop()\n\n\t// setup authn and get authz token\n\tuserCreds := &auth.PasswordCredential{Username: testutils.TestLocalUser, Password: testutils.TestLocalPassword, Tenant: testutils.TestTenant}\n\terr = testutils.SetupAuth(ti.apiServerAddr, true, nil, nil, userCreds, ti.logger)\n\tAssertOk(t, err, \"failed to setup authN service, err: %v\", err)\n\tdefer testutils.CleanupAuth(ti.apiServerAddr, true, false, userCreds, ti.logger)\n\tauthzHeader, err := testutils.GetAuthorizationHeader(apiGwAddr, userCreds)\n\tAssertOk(t, err, \"failed to get authZ header, err: %v\", err)\n\n\t// add event based alert policies\n\t// policy - 1\n\talertPolicy1 := policygen.CreateAlertPolicyObj(globals.DefaultTenant, globals.DefaultNamespace, fmt.Sprintf(\"ap1-%s\", uuid.NewV4().String()),\n\t\t\"Event\", eventattrs.Severity_CRITICAL, \"critical alerts from events\",\n\t\t[]*fields.Requirement{\n\t\t\t{Key: \"count\", Operator: \"gte\", Values: []string{\"15\"}},\n\t\t\t{Key: \"source.node-name\", Operator: \"equals\", Values: []string{t.Name()}},\n\t\t}, []string{})\n\n\talertPolicy1, err = 
ti.apiClient.MonitoringV1().AlertPolicy().Create(context.Background(), alertPolicy1)\n\tAssertOk(t, err, \"failed to add alert policy{ap1-*}, err: %v\", err)\n\n\t// policy - 2\n\talertPolicy2 := policygen.CreateAlertPolicyObj(globals.DefaultTenant, globals.DefaultNamespace, fmt.Sprintf(\"ap2-%s\", uuid.NewV4().String()),\n\t\t\"Event\", eventattrs.Severity_WARN, \"warning alerts from events\",\n\t\t[]*fields.Requirement{\n\t\t\t{Key: \"count\", Operator: \"gte\", Values: []string{\"5\"}},\n\t\t\t{Key: \"count\", Operator: \"lt\", Values: []string{\"7\"}},\n\t\t\t{Key: \"severity\", Operator: \"equals\", Values: []string{\n\t\t\t\teventattrs.Severity_DEBUG.String(),\n\t\t\t\teventattrs.Severity_WARN.String(),\n\t\t\t\teventattrs.Severity_INFO.String()}},\n\t\t\t{Key: \"type\", Operator: \"equals\", Values: []string{\n\t\t\t\teventtypes.SERVICE_STOPPED.String(),\n\t\t\t\teventtypes.LEADER_LOST.String()}},\n\t\t}, []string{})\n\n\talertPolicy2, err = ti.apiClient.MonitoringV1().AlertPolicy().Create(context.Background(), alertPolicy2)\n\tAssertOk(t, err, \"failed to add alert policy{ap2-*}, err: %v\", err)\n\n\talertPolicy3 := policygen.CreateAlertPolicyObj(globals.DefaultTenant, globals.DefaultNamespace, fmt.Sprintf(\"ap3-%s\", uuid.NewV4().String()),\n\t\t\"Event\", eventattrs.Severity_WARN, \"policy with no reqs\", []*fields.Requirement{}, []string{})\n\talertPolicy3, err = ti.apiClient.MonitoringV1().AlertPolicy().Create(context.Background(), alertPolicy3)\n\tAssertOk(t, err, \"failed to add alert policy, err: %v\", err)\n\n\tdefer func() {\n\t\terr := ti.cleanupPolicies()\n\t\tAssertOk(t, err, \"failed to cleanup policies\")\n\t}()\n\n\t// generate events\n\t// define list of events to be recorded\n\tdummyObjRef := &cluster.Node{\n\t\tTypeMeta: api.TypeMeta{\n\t\t\tKind: \"Node\",\n\t\t},\n\t\tObjectMeta: api.ObjectMeta{\n\t\t\tTenant: globals.DefaultTenant,\n\t\t\tNamespace: globals.DefaultNamespace,\n\t\t},\n\t}\n\trecordEvents := []*struct {\n\t\teventType 
eventtypes.EventType\n\t\tmessage string\n\t\tobjRef interface{}\n\t\trepeat int // number of times to repeat the event\n\t}{\n\t\t// any of these events can generate an alert based on when it is getting dispatched from evtsproxy\n\t\t// to evtsmgr. e.g. ap2-*, count >=5 and count < 7\n\t\t{eventtypes.SERVICE_STARTED, fmt.Sprintf(\"(tenant:%s) test %s started\", dummyObjRef.Tenant, t.Name()), *dummyObjRef, 10},\n\t\t{eventtypes.SERVICE_RUNNING, fmt.Sprintf(\"(tenant:%s) test %s running\", dummyObjRef.Tenant, t.Name()), *dummyObjRef, 10},\n\t\t{eventtypes.SERVICE_UNRESPONSIVE, fmt.Sprintf(\"(tenant:%s) test %s unresponsive\", dummyObjRef.Tenant, t.Name()), *dummyObjRef, 15}, // this should generate an alert (alertPolicy1)\n\t\t{eventtypes.SERVICE_STOPPED, fmt.Sprintf(\"(tenant:%s) test %s stopped\", dummyObjRef.Tenant, t.Name()), *dummyObjRef, 5}, // this should generate an alert (alertPolicy2)\n\n\t\t{eventtypes.ELECTION_STARTED, fmt.Sprintf(\"(tenant:%s) dummy election: election started %s\", dummyObjRef.Tenant, t.Name()), *dummyObjRef, 10},\n\t\t{eventtypes.LEADER_ELECTED, fmt.Sprintf(\"(tenant:%s) dummy election: leader elected %s\", dummyObjRef.Tenant, t.Name()), *dummyObjRef, 10},\n\t\t{eventtypes.LEADER_CHANGED, fmt.Sprintf(\"(tenant:%s) dummy election: leader changed %s\", dummyObjRef.Tenant, t.Name()), *dummyObjRef, 15}, // this should generate an alert (alertPolicy1)\n\t\t{eventtypes.LEADER_LOST, fmt.Sprintf(\"(tenant:%s) dummy election: leader lost %s\", dummyObjRef.Tenant, t.Name()), *dummyObjRef, 5}, // this should generate an alert (alertPolicy2)\n\n\t\t// events in non default tenant\n\t\t{eventtypes.SERVICE_STARTED, fmt.Sprintf(\"(tenant:%s) test %s started\", globals.DefaultTenant, t.Name()), nil, 10},\n\t\t{eventtypes.SERVICE_RUNNING, fmt.Sprintf(\"(tenant:%s) test %s running\", globals.DefaultTenant, t.Name()), nil, 10},\n\t\t{eventtypes.SERVICE_UNRESPONSIVE, fmt.Sprintf(\"(tenant:%s) test %s unresponsive\", globals.DefaultTenant, t.Name()), nil, 
15}, // this should generate an alert (alertPolicy1)\n\t\t{eventtypes.SERVICE_STOPPED, fmt.Sprintf(\"(tenant:%s) test %s stopped\", globals.DefaultTenant, t.Name()), nil, 5}, // this should generate an alert (alertPolicy2)\n\t}\n\n\twg := new(sync.WaitGroup)\n\twg.Add(2)\n\n\t// start recorder\n\trecorderEventsDir, err := ioutil.TempDir(\"\", \"\")\n\tAssertOk(t, err, \"failed to create recorder events directory\")\n\tdefer os.RemoveAll(recorderEventsDir)\n\tgo func() {\n\t\tdefer wg.Done()\n\n\t\tevtsRecorder, err := recorder.NewRecorder(&recorder.Config{\n\t\t\tComponent: uuid.NewV4().String(),\n\t\t\tEvtsProxyURL: ti.evtProxyServices.EvtsProxy.RPCServer.GetListenURL(),\n\t\t\tBackupDir: recorderEventsDir}, ti.logger)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"failed to create recorder, err: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tti.recorders.Lock()\n\t\tti.recorders.list = append(ti.recorders.list, evtsRecorder)\n\t\tti.recorders.Unlock()\n\n\t\t// record events\n\t\tfor i := range recordEvents {\n\t\t\tif objRef, ok := recordEvents[i].objRef.(cluster.Node); ok {\n\t\t\t\tobjRef.ObjectMeta.Name = CreateAlphabetString(5)\n\t\t\t\trecordEvents[i].objRef = &objRef\n\t\t\t}\n\t\t\tfor j := 0; j < recordEvents[i].repeat; j++ {\n\t\t\t\tevtsRecorder.Event(recordEvents[i].eventType, recordEvents[i].message, recordEvents[i].objRef)\n\t\t\t}\n\t\t}\n\n\t\t// wait for the batch interval\n\t\ttime.Sleep(3 * time.Second)\n\t\t// if objRef!=nil, this should increase the hits but not recreate the alerts.\n\t\t// it will recreate alerts otherwise.\n\t\tfor i := range recordEvents {\n\t\t\tevtsRecorder.Event(recordEvents[i].eventType, recordEvents[i].message, recordEvents[i].objRef)\n\t\t}\n\t}()\n\n\t// TODO: cannot add criteria meta.tenant=\"default\" or any meta.*\n\t// list of alerts to be generated by the alert engine\n\ttests := []struct {\n\t\tselector string\n\t\texpMessage string // stings will spaces are not allowed in field selector; so, this attribute\n\t\texpSuccess 
bool\n\t}{\n\t\t{\n\t\t\tselector: fmt.Sprintf(\"status.reason.alert-policy-id=%s/%s,status.severity=%s,status.object-ref.kind=%s,meta.tenant=%s\",\n\t\t\t\talertPolicy1.GetName(), alertPolicy1.GetUUID(), alertPolicy1.Spec.GetSeverity(), dummyObjRef.GetKind(), dummyObjRef.GetTenant()),\n\t\t\texpMessage: fmt.Sprintf(\"(tenant:%s) test %s unresponsive\", dummyObjRef.Tenant, t.Name()),\n\t\t\texpSuccess: true,\n\t\t},\n\t\t{\n\t\t\tselector: fmt.Sprintf(\"status.reason.alert-policy-id=%s/%s,status.severity=%s,status.object-ref.kind=%s,meta.tenant=%s\",\n\t\t\t\talertPolicy1.GetName(), alertPolicy1.GetUUID(), alertPolicy1.Spec.GetSeverity(), dummyObjRef.GetKind(), dummyObjRef.GetTenant()),\n\t\t\texpMessage: fmt.Sprintf(\"(tenant:%s) dummy election: leader changed %s\", dummyObjRef.Tenant, t.Name()),\n\t\t\texpSuccess: true,\n\t\t},\n\t\t{\n\t\t\tselector: fmt.Sprintf(\"status.reason.alert-policy-id=%s/%s,status.severity=%s\",\n\t\t\t\talertPolicy1.GetName(), alertPolicy1.GetUUID(), alertPolicy1.Spec.GetSeverity()),\n\t\t\texpMessage: fmt.Sprintf(\"(tenant:%s) test %s unresponsive\", globals.DefaultTenant, t.Name()),\n\t\t\texpSuccess: true,\n\t\t},\n\t\t{\n\t\t\tselector: fmt.Sprintf(\"status.reason.alert-policy-id=%s/%s,status.severity=%s,status.object-ref.kind=%s,meta.tenant=%s\",\n\t\t\t\talertPolicy2.GetName(), alertPolicy2.GetUUID(), alertPolicy2.Spec.GetSeverity(), dummyObjRef.GetKind(), dummyObjRef.GetTenant()),\n\t\t\texpMessage: fmt.Sprintf(\"(tenant:%s) test %s stopped\", dummyObjRef.Tenant, t.Name()),\n\t\t\texpSuccess: true,\n\t\t},\n\t\t{\n\t\t\tselector: fmt.Sprintf(\"status.reason.alert-policy-id=%s/%s,status.severity=%s,meta.tenant=%s\",\n\t\t\t\talertPolicy2.GetName(), alertPolicy2.GetUUID(), alertPolicy2.Spec.GetSeverity(), globals.DefaultTenant),\n\t\t\texpMessage: fmt.Sprintf(\"(tenant:%s) test %s stopped\", dummyObjRef.Tenant, t.Name()),\n\t\t\texpSuccess: true,\n\t\t},\n\t\t{\n\t\t\tselector: 
fmt.Sprintf(\"status.reason.alert-policy-id=%s/%s,status.severity=%s,status.object-ref.kind=%s,meta.tenant=%s\",\n\t\t\t\talertPolicy2.GetName(), alertPolicy2.GetUUID(), alertPolicy2.Spec.GetSeverity(),\n\t\t\t\tdummyObjRef.GetKind(), dummyObjRef.GetTenant()),\n\t\t\texpMessage: fmt.Sprintf(\"(tenant:%s) dummy election: leader lost %s\", dummyObjRef.Tenant, t.Name()),\n\t\t\texpSuccess: true,\n\t\t},\n\t\t{\n\t\t\tselector: fmt.Sprintf(\"status.reason.alert-policy-id=%s/%s,status.severity=%s,status.object-ref.kind=%s\",\n\t\t\t\talertPolicy2.GetName(), alertPolicy2.GetUUID(), alertPolicy2.Spec.GetSeverity(), \"invalid\"),\n\t\t\texpMessage: fmt.Sprintf(\"(tenant:%s) dummy election: leader lost %s\", dummyObjRef.Tenant, t.Name()),\n\t\t\texpSuccess: false,\n\t\t},\n\t\t{\n\t\t\tselector: fmt.Sprintf(\"status.object-ref.kind=invalid\"),\n\t\t\texpSuccess: false,\n\t\t},\n\t}\n\n\t// test if the expected alerts are generated\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tfor _, test := range tests {\n\t\t\tAssertEventually(t, func() (bool, interface{}) {\n\t\t\t\talerts, err := ti.apiClient.MonitoringV1().Alert().List(context.Background(),\n\t\t\t\t\t&api.ListWatchOptions{\n\t\t\t\t\t\tObjectMeta: api.ObjectMeta{Tenant: globals.DefaultTenant},\n\t\t\t\t\t\tFieldSelector: test.selector})\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn false, fmt.Sprintf(\"%v failed, err: %v\", test.selector, err)\n\t\t\t\t}\n\n\t\t\t\tif test.expSuccess {\n\t\t\t\t\tfor _, alert := range alerts {\n\t\t\t\t\t\t// expecting a hit count of 2 since we duplicated all the events after the batch interval\n\t\t\t\t\t\tif alert.Status.Message == test.expMessage && alert.Status.TotalHits == 2 {\n\t\t\t\t\t\t\tif alert.ModTime == alert.CreationTime {\n\t\t\t\t\t\t\t\treturn false, fmt.Sprintf(\"mod-time of the alert did not get updated: %v\", alert.GetObjectMeta())\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\treturn true, nil\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif !test.expSuccess && len(alerts) == 0 
{\n\t\t\t\t\treturn true, nil\n\t\t\t\t}\n\n\t\t\t\treturn false, fmt.Sprintf(\"expected: %v, obtained: %v\", test, alerts)\n\t\t\t}, \"did not receive the expected alert\", string(\"1s\"), string(\"20s\"))\n\t\t}\n\t}()\n\n\twg.Wait()\n\n\t// make sure the policy status got updated\n\texpectedAlertStatus := []struct {\n\t\tpolicyMeta *api.ObjectMeta\n\t\tminTotalHits int32\n\t\tmaxTotalHits int32\n\t\topenAlerts int32\n\t\tacknowledgedAlerts int32\n\t}{\n\t\t{policyMeta: alertPolicy1.GetObjectMeta(), minTotalHits: 3, maxTotalHits: 6, openAlerts: 3, acknowledgedAlerts: 0},\n\t\t{policyMeta: alertPolicy2.GetObjectMeta(), minTotalHits: 3, maxTotalHits: 6, openAlerts: 3, acknowledgedAlerts: 0},\n\t\t{policyMeta: alertPolicy3.GetObjectMeta(), minTotalHits: 0, maxTotalHits: 0, openAlerts: 0, acknowledgedAlerts: 0}, // no reqs so, there should be no alerts\n\t}\n\tfor _, as := range expectedAlertStatus {\n\t\tAssertEventually(t, func() (bool, interface{}) {\n\t\t\tres, err := ti.apiClient.MonitoringV1().AlertPolicy().Get(context.Background(),\n\t\t\t\t&api.ObjectMeta{Name: as.policyMeta.GetName(), Tenant: as.policyMeta.GetTenant(), Namespace: as.policyMeta.GetNamespace(), UUID: as.policyMeta.GetUUID()})\n\t\t\tif err != nil {\n\t\t\t\treturn false, fmt.Sprintf(\":%v, err: %v\", as.policyMeta.GetName(), err)\n\t\t\t}\n\n\t\t\tif (res.Status.GetTotalHits() < as.minTotalHits) || (res.Status.GetTotalHits() > as.maxTotalHits) {\n\t\t\t\treturn false, fmt.Sprintf(\"total hits on policy %v expected total hits to be between (%v, %v) obtained: %v\", res.GetObjectMeta().GetName(), as.minTotalHits, as.maxTotalHits, res.Status.GetTotalHits())\n\t\t\t}\n\n\t\t\tif as.openAlerts != res.Status.GetOpenAlerts() {\n\t\t\t\treturn false, fmt.Sprintf(\"open alerts on policy %v expected: %v, obtained: %v\", res.GetObjectMeta().GetName(), as.openAlerts, res.Status.GetOpenAlerts())\n\t\t\t}\n\n\t\t\tif as.acknowledgedAlerts != res.Status.GetAcknowledgedAlerts() {\n\t\t\t\treturn false, 
fmt.Sprintf(\"acknowledged alerts on policy %v expected: %v, obtained: %v\", res.GetObjectMeta().GetName(), as.acknowledgedAlerts, res.Status.GetAcknowledgedAlerts())\n\t\t\t}\n\n\t\t\treturn true, nil\n\t\t}, \"alert status does not match the expected\", string(\"200ms\"), string(\"10s\"))\n\t}\n\n\t// resolve or acknowledge alerts\n\talerts, err := ti.apiClient.MonitoringV1().Alert().List(context.Background(),\n\t\t&api.ListWatchOptions{\n\t\t\tObjectMeta: api.ObjectMeta{Tenant: globals.DefaultTenant},\n\t\t\tFieldSelector: fmt.Sprintf(\"status.reason.alert-policy-id in (%s,%s,%s)\",\n\t\t\t\tfmt.Sprintf(\"%s/%s\", alertPolicy1.GetName(), alertPolicy1.GetUUID()),\n\t\t\t\tfmt.Sprintf(\"%s/%s\", alertPolicy2.GetName(), alertPolicy2.GetUUID()),\n\t\t\t\tfmt.Sprintf(\"%s/%s\", alertPolicy3.GetName(), alertPolicy3.GetUUID())),\n\t\t})\n\tAssertOk(t, err, \"failed to list alerts, err: %v\", err)\n\tAssert(t, len(alerts) > 2, \"expected more than 2 alerts, got: %v\", len(alerts))\n\n\talertTests := []*struct {\n\t\talert monitoring.Alert\n\t\tresolve bool\n\t\tacknowledge bool\n\t}{\n\t\t{alert: *alerts[0], resolve: true, acknowledge: false},\n\t\t{alert: *alerts[len(alerts)-1], resolve: false, acknowledge: true},\n\t}\n\n\tfor _, at := range alertTests {\n\t\taURL := fmt.Sprintf(\"https://%s/configs/monitoring/v1/alerts/%s\", apiGwAddr, at.alert.GetName())\n\t\tapURL := fmt.Sprintf(\"https://%s/configs/monitoring/v1/alertPolicies/%s\", apiGwAddr,\n\t\t\tstrings.Split(at.alert.Status.Reason.GetPolicyID(), \"/\")[0])\n\n\t\thttpClient := netutils.NewHTTPClient()\n\t\thttpClient.WithTLSConfig(&tls.Config{InsecureSkipVerify: true})\n\t\thttpClient.SetHeader(\"Authorization\", authzHeader)\n\t\thttpClient.DisableKeepAlives()\n\t\tdefer httpClient.CloseIdleConnections()\n\n\t\t// check alert policy before update\n\t\tap := &monitoring.AlertPolicy{}\n\t\tstatusCode, err := httpClient.Req(\"GET\", apURL, &api.ListWatchOptions{}, &ap)\n\t\tAssertOk(t, err, \"failed to get 
alert policy, err: %v\", err)\n\t\tAssert(t, statusCode == http.StatusOK, \"failed to get alert policy\")\n\n\t\t// UPDATE alert state (to acknowledged or resolved)\n\t\tif at.acknowledge {\n\t\t\tresp := monitoring.Alert{}\n\t\t\tAssertEventually(t,\n\t\t\t\tfunc() (bool, interface{}) {\n\t\t\t\t\tat.alert.Spec.State = monitoring.AlertState_ACKNOWLEDGED.String()\n\t\t\t\t\tstatusCode, err := httpClient.Req(\"PUT\", aURL, at.alert, &resp)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn false, fmt.Sprintf(\"err: %v\", err)\n\t\t\t\t\t}\n\n\t\t\t\t\tif statusCode != http.StatusOK {\n\t\t\t\t\t\treturn false, fmt.Sprintf(\"update failed with status: %d\", statusCode)\n\t\t\t\t\t}\n\n\t\t\t\t\tif resp.Status.Acknowledged == nil {\n\t\t\t\t\t\treturn false, fmt.Sprintf(\"alert status not updated, acknowledged: nil\")\n\t\t\t\t\t}\n\n\t\t\t\t\treturn true, nil\n\t\t\t\t}, \"failed to update alert state\", \"200ms\", \"6s\")\n\t\t} else if at.resolve {\n\t\t\tresp := monitoring.Alert{}\n\t\t\tAssertEventually(t,\n\t\t\t\tfunc() (bool, interface{}) {\n\t\t\t\t\tat.alert.Spec.State = monitoring.AlertState_RESOLVED.String()\n\t\t\t\t\tstatusCode, err := httpClient.Req(\"PUT\", aURL, at.alert, &resp)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn false, fmt.Sprintf(\"err: %v\", err)\n\t\t\t\t\t}\n\n\t\t\t\t\tif statusCode != http.StatusOK {\n\t\t\t\t\t\treturn false, fmt.Sprintf(\"update failed with status: %d\", statusCode)\n\t\t\t\t\t}\n\n\t\t\t\t\tif resp.Status.Resolved == nil {\n\t\t\t\t\t\treturn false, fmt.Sprintf(\"alert status not updated, resolved: nil\")\n\t\t\t\t\t}\n\n\t\t\t\t\treturn true, nil\n\t\t\t\t}, \"failed to update alert state\", \"200ms\", \"6s\")\n\t\t}\n\n\t\tupdatedAp := &monitoring.AlertPolicy{}\n\t\tstatusCode, err = httpClient.Req(\"GET\", apURL, &api.ListWatchOptions{}, &updatedAp)\n\t\tAssertOk(t, err, \"failed to get alert policy, err: %v\", err)\n\t\tAssert(t, statusCode == http.StatusOK, \"failed to get alert policy\")\n\t\tAssert(t, 
!at.acknowledge || (at.acknowledge && updatedAp.Status.AcknowledgedAlerts > ap.Status.AcknowledgedAlerts),\n\t\t\t\"expected #acknowledged alerts: >%d, got: %d\", ap.Status.AcknowledgedAlerts, updatedAp.Status.AcknowledgedAlerts)\n\t\tAssert(t, !at.resolve || (at.resolve && updatedAp.Status.OpenAlerts < ap.Status.OpenAlerts),\n\t\t\t\"expected #acknowledged alerts: <%d, got: %d\", ap.Status.OpenAlerts, updatedAp.Status.OpenAlerts)\n\t}\n}", "func TestAppdynamicsOK(t *testing.T) {\n\t// channel to collect received calls\n\tch := make(chan string, 1)\n\n\th := func(w http.ResponseWriter, r *http.Request) {\n\t\ts := r.URL.String()\n\t\tfmt.Fprintf(w, \"Hi there, I love %s!\", s)\n\t\tch <- r.URL.RawQuery\n\t}\n\thttp.HandleFunc(\"/\", h)\n\tgo http.ListenAndServe(\":8293\", nil)\n\ttime.Sleep(time.Millisecond * 100)\n\n\ta := Appdynamics{\n\t\tControllerTierURL: \"https://foo.saas.appdynamics.com/controller/rest/applications/bar/tiers/baz?output=JSON\",\n\t\tControllerUserName: \"[email protected]\",\n\t\tControllerPassword: \"pass123\",\n\t\tAgentURL: \"http://localhost:8293/machineagent/metrics?name=Server|Component:%d|Custom+Metrics|\",\n\t}\n\n\t// this error is expected since we are not connecting to actual controller\n\tassert.Error(t, a.Connect())\n\t// reset agent url value with '123' tier id\n\ta.AgentURL = fmt.Sprintf(a.AgentURL, 123)\n\tassert.Equal(t, a.AgentURL, \"http://localhost:8293/machineagent/metrics?name=Server|Component:123|Custom+Metrics|\")\n\n\ttm := time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC)\n\t// counter type - appd-type: sum\n\tm, _ := telegraf.NewMetric(\n\t\t\"foo\",\n\t\tmap[string]string{\"metric_type\": \"counter\"},\n\t\tmap[string]interface{}{\"value\": float64(1.23)},\n\t\ttm,\n\t)\n\tmetrics := []telegraf.Metric{m}\n\tassert.NoError(t, a.Write(metrics))\n\tcall := <-ch\n\tassert.Equal(t, \"name=Server|Component:123|Custom+Metrics|foo&value=1.23&type=sum\", call)\n\n\t// gauge type - appd-type: average\n\tm, _ = 
telegraf.NewMetric(\n\t\t\"foo\",\n\t\tmap[string]string{\"metric_type\": \"gauge\"},\n\t\tmap[string]interface{}{\"value\": float64(4.56)},\n\t\ttm,\n\t)\n\tmetrics = []telegraf.Metric{m}\n\tassert.NoError(t, a.Write(metrics))\n\tcall = <-ch\n\tassert.Equal(t, \"name=Server|Component:123|Custom+Metrics|foo&value=4.56&type=average\", call)\n\n\t// other type - defaults to appd-type: sum\n\tm, _ = telegraf.NewMetric(\n\t\t\"foo\",\n\t\tmap[string]string{\"metric_type\": \"other\"},\n\t\tmap[string]interface{}{\"value\": float64(7.89)},\n\t\ttm,\n\t)\n\tmetrics = []telegraf.Metric{m}\n\tassert.NoError(t, a.Write(metrics))\n\tcall = <-ch\n\tassert.Equal(t, \"name=Server|Component:123|Custom+Metrics|foo&value=7.89&type=sum\", call)\n\n\t// invalid: missing value\n\tm, _ = telegraf.NewMetric(\n\t\t\"foo\",\n\t\tmap[string]string{\"metric_type\": \"bar\"},\n\t\tmap[string]interface{}{\"values\": float64(7.89)},\n\t\ttm,\n\t)\n\tmetrics = []telegraf.Metric{m}\n\tassert.NoError(t, a.Write(metrics))\n\tselect {\n\tcase call = <-ch:\n\t\tt.Error(\"No messages expected, but got: \", call)\n\tdefault:\n\t}\n}", "func eventTestChannelConsumer(c *Consumer, mt *msgtracker, expCnt int) {\n\tfor ev := range c.Events() {\n\t\tif !handleTestEvent(c, mt, expCnt, ev) {\n\t\t\tbreak\n\t\t}\n\t}\n}", "func (e *Eval) assertionsConsistent(f *Flow, list []*reflow.Assertions) error {\n\tif !f.Op.External() {\n\t\treturn nil\n\t}\n\t// Add assertions of dependencies of flow.\n\tas := f.depAssertions()\n\tas = append(as, list...)\n\t_, err := reflow.MergeAssertions(as...)\n\treturn err\n}", "func TestInvalidToValidSubscription(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\tt.Parallel()\n\tcst, err := createConsensusSetTester(t.Name())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer cst.Close()\n\n\t// Start by performing a bad subscribe.\n\tms := newMockSubscriber()\n\tbadCCID := modules.ConsensusChangeID{255, 255, 255}\n\terr = cst.cs.ConsensusSetSubscribe(&ms, badCCID, 
cst.cs.tg.StopChan())\n\tif err != modules.ErrInvalidConsensusChangeID {\n\t\tt.Error(\"consensus set returning the wrong error during an invalid subscription:\", err)\n\t}\n\n\t// Perform a correct subscribe.\n\terr = cst.cs.ConsensusSetSubscribe(&ms, modules.ConsensusChangeBeginning, cst.cs.tg.StopChan())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Mine a block and check that the mock subscriber only got a single\n\t// consensus change.\n\tnumPrevUpdates := len(ms.updates)\n\t_, err = cst.miner.AddBlock()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif len(ms.updates) != numPrevUpdates+1 {\n\t\tt.Error(\"subscriber received two consensus changes for a single block\")\n\t}\n}", "func TestSkipNoMember(t *testing.T) {\n\tcommitteeMock, keys := agreement.MockCommittee(1, false, 2)\n\teb, roundChan := initAgreement(committeeMock)\n\thash, _ := crypto.RandEntropy(32)\n\teb.Publish(string(topics.Agreement), agreement.MockAgreement(hash, 1, 1, keys))\n\n\tselect {\n\tcase <-roundChan:\n\t\tassert.FailNow(t, \"not supposed to get a round update without reaching quorum\")\n\tcase <-time.After(100 * time.Millisecond):\n\t\t// all good\n\t}\n}", "func TestElectionSessionRecampaignBug5349(t *testing.T) {\n\tclus := NewClusterV3(t, &ClusterConfig{Size: 1})\n\tdefer clus.Terminate(t)\n\tcli := clus.RandClient()\n\n\te := concurrency.NewElection(cli, \"test-elect\")\n\tif err := e.Campaign(context.TODO(), \"abc\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\te2 := concurrency.NewElection(cli, \"test-elect\")\n\tif err := e2.Campaign(context.TODO(), \"def\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tctx, cancel := context.WithCancel(context.TODO())\n\tdefer cancel()\n\tif resp := <-e.Observe(ctx); len(resp.Kvs) == 0 || string(resp.Kvs[0].Value) != \"def\" {\n\t\tt.Fatalf(\"expected value=%q, got response %v\", \"def\", resp)\n\t}\n}", "func TestSubscribeSensors(t *testing.T) {\n\n\t// System contains sensors and zones\n\tsys, _, sensors := makeSystemWithZonesAndSensors(0, 
4)\n\tsensor1 := sensors[0]\n\tsensor2 := sensors[1]\n\tsensor3 := sensors[2]\n\tsensor4 := sensors[3]\n\n\tevtBus := evtbus.NewBus(100, 100)\n\tevtConsumer := &EventConsumer{}\n\tevtBus.AddConsumer(evtConsumer)\n\n\t// Create a monitor, we will pass in some initial state to pretend we know\n\t// about the value of sensor2, sensor4, this should cause the monitor to not request\n\t// a value for it and also return the value it knows about to the monitor group\n\tinitialSensorValues := make(map[string]gohome.SensorAttr)\n\tvar attr2 = sensor2.Attr\n\tattr2.Value = \"10\"\n\tinitialSensorValues[sensor2.ID] = attr2\n\tvar attr4 = sensor4.Attr\n\tattr4.Value = \"20\"\n\tinitialSensorValues[sensor4.ID] = attr4\n\n\tm := gohome.NewMonitor(sys, evtBus, initialSensorValues, nil)\n\n\tmockHandler := &MockChangeHandler{}\n\n\t// Request to monitor certain items\n\tgroup := &gohome.MonitorGroup{\n\t\tSensors: make(map[string]bool),\n\t\tHandler: mockHandler,\n\t\tTimeout: time.Duration(5) * time.Second,\n\t}\n\n\t// Add a sensor to the group, so we monitor it\n\tgroup.Sensors[sensor1.ID] = true\n\tgroup.Sensors[sensor2.ID] = true\n\tgroup.Sensors[sensor3.ID] = true\n\tgroup.Sensors[sensor4.ID] = true\n\n\t// Begin the subscription, should get back a monitor ID\n\tmID, _ := m.Subscribe(group, true)\n\trequire.NotEqual(t, \"\", mID)\n\n\t// Processing is async, small delay to let event bus process\n\ttime.Sleep(time.Millisecond * 1000)\n\n\t// Should have got an event asking for certain sensors to report their status\n\t// our sensor should be included in that\n\trequire.NotNil(t, evtConsumer.SensorsReport)\n\trequire.True(t, evtConsumer.SensorsReport.SensorIDs[sensor1.ID])\n\trequire.True(t, evtConsumer.SensorsReport.SensorIDs[sensor3.ID])\n\trequire.False(t, evtConsumer.SensorsReport.SensorIDs[sensor2.ID])\n\trequire.False(t, evtConsumer.SensorsReport.SensorIDs[sensor4.ID])\n\n\t// For sensors 2 and 4 we should have got an update callback since we passed in their\n\t// values 
to the monitor when we inited it\n\trequire.Equal(t, attr2, mockHandler.ChangeBatches[0].Sensors[sensor2.ID])\n\trequire.Equal(t, attr4, mockHandler.ChangeBatches[0].Sensors[sensor4.ID])\n\n\t// Now respond to the request for sensors 1 and 3 to report their values\n\treporting := &gohome.SensorsReportingEvt{}\n\tsensor1Attr := gohome.SensorAttr{\n\t\tName: \"sensor1\",\n\t\tValue: \"111\",\n\t}\n\treporting.Add(sensor1.ID, sensor1Attr)\n\tsensor3Attr := gohome.SensorAttr{\n\t\tName: \"sensor3\",\n\t\tValue: \"333\",\n\t}\n\treporting.Add(sensor3.ID, sensor3Attr)\n\n\t// Processing is async, small delay to let event bus process\n\tmockHandler.ChangeBatches = nil\n\tevtBus.Enqueue(reporting)\n\n\ttime.Sleep(time.Millisecond * 1000)\n\n\t// We should have got updates with the attribute values we are expecting\n\trequire.Equal(t, 2, len(mockHandler.ChangeBatches))\n\trequire.Equal(t, sensor1Attr, mockHandler.ChangeBatches[0].Sensors[sensor1.ID])\n\trequire.Equal(t, sensor3Attr, mockHandler.ChangeBatches[1].Sensors[sensor3.ID])\n}", "func TestSubscribe(t *testing.T) {\n\tassert := asserts.NewTesting(t, asserts.FailStop)\n\tmsh := mesh.New()\n\n\terr := msh.SpawnCells(\n\t\tNewTestBehavior(\"foo\"),\n\t\tNewTestBehavior(\"bar\"),\n\t\tNewTestBehavior(\"baz\"),\n\t)\n\tassert.NoError(err)\n\n\tmsh.Subscribe(\"foo\", \"bar\", \"baz\")\n\n\t// Directly ask mesh.\n\tfooS, err := msh.Subscribers(\"foo\")\n\tassert.NoError(err)\n\tassert.Length(fooS, 2)\n\tassert.Contains(fooS, \"bar\")\n\tassert.Contains(fooS, \"baz\")\n\n\t// Send event to store subscribers\n\tmsh.Emit(\"foo\", event.New(\"subscribers\"))\n\tpl, plc := event.NewReplyPayload()\n\tmsh.Emit(\"foo\", event.New(\"send\", pl))\n\tplr, err := plc.Wait(waitTimeout)\n\tassert.NoError(err)\n\tassert.Equal(plr.At(\"bar\").AsInt(0), 1)\n\tassert.Equal(plr.At(\"baz\").AsInt(0), 1)\n\n\t// Set additional values and let emit length.\n\tmsh.Emit(\"foo\", event.New(\"set\", \"a\", 1, \"b\", 1234))\n\tmsh.Emit(\"foo\", 
event.New(\"length\"))\n\twaitEvents(assert, msh, \"foo\")\n\n\t// Ask bar for received length.\n\tmsh.Emit(\"bar\", event.New(\"send\", pl))\n\tplr, err = plc.Wait(waitTimeout)\n\tassert.NoError(err)\n\tassert.Equal(plr.At(\"length\").AsInt(0), 4)\n\n\terr = msh.Stop()\n\tassert.NoError(err)\n}", "func TestPagarmeSubscriptionUpdate(t *testing.T) {\n \n Pagarme := pagarme.NewPagarme(\"pt-BR\", ApiKey, CryptoKey)\n Pagarme.SetDebug()\n\n planId, _ := client.Get(\"PlanoId\").Int64()\n cardId, _ := client.Get(\"CardId\").Result()\n subscriptionId, _ := client.Get(\"SubscriptionId\").Int64()\n subscription := pagarme.NewSubscriptionWithCard(planId)\n subscription.CardId = cardId\n subscription.Id = subscriptionId\n subscription.PostbackUrl = \"https://mobilemind.free.beeceptor.com/webhook/pagarme\"\n\n\n pagarmefillCustomer(subscription.Customer)\n\n result, err := Pagarme.SubscriptionUpdate(subscription)\n\n if err != nil {\n t.Errorf(\"Erro ao create subscription: %v\", err)\n }else{\n //t.Log(fmt.Sprintf(\"result = %v\", customer.Id)) \n\n if result.Id == 0 {\n t.Errorf(\"Subscription id is expected\")\n return\n }\n\n\n }\n\n}", "func TestAcceptReorg(t *testing.T) {\n\tissuer1, vm1, _, sharedMemory1 := GenesisVM(t, true, genesisJSONApricotPhase0, \"\", \"\")\n\tissuer2, vm2, _, sharedMemory2 := GenesisVM(t, true, genesisJSONApricotPhase0, \"\", \"\")\n\n\tdefer func() {\n\t\tif err := vm1.Shutdown(); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tif err := vm2.Shutdown(); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}()\n\n\tkey, err := accountKeystore.NewKey(rand.Reader)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Import 1 AVAX\n\timportAmount := uint64(1000000000)\n\tutxoID := avax.UTXOID{\n\t\tTxID: ids.ID{\n\t\t\t0x0f, 0x2f, 0x4f, 0x6f, 0x8e, 0xae, 0xce, 0xee,\n\t\t\t0x0d, 0x2d, 0x4d, 0x6d, 0x8c, 0xac, 0xcc, 0xec,\n\t\t\t0x0b, 0x2b, 0x4b, 0x6b, 0x8a, 0xaa, 0xca, 0xea,\n\t\t\t0x09, 0x29, 0x49, 0x69, 0x88, 0xa8, 0xc8, 0xe8,\n\t\t},\n\t}\n\n\tutxo := 
&avax.UTXO{\n\t\tUTXOID: utxoID,\n\t\tAsset: avax.Asset{ID: vm1.ctx.LUVAssetID},\n\t\tOut: &secp256k1fx.TransferOutput{\n\t\t\tAmt: importAmount,\n\t\t\tOutputOwners: secp256k1fx.OutputOwners{\n\t\t\t\tThreshold: 1,\n\t\t\t\tAddrs: []ids.ShortID{testKeys[0].PublicKey().Address()},\n\t\t\t},\n\t\t},\n\t}\n\tutxoBytes, err := vm1.codec.Marshal(codecVersion, utxo)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\txChainSharedMemory1 := sharedMemory1.NewSharedMemory(vm1.ctx.XChainID)\n\txChainSharedMemory2 := sharedMemory2.NewSharedMemory(vm2.ctx.XChainID)\n\tinputID := utxo.InputID()\n\tif err := xChainSharedMemory1.Put(vm1.ctx.ChainID, []*atomic.Element{{\n\t\tKey: inputID[:],\n\t\tValue: utxoBytes,\n\t\tTraits: [][]byte{\n\t\t\ttestKeys[0].PublicKey().Address().Bytes(),\n\t\t},\n\t}}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := xChainSharedMemory2.Put(vm2.ctx.ChainID, []*atomic.Element{{\n\t\tKey: inputID[:],\n\t\tValue: utxoBytes,\n\t\tTraits: [][]byte{\n\t\t\ttestKeys[0].PublicKey().Address().Bytes(),\n\t\t},\n\t}}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\timportTx, err := vm1.newImportTx(vm1.ctx.XChainID, key.Address, []*crypto.PrivateKeySECP256K1R{testKeys[0]})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := vm1.issueTx(importTx); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t<-issuer1\n\n\tvm1BlkA, err := vm1.BuildBlock()\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to build block with import transaction: %s\", err)\n\t}\n\n\tif err := vm1BlkA.Verify(); err != nil {\n\t\tt.Fatalf(\"Block failed verification on VM1: %s\", err)\n\t}\n\n\tif status := vm1BlkA.Status(); status != choices.Processing {\n\t\tt.Fatalf(\"Expected status of built block to be %s, but found %s\", choices.Processing, status)\n\t}\n\n\tif err := vm1.SetPreference(vm1BlkA.ID()); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvm2BlkA, err := vm2.ParseBlock(vm1BlkA.Bytes())\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error parsing block from vm2: %s\", err)\n\t}\n\tif err := vm2BlkA.Verify(); 
err != nil {\n\t\tt.Fatalf(\"Block failed verification on VM2: %s\", err)\n\t}\n\tif status := vm2BlkA.Status(); status != choices.Processing {\n\t\tt.Fatalf(\"Expected status of block on VM2 to be %s, but found %s\", choices.Processing, status)\n\t}\n\tif err := vm2.SetPreference(vm2BlkA.ID()); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := vm1BlkA.Accept(); err != nil {\n\t\tt.Fatalf(\"VM1 failed to accept block: %s\", err)\n\t}\n\tif err := vm2BlkA.Accept(); err != nil {\n\t\tt.Fatalf(\"VM2 failed to accept block: %s\", err)\n\t}\n\n\t// Create list of 10 successive transactions to build block A on vm1\n\t// and to be split into two separate blocks on VM2\n\ttxs := make([]*types.Transaction, 10)\n\tfor i := 0; i < 10; i++ {\n\t\ttx := types.NewTransaction(uint64(i), key.Address, big.NewInt(10), 21000, params.LaunchMinGasPrice, nil)\n\t\tsignedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainID), key.PrivateKey)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\ttxs[i] = signedTx\n\t}\n\n\t// Add the remote transactions, build the block, and set VM1's preference\n\t// for block B\n\terrs := vm1.chain.AddRemoteTxs(txs)\n\tfor i, err := range errs {\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Failed to add transaction to VM1 at index %d: %s\", i, err)\n\t\t}\n\t}\n\n\t<-issuer1\n\n\tvm1BlkB, err := vm1.BuildBlock()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := vm1BlkB.Verify(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif status := vm1BlkB.Status(); status != choices.Processing {\n\t\tt.Fatalf(\"Expected status of built block to be %s, but found %s\", choices.Processing, status)\n\t}\n\n\tif err := vm1.SetPreference(vm1BlkB.ID()); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\terrs = vm2.chain.AddRemoteTxs(txs[0:5])\n\tfor i, err := range errs {\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Failed to add transaction to VM2 at index %d: %s\", i, err)\n\t\t}\n\t}\n\n\t<-issuer2\n\n\tvm2BlkC, err := vm2.BuildBlock()\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to 
build BlkC on VM2: %s\", err)\n\t}\n\n\tif err := vm2BlkC.Verify(); err != nil {\n\t\tt.Fatalf(\"BlkC failed verification on VM2: %s\", err)\n\t}\n\n\tif err := vm2.SetPreference(vm2BlkC.ID()); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\terrs = vm2.chain.AddRemoteTxs(txs[5:])\n\tfor i, err := range errs {\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Failed to add transaction to VM2 at index %d: %s\", i, err)\n\t\t}\n\t}\n\n\t<-issuer2\n\n\tvm2BlkD, err := vm2.BuildBlock()\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to build BlkD on VM2: %s\", err)\n\t}\n\n\t// Parse blocks produced in vm2\n\tvm1BlkC, err := vm1.ParseBlock(vm2BlkC.Bytes())\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error parsing block from vm2: %s\", err)\n\t}\n\n\tvm1BlkD, err := vm1.ParseBlock(vm2BlkD.Bytes())\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error parsing block from vm2: %s\", err)\n\t}\n\n\tif err := vm1BlkC.Verify(); err != nil {\n\t\tt.Fatalf(\"Block failed verification on VM1: %s\", err)\n\t}\n\tif err := vm1BlkD.Verify(); err != nil {\n\t\tt.Fatalf(\"Block failed verification on VM1: %s\", err)\n\t}\n\n\tblkBHash := vm1BlkB.(*chain.BlockWrapper).Block.(*Block).ethBlock.Hash()\n\tif b := vm1.chain.BlockChain().CurrentBlock(); b.Hash() != blkBHash {\n\t\tt.Fatalf(\"expected current block to have hash %s but got %s\", blkBHash.Hex(), b.Hash().Hex())\n\t}\n\n\tif err := vm1BlkC.Accept(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tblkCHash := vm1BlkC.(*chain.BlockWrapper).Block.(*Block).ethBlock.Hash()\n\tif b := vm1.chain.BlockChain().CurrentBlock(); b.Hash() != blkCHash {\n\t\tt.Fatalf(\"expected current block to have hash %s but got %s\", blkCHash.Hex(), b.Hash().Hex())\n\t}\n\n\tif err := vm1BlkD.Accept(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tblkDHash := vm1BlkD.(*chain.BlockWrapper).Block.(*Block).ethBlock.Hash()\n\tif b := vm1.chain.BlockChain().CurrentBlock(); b.Hash() != blkDHash {\n\t\tt.Fatalf(\"expected current block to have hash %s but got %s\", blkDHash.Hex(), 
b.Hash().Hex())\n\t}\n}", "func TestSubscribeConsecutively(t *testing.T) {\n\tmaybeSkipIntegrationTest(t)\n\n\ts, err := NewSession()\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create session: %v\", err)\n\t}\n\tdefer s.Close()\n\n\tif err := s.Subscribe(\"ike-updown\", \"child-updown\"); err != nil {\n\t\tt.Fatalf(\"Unexpected error subscribing for events: %v\", err)\n\t}\n\n\tif !reflect.DeepEqual(s.el.events, []string{\"ike-updown\", \"child-updown\"}) {\n\t\tt.Fatalf(\"Expected to find ike-updown and child-updown registered, got: %v\", s.el.events)\n\t}\n\n\tif err := s.Subscribe(\"child-updown\", \"log\", \"ike-updown\"); err != nil {\n\t\tt.Fatalf(\"Unexpected error subscribing for additional events: %v\", err)\n\t}\n\n\t// Only the 'log' event should have been added.\n\tif !reflect.DeepEqual(s.el.events, []string{\"ike-updown\", \"child-updown\", \"log\"}) {\n\t\tt.Fatalf(\"Expected to find ike-updown and child-updown registered, got: %v\", s.el.events)\n\t}\n}", "func TestChannelGrantedPeriods(t *testing.T) {\n\tdefer db.SuspendSequenceBatching()()\n\trevocationTester, rt := InitScenario(t, nil)\n\tdefer rt.Close()\n\n\trevocationTester.addUserChannel(\"user\", \"a\")\n\trevId := rt.CreateDocReturnRev(t, \"doc\", \"\", map[string]interface{}{\"channels\": []string{\"a\"}})\n\tchanges := revocationTester.getChanges(0, 2)\n\tassert.Len(t, changes.Results, 2)\n\n\trevocationTester.removeUserChannel(\"user\", \"a\")\n\trevId = rt.CreateDocReturnRev(t, \"doc\", revId, map[string]interface{}{\"mutate\": \"mutate\", \"channels\": []string{\"a\"}})\n\tchanges = revocationTester.getChanges(changes.Last_Seq, 1)\n\tassert.Len(t, changes.Results, 1)\n\n\trevocationTester.addUserChannel(\"user\", \"a\")\n\tchanges = revocationTester.getChanges(changes.Last_Seq, 1)\n\tassert.Len(t, changes.Results, 1)\n\n\trevocationTester.removeUserChannel(\"user\", \"a\")\n\trevId = rt.CreateDocReturnRev(t, \"doc\", revId, map[string]interface{}{\"mutate\": \"mutate2\", \"channels\": 
[]string{\"a\"}})\n\tchanges = revocationTester.getChanges(changes.Last_Seq, 1)\n\tassert.Len(t, changes.Results, 1)\n\n\trevocationTester.addUserChannel(\"user\", \"a\")\n\trevId = rt.CreateDocReturnRev(t, \"doc\", revId, map[string]interface{}{\"mutate\": \"mutate3\", \"channels\": []string{\"a\"}})\n\tchanges = revocationTester.getChanges(changes.Last_Seq, 1)\n\tassert.Len(t, changes.Results, 1)\n\n\trevocationTester.addRole(\"user\", \"foo\")\n\trevocationTester.addRoleChannel(\"foo\", \"b\")\n\trevId = rt.CreateDocReturnRev(t, \"doc\", revId, map[string]interface{}{\"channels\": []string{\"b\"}})\n\tchanges = revocationTester.getChanges(changes.Last_Seq, 1)\n\n\trevocationTester.removeRoleChannel(\"foo\", \"b\")\n\trevocationTester.removeRole(\"user\", \"foo\")\n\t_ = rt.CreateDocReturnRev(t, \"doc\", revId, map[string]interface{}{\"mutate\": \"mutate\", \"channels\": []string{\"b\"}})\n\tchanges = revocationTester.getChanges(changes.Last_Seq, 1)\n}", "func (r *MockRepoManager) assertUpdate() {\n\tr.mtx.RLock()\n\tdefer r.mtx.RUnlock()\n\tassert.Equal(r.t, 0, r.updateCount)\n}", "func TestNoPublishingIfNotInCommittee(t *testing.T) {\n\teventBus, _, streamer, _, _ := launchReductionTest(false, 2)\n\n\t// Because round updates are asynchronous (sent through a channel), we wait\n\t// for a bit to let the broker update its round.\n\ttime.Sleep(200 * time.Millisecond)\n\t// send a hash to start reduction\n\thash, _ := crypto.RandEntropy(32)\n\tsendSelection(1, hash, eventBus)\n\n\t// Try to read from the stream, and see if we get any reduction messages from\n\t// ourselves.\n\tgo func() {\n\t\tfor {\n\t\t\t_, err := streamer.Read()\n\t\t\tassert.NoError(t, err)\n\t\t\t// HACK: what's the point?\n\t\t\tt.Fatal(\"\")\n\t\t}\n\t}()\n\n\ttimer := time.NewTimer(1 * time.Second)\n\t// if we dont get anything after a second, we can assume nothing was published.\n\t<-timer.C\n}", "func MockConsensusEvent(hash []byte, round uint64, step uint8, keys []key.ConsensusKeys, p 
*user.Provisioners, i ...int) consensus.Event {\n\taev := MockAgreementEvent(hash, round, step, keys, p, i...)\n\thdr := aev.Header\n\n\tbuf := new(bytes.Buffer)\n\t_ = Marshal(buf, *aev)\n\n\treturn consensus.Event{\n\t\tHeader: hdr,\n\t\tPayload: *buf,\n\t}\n}", "func (t *tInfo) assertElasticTotalEvents(te *testing.T, query es.Query, exact bool, totalEventsSent int, timeout string) {\n\tAssertEventually(te,\n\t\tfunc() (bool, interface{}) {\n\t\t\tvar totalEventsReceived int\n\t\t\tvar evt evtsapi.Event\n\n\t\t\t// total number of docs/events available (single events and de-duped events)\n\n\t\t\t// 1. query single events, count = 1\n\t\t\tsingleEvents := es.NewBoolQuery()\n\t\t\tsingleEvents.Must(query, es.NewRangeQuery(\"count\").Lte(1).Gt(0))\n\t\t\t// count = 1\n\t\t\tresp, err := t.esClient.Search(context.Background(), elastic.GetIndex(globals.Events, globals.DefaultTenant), singleEvents, nil, 0, 10, sortByField, sortAsc)\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\ttotalEventsReceived += int(resp.TotalHits())\n\n\t\t\t// 2. 
query de-duped events, count>1\n\t\t\tdedupedEvents := es.NewBoolQuery()\n\t\t\tdedupedEvents.Must(query, es.NewRangeQuery(\"count\").Gt(1))\n\t\t\tresp, err = t.esClient.Search(context.Background(), elastic.GetIndex(globals.Events, globals.DefaultTenant), dedupedEvents, nil, 0, 10000, sortByField, sortAsc)\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tfor _, hit := range resp.Hits.Hits {\n\t\t\t\t_ = json.Unmarshal(*hit.Source, &evt)\n\t\t\t\ttotalEventsReceived += int(evt.GetCount())\n\t\t\t}\n\n\t\t\tif exact {\n\t\t\t\tif !(totalEventsReceived == totalEventsSent) {\n\t\t\t\t\treturn false, fmt.Sprintf(\"expected: %d, got: %d\", totalEventsSent, totalEventsReceived)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif !(totalEventsReceived >= totalEventsSent) {\n\t\t\t\t\treturn false, fmt.Sprintf(\"expected: >=%d, got: %d\", totalEventsSent, totalEventsReceived)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn true, nil\n\t\t}, \"couldn't get the expected number of total events\", \"100ms\", timeout)\n}", "func (s *Service) Subscribe(ctx context.Context,\n\tepoch phase0.Epoch,\n\taccounts map[phase0.ValidatorIndex]e2wtypes.Account,\n) (map[phase0.Slot]map[phase0.CommitteeIndex]*beaconcommitteesubscriber.Subscription, error) {\n\tif len(accounts) == 0 {\n\t\t// Nothing to do.\n\t\treturn map[phase0.Slot]map[phase0.CommitteeIndex]*beaconcommitteesubscriber.Subscription{}, nil\n\t}\n\n\tstarted := time.Now()\n\tlog := log.With().Uint64(\"epoch\", uint64(epoch)).Logger()\n\tlog.Trace().Msg(\"Subscribing\")\n\n\tvalidatorIndices := make([]phase0.ValidatorIndex, 0, len(accounts))\n\tfor index := range accounts {\n\t\tvalidatorIndices = append(validatorIndices, index)\n\t}\n\tattesterDuties, err := s.attesterDutiesProvider.AttesterDuties(ctx, epoch, validatorIndices)\n\tif err != nil {\n\t\ts.monitor.BeaconCommitteeSubscriptionCompleted(started, \"failed\")\n\t\treturn nil, errors.Wrap(err, \"failed to obtain attester duties\")\n\t}\n\n\tlog.Trace().Dur(\"elapsed\", 
time.Since(started)).Int(\"accounts\", len(validatorIndices)).Msg(\"Fetched attester duties\")\n\tduties, err := attester.MergeDuties(ctx, attesterDuties)\n\tif err != nil {\n\t\ts.monitor.BeaconCommitteeSubscriptionCompleted(started, \"failed\")\n\t\treturn nil, errors.Wrap(err, \"failed to merge attester duties\")\n\t}\n\n\tsubscriptionInfo, err := s.calculateSubscriptionInfo(ctx, epoch, accounts, duties)\n\tif err != nil {\n\t\ts.monitor.BeaconCommitteeSubscriptionCompleted(started, \"failed\")\n\t\treturn nil, errors.Wrap(err, \"failed to calculate subscription duties\")\n\t}\n\tlog.Trace().Dur(\"elapsed\", time.Since(started)).Msg(\"Calculated subscription info\")\n\n\t// Update metrics.\n\tsubscriptions := 0\n\taggregators := 0\n\tfor _, v := range subscriptionInfo {\n\t\tfor _, v2 := range v {\n\t\t\tsubscriptions++\n\t\t\tif v2.IsAggregator {\n\t\t\t\taggregators++\n\t\t\t}\n\t\t}\n\t}\n\ts.monitor.BeaconCommitteeSubscribers(subscriptions)\n\ts.monitor.BeaconCommitteeAggregators(aggregators)\n\n\t// Submit the subscription information.\n\tgo func() {\n\t\tlog.Trace().Msg(\"Submitting subscription\")\n\t\tsubscriptions := make([]*api.BeaconCommitteeSubscription, 0, len(duties))\n\t\tfor slot, slotInfo := range subscriptionInfo {\n\t\t\tfor committeeIndex, info := range slotInfo {\n\t\t\t\tsubscriptions = append(subscriptions, &api.BeaconCommitteeSubscription{\n\t\t\t\t\tValidatorIndex: info.Duty.ValidatorIndex,\n\t\t\t\t\tSlot: slot,\n\t\t\t\t\tCommitteeIndex: committeeIndex,\n\t\t\t\t\tCommitteesAtSlot: info.Duty.CommitteesAtSlot,\n\t\t\t\t\tIsAggregator: info.IsAggregator,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t\tif err := s.submitter.SubmitBeaconCommitteeSubscriptions(ctx, subscriptions); err != nil {\n\t\t\tlog.Error().Err(err).Msg(\"Failed to submit beacon committees\")\n\t\t\ts.monitor.BeaconCommitteeSubscriptionCompleted(started, \"failed\")\n\t\t\treturn\n\t\t}\n\t\tlog.Trace().Dur(\"elapsed\", time.Since(started)).Msg(\"Submitted subscription 
request\")\n\t\ts.monitor.BeaconCommitteeSubscriptionCompleted(started, \"succeeded\")\n\t}()\n\n\t// Return the subscription info so the calling function knows the subnets to which we are subscribing.\n\treturn subscriptionInfo, nil\n}", "func handleTestEvent(c *Consumer, mt *msgtracker, expCnt int, ev Event) bool {\n\tswitch e := ev.(type) {\n\tcase *Message:\n\t\tif e.TopicPartition.Error != nil {\n\t\t\tmt.t.Errorf(\"Error: %v\", e.TopicPartition)\n\t\t}\n\t\tmt.msgs[mt.msgcnt] = e\n\t\tmt.msgcnt++\n\t\tif mt.msgcnt >= int64(expCnt) {\n\t\t\treturn false\n\t\t}\n\tcase PartitionEOF:\n\t\tbreak // silence\n\tdefault:\n\t\tmt.t.Fatalf(\"Consumer error: %v\", e)\n\t}\n\treturn true\n\n}", "func only_make_agreement(ag *contract_api.SolidityContract, agID []byte, sig_hash string, sig string, counterparty string) {\n err := error(nil)\n\n log.Printf(\"Make an agreement with ID:%v\\n\", agID)\n p := make([]interface{},0,10)\n p = append(p, agID)\n p = append(p, sig_hash[2:])\n p = append(p, sig[2:])\n p = append(p, counterparty)\n if _, err = ag.Invoke_method(\"create_agreement\", p); err != nil {\n log.Printf(\"...terminating, could not invoke create_agreement: %v\\n\", err)\n os.Exit(1)\n }\n log.Printf(\"Create agreement %v successfully submitted.\\n\", agID)\n}", "func TestIntegrationNewListener(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping integration test in short mode...\")\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second*120)\n\tdefer cancel()\n\n\tlistener := NewAmqpConnection(ctx, config)\n\t// Remove topic to ensure each test has a clean topic to work with\n\tdefer deleteSubscription(listener, config)\n\n\tnonce := time.Now().String()\n\tsender, err := listener.CreateAmqpSender(config.SubscribesToEvent)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\terr = sender.Send(ctx, amqp.NewMessage([]byte(nonce)))\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tstats, err := listener.GetQueueDepth()\n\tdepth := 
stats.ActiveMessageCount\n\tif err != nil || depth == -1 {\n\t\tt.Error(\"Failed to get queue depth\")\n\t\tt.Error(err)\n\t}\n\n\tif depth != 1 {\n\t\tt.Errorf(\"Expected queue depth of 1 Got:%v\", depth)\n\t\tt.Fail()\n\t}\n\n\tamqpMessage, err := listener.Receiver.Receive(ctx)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tmessage := messaging.NewAmqpMessageWrapper(amqpMessage)\n\n\tgo func() {\n\t\ttime.Sleep(time.Duration(45) * time.Second)\n\t\terr := listener.RenewLocks(ctx, []*amqp.Message{\n\t\t\tamqpMessage,\n\t\t})\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t}()\n\n\t//Added to ensure that locks are renewed\n\ttime.Sleep(time.Duration(75) * time.Second)\n\n\terr = message.Accept()\n\tif string(message.Body()) != nonce {\n\t\tt.Errorf(\"value not as expected in message Expected: %s Got: %s\", nonce, message.Body())\n\t}\n\n\tstats, err = listener.GetQueueDepth()\n\tdepth = stats.ActiveMessageCount\n\tif err != nil || depth == -1 {\n\t\tt.Error(\"Failed to get queue depth\")\n\t\tt.Error(err)\n\t}\n\n\tif depth != 0 {\n\t\tt.Errorf(\"Expected queue depth of 0 Got:%v\", depth)\n\t\tt.Fail()\n\t}\n}", "func TestEmitEvents(t *testing.T) {\n\tassert := asserts.NewTesting(t, asserts.FailStop)\n\tmsh := mesh.New()\n\n\terr := msh.SpawnCells(NewTestBehavior(\"foo\"))\n\tassert.NoError(err)\n\n\tmsh.Emit(\"foo\", event.New(\"set\", \"a\", 1))\n\tmsh.Emit(\"foo\", event.New(\"set\", \"b\", 2))\n\tmsh.Emit(\"foo\", event.New(\"set\", \"c\", 3))\n\n\tpl, plc := event.NewReplyPayload()\n\n\tmsh.Emit(\"foo\", event.New(\"send\", pl))\n\n\tplr, err := plc.Wait(waitTimeout)\n\n\tassert.NoError(err)\n\tassert.Equal(plr.At(\"a\").AsInt(0), 1)\n\tassert.Equal(plr.At(\"b\").AsInt(0), 2)\n\tassert.Equal(plr.At(\"c\").AsInt(0), 3)\n\n\terr = msh.Stop()\n\tassert.NoError(err)\n}", "func (s *ServicesTestSuite) Events(t *testing.T) {\n\tctx := context.Background()\n\ttestCases := []eventTest{\n\t\t{\n\t\t\tname: \"Cert authority with secrets\",\n\t\t\tkind: 
types.WatchKind{\n\t\t\t\tKind: types.KindCertAuthority,\n\t\t\t\tLoadSecrets: true,\n\t\t\t},\n\t\t\tcrud: func(context.Context) types.Resource {\n\t\t\t\tca := NewTestCA(types.UserCA, \"example.com\")\n\t\t\t\trequire.NoError(t, s.CAS.UpsertCertAuthority(ctx, ca))\n\n\t\t\t\tout, err := s.CAS.GetCertAuthority(ctx, *ca.ID(), true)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\trequire.NoError(t, s.CAS.DeleteCertAuthority(ctx, *ca.ID()))\n\t\t\t\treturn out\n\t\t\t},\n\t\t},\n\t}\n\ts.runEventsTests(t, testCases, types.Watch{Kinds: eventsTestKinds(testCases)})\n\n\ttestCases = []eventTest{\n\t\t{\n\t\t\tname: \"Cert authority without secrets\",\n\t\t\tkind: types.WatchKind{\n\t\t\t\tKind: types.KindCertAuthority,\n\t\t\t\tLoadSecrets: false,\n\t\t\t},\n\t\t\tcrud: func(context.Context) types.Resource {\n\t\t\t\tca := NewTestCA(types.UserCA, \"example.com\")\n\t\t\t\trequire.NoError(t, s.CAS.UpsertCertAuthority(ctx, ca))\n\n\t\t\t\tout, err := s.CAS.GetCertAuthority(ctx, *ca.ID(), false)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\trequire.NoError(t, s.CAS.DeleteCertAuthority(ctx, *ca.ID()))\n\t\t\t\treturn out\n\t\t\t},\n\t\t},\n\t}\n\ts.runEventsTests(t, testCases, types.Watch{Kinds: eventsTestKinds(testCases)})\n\n\ttestCases = []eventTest{\n\t\t{\n\t\t\tname: \"Token\",\n\t\t\tkind: types.WatchKind{\n\t\t\t\tKind: types.KindToken,\n\t\t\t},\n\t\t\tcrud: func(context.Context) types.Resource {\n\t\t\t\texpires := time.Now().UTC().Add(time.Hour)\n\t\t\t\ttok, err := types.NewProvisionToken(\"token\",\n\t\t\t\t\ttypes.SystemRoles{types.RoleAuth, types.RoleNode}, expires)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\trequire.NoError(t, s.ProvisioningS.UpsertToken(ctx, tok))\n\n\t\t\t\ttoken, err := s.ProvisioningS.GetToken(ctx, \"token\")\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\trequire.NoError(t, s.ProvisioningS.DeleteToken(ctx, \"token\"))\n\t\t\t\treturn token\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Namespace\",\n\t\t\tkind: types.WatchKind{\n\t\t\t\tKind: 
types.KindNamespace,\n\t\t\t},\n\t\t\tcrud: func(context.Context) types.Resource {\n\t\t\t\tns := types.Namespace{\n\t\t\t\t\tKind: types.KindNamespace,\n\t\t\t\t\tVersion: types.V2,\n\t\t\t\t\tMetadata: types.Metadata{\n\t\t\t\t\t\tName: \"testnamespace\",\n\t\t\t\t\t\tNamespace: apidefaults.Namespace,\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t\terr := s.PresenceS.UpsertNamespace(ns)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tout, err := s.PresenceS.GetNamespace(ns.Metadata.Name)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\terr = s.PresenceS.DeleteNamespace(ns.Metadata.Name)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\treturn out\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Static tokens\",\n\t\t\tkind: types.WatchKind{\n\t\t\t\tKind: types.KindStaticTokens,\n\t\t\t},\n\t\t\tcrud: func(context.Context) types.Resource {\n\t\t\t\tstaticTokens, err := types.NewStaticTokens(types.StaticTokensSpecV2{\n\t\t\t\t\tStaticTokens: []types.ProvisionTokenV1{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tToken: \"tok1\",\n\t\t\t\t\t\t\tRoles: types.SystemRoles{types.RoleNode},\n\t\t\t\t\t\t\tExpires: time.Now().UTC().Add(time.Hour),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t})\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\terr = s.ConfigS.SetStaticTokens(staticTokens)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tout, err := s.ConfigS.GetStaticTokens()\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\terr = s.ConfigS.DeleteStaticTokens()\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\treturn out\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Role\",\n\t\t\tkind: types.WatchKind{\n\t\t\t\tKind: types.KindRole,\n\t\t\t},\n\t\t\tcrud: func(context.Context) types.Resource {\n\t\t\t\trole, err := types.NewRole(\"role1\", types.RoleSpecV6{\n\t\t\t\t\tOptions: types.RoleOptions{\n\t\t\t\t\t\tMaxSessionTTL: types.Duration(time.Hour),\n\t\t\t\t\t},\n\t\t\t\t\tAllow: types.RoleConditions{\n\t\t\t\t\t\tLogins: []string{\"root\", \"bob\"},\n\t\t\t\t\t\tNodeLabels: types.Labels{types.Wildcard: 
[]string{types.Wildcard}},\n\t\t\t\t\t},\n\t\t\t\t\tDeny: types.RoleConditions{},\n\t\t\t\t})\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\terr = s.Access.UpsertRole(ctx, role)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tout, err := s.Access.GetRole(ctx, role.GetName())\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\terr = s.Access.DeleteRole(ctx, role.GetName())\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\treturn out\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"User\",\n\t\t\tkind: types.WatchKind{\n\t\t\t\tKind: types.KindUser,\n\t\t\t},\n\t\t\tcrud: func(context.Context) types.Resource {\n\t\t\t\tuser := newUser(\"user1\", []string{constants.DefaultImplicitRole})\n\t\t\t\terr := s.Users().UpsertUser(user)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tout, err := s.Users().GetUser(user.GetName(), false)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\trequire.NoError(t, s.Users().DeleteUser(ctx, user.GetName()))\n\t\t\t\treturn out\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Node\",\n\t\t\tkind: types.WatchKind{\n\t\t\t\tKind: types.KindNode,\n\t\t\t},\n\t\t\tcrud: func(context.Context) types.Resource {\n\t\t\t\tsrv := NewServer(types.KindNode, \"srv1\", \"127.0.0.1:2022\", apidefaults.Namespace)\n\n\t\t\t\t_, err := s.PresenceS.UpsertNode(ctx, srv)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tout, err := s.PresenceS.GetNodes(ctx, srv.Metadata.Namespace)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\terr = s.PresenceS.DeleteAllNodes(ctx, srv.Metadata.Namespace)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\treturn out[0]\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Proxy\",\n\t\t\tkind: types.WatchKind{\n\t\t\t\tKind: types.KindProxy,\n\t\t\t},\n\t\t\tcrud: func(context.Context) types.Resource {\n\t\t\t\tsrv := NewServer(types.KindProxy, \"srv1\", \"127.0.0.1:2022\", apidefaults.Namespace)\n\n\t\t\t\terr := s.PresenceS.UpsertProxy(ctx, srv)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tout, err := s.PresenceS.GetProxies()\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\terr = 
s.PresenceS.DeleteAllProxies()\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\treturn out[0]\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Tunnel connection\",\n\t\t\tkind: types.WatchKind{\n\t\t\t\tKind: types.KindTunnelConnection,\n\t\t\t},\n\t\t\tcrud: func(context.Context) types.Resource {\n\t\t\t\tconn, err := types.NewTunnelConnection(\"conn1\", types.TunnelConnectionSpecV2{\n\t\t\t\t\tClusterName: \"example.com\",\n\t\t\t\t\tProxyName: \"p1\",\n\t\t\t\t\tLastHeartbeat: time.Now().UTC(),\n\t\t\t\t})\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\terr = s.PresenceS.UpsertTunnelConnection(conn)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tout, err := s.PresenceS.GetTunnelConnections(\"example.com\")\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\terr = s.PresenceS.DeleteAllTunnelConnections()\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\treturn out[0]\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Reverse tunnel\",\n\t\t\tkind: types.WatchKind{\n\t\t\t\tKind: types.KindReverseTunnel,\n\t\t\t},\n\t\t\tcrud: func(context.Context) types.Resource {\n\t\t\t\ttunnel := newReverseTunnel(\"example.com\", []string{\"example.com:2023\"})\n\t\t\t\trequire.NoError(t, s.PresenceS.UpsertReverseTunnel(tunnel))\n\n\t\t\t\tout, err := s.PresenceS.GetReverseTunnels(context.Background())\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\terr = s.PresenceS.DeleteReverseTunnel(tunnel.Spec.ClusterName)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\treturn out[0]\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Remote cluster\",\n\t\t\tkind: types.WatchKind{\n\t\t\t\tKind: types.KindRemoteCluster,\n\t\t\t},\n\t\t\tcrud: func(ctx context.Context) types.Resource {\n\t\t\t\trc, err := types.NewRemoteCluster(\"example.com\")\n\t\t\t\trc.SetConnectionStatus(teleport.RemoteClusterStatusOffline)\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\trequire.NoError(t, s.PresenceS.CreateRemoteCluster(rc))\n\n\t\t\t\tout, err := s.PresenceS.GetRemoteClusters()\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\terr = s.PresenceS.DeleteRemoteCluster(ctx, 
rc.GetName())\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\treturn out[0]\n\t\t\t},\n\t\t},\n\t}\n\t// this also tests the partial success mode by requesting an unknown kind\n\ts.runEventsTests(t, testCases, types.Watch{\n\t\tKinds: append(eventsTestKinds(testCases), types.WatchKind{Kind: \"unknown\"}),\n\t\tAllowPartialSuccess: true,\n\t})\n\n\t// Namespace with a name\n\ttestCases = []eventTest{\n\t\t{\n\t\t\tname: \"Namespace with a name\",\n\t\t\tkind: types.WatchKind{\n\t\t\t\tKind: types.KindNamespace,\n\t\t\t\tName: \"shmest\",\n\t\t\t},\n\t\t\tcrud: func(context.Context) types.Resource {\n\t\t\t\tns := types.Namespace{\n\t\t\t\t\tKind: types.KindNamespace,\n\t\t\t\t\tVersion: types.V2,\n\t\t\t\t\tMetadata: types.Metadata{\n\t\t\t\t\t\tName: \"shmest\",\n\t\t\t\t\t\tNamespace: apidefaults.Namespace,\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t\terr := s.PresenceS.UpsertNamespace(ns)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tout, err := s.PresenceS.GetNamespace(ns.Metadata.Name)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\terr = s.PresenceS.DeleteNamespace(ns.Metadata.Name)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\treturn out\n\t\t\t},\n\t\t},\n\t}\n\ts.runEventsTests(t, testCases, types.Watch{Kinds: eventsTestKinds(testCases)})\n\n\t// tests that a watch fails given an unknown kind when the partial success mode is not enabled\n\ts.runUnknownEventsTest(t, types.Watch{Kinds: []types.WatchKind{\n\t\t{Kind: types.KindNamespace},\n\t\t{Kind: \"unknown\"},\n\t}})\n\n\t// tests that a watch fails if all given kinds are unknown even if the success mode is enabled\n\ts.runUnknownEventsTest(t, types.Watch{\n\t\tKinds: []types.WatchKind{\n\t\t\t{Kind: \"unrecognized\"},\n\t\t\t{Kind: \"unidentified\"},\n\t\t},\n\t\tAllowPartialSuccess: true,\n\t})\n}", "func (s *TestSuite) TestSubscribe(t *testing.T) {\n\t// Create a simulated device\n\tsimulator := gnmi.CreateSimulator(t)\n\tdefer gnmi.DeleteSimulator(t, simulator)\n\n\t// Wait for config to connect to the 
device\n\tgnmi.WaitForDeviceAvailable(t, device.ID(simulator.Name()), time.Minute)\n\n\t// Make a GNMI client to use for subscribe\n\tsubC := client.BaseClient{}\n\n\tpath, err := utils.ParseGNMIElements(utils.SplitPath(subTzPath))\n\n\tassert.NoError(t, err, \"Unexpected error doing parsing\")\n\n\tname := simulator.Name()\n\tpath.Target = name\n\n\tsubReq := subscribeRequest{\n\t\tpath: path,\n\t\tsubListMode: gpb.SubscriptionList_STREAM,\n\t\tsubStreamMode: gpb.SubscriptionMode_ON_CHANGE,\n\t}\n\n\tq, errQuery := buildQueryRequest(subReq)\n\tassert.NoError(t, errQuery, \"Can't build Query\")\n\n\tupdateCount := 0\n\tsyncCount := 0\n\n\tdone := make(chan bool, 1)\n\n\tq.ProtoHandler = func(msg protobuf.Message) error {\n\t\tresp, ok := msg.(*ocgnmi.SubscribeResponse)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"failed to type assert message %#v\", msg)\n\t\t}\n\n\t\tswitch v := resp.Response.(type) {\n\t\tcase *gpb.SubscribeResponse_Update:\n\t\t\t// validate update response\n\t\t\tif len(resp.GetUpdate().Update) == 1 {\n\t\t\t\tvalidateResponse(t, resp, simulator.Name(), false)\n\t\t\t}\n\n\t\t\t// validate delete response\n\t\t\tif len(resp.GetUpdate().Delete) == 1 {\n\t\t\t\tvalidateResponse(t, resp, simulator.Name(), true)\n\t\t\t}\n\t\t\ts.mux.Lock()\n\t\t\tupdateCount++\n\t\t\ts.mux.Unlock()\n\n\t\tcase *gpb.SubscribeResponse_Error:\n\t\t\treturn fmt.Errorf(\"error in response: %s\", v)\n\t\tcase *gpb.SubscribeResponse_SyncResponse:\n\t\t\t// validate sync response\n\t\t\tvalidateResponse(t, resp, simulator.Name(), false)\n\t\t\ts.mux.Lock()\n\t\t\tsyncCount++\n\t\t\ts.mux.Unlock()\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"unknown response %T: %s\", v, v)\n\t\t}\n\n\t\tif syncCount == expectedNumSyncs && updateCount == expectedNumUpdates {\n\t\t\tdone <- true\n\t\t}\n\t\treturn nil\n\t}\n\n\tgo func() {\n\t\t_ = subC.Subscribe(gnmi.MakeContext(), *q, \"gnmi\")\n\t\tdefer subC.Close()\n\n\t}()\n\n\t// Make a GNMI client to use for requests\n\tgnmiClient := 
gnmi.GetGNMIClientOrFail(t)\n\n\t// Set a value using gNMI client\n\tdevicePath := gnmi.GetDevicePathWithValue(simulator.Name(), subTzPath, subTzValue, proto.StringVal)\n\tgnmi.SetGNMIValueOrFail(t, gnmiClient, devicePath, gnmi.NoPaths, gnmi.NoExtensions)\n\n\t// Check that the value was set correctly\n\tgnmi.CheckGNMIValue(t, gnmiClient, devicePath, subTzValue, 0, \"Query after set returned the wrong value\")\n\n\t// Remove the path we added\n\tgnmi.SetGNMIValueOrFail(t, gnmiClient, gnmi.NoPaths, devicePath, gnmi.NoExtensions)\n\n\t// Make sure it got removed\n\tgnmi.CheckGNMIValue(t, gnmiClient, devicePath, \"\", 0, \"incorrect value found for path /system/clock/config/timezone-name after delete\")\n}", "func TestCommit(t *testing.T) {\n\tconst n = 4\n\tctrl := gomock.NewController(t)\n\ths := New()\n\tkeys := testutil.GenerateKeys(t, n, testutil.GenerateECDSAKey)\n\tbl := testutil.CreateBuilders(t, ctrl, n, keys...)\n\tacceptor := mocks.NewMockAcceptor(ctrl)\n\texecutor := mocks.NewMockExecutor(ctrl)\n\tsynchronizer := synchronizer.New(testutil.FixedTimeout(1000))\n\tcfg, replicas := testutil.CreateMockConfigWithReplicas(t, ctrl, n, keys...)\n\tbl[0].Register(hs, cfg, acceptor, executor, synchronizer, leaderrotation.NewFixed(2))\n\thl := bl.Build()\n\tsigners := hl.Signers()\n\n\t// create the needed blocks and QCs\n\tgenesisQC := hotstuff.NewQuorumCert(nil, 0, hotstuff.GetGenesis().Hash())\n\tb1 := testutil.NewProposeMsg(hotstuff.GetGenesis().Hash(), genesisQC, \"1\", 1, 2)\n\tb1QC := testutil.CreateQC(t, b1.Block, signers)\n\tb2 := testutil.NewProposeMsg(b1.Block.Hash(), b1QC, \"2\", 2, 2)\n\tb2QC := testutil.CreateQC(t, b2.Block, signers)\n\tb3 := testutil.NewProposeMsg(b2.Block.Hash(), b2QC, \"3\", 3, 2)\n\tb3QC := testutil.CreateQC(t, b3.Block, signers)\n\tb4 := testutil.NewProposeMsg(b3.Block.Hash(), b3QC, \"4\", 4, 2)\n\n\t// the second replica will be the leader, so we expect it to receive 
votes\n\treplicas[1].EXPECT().Vote(gomock.Any()).AnyTimes()\n\treplicas[1].EXPECT().NewView(gomock.Any()).AnyTimes()\n\n\t// executor will check that the correct command is executed\n\texecutor.EXPECT().Exec(gomock.Any()).Do(func(arg interface{}) {\n\t\tif arg.(hotstuff.Command) != b1.Block.Command() {\n\t\t\tt.Errorf(\"Wrong command executed: got: %s, want: %s\", arg, b1.Block.Command())\n\t\t}\n\t})\n\n\t// acceptor expects to receive the commands in order\n\tgomock.InOrder(\n\t\tacceptor.EXPECT().Proposed(gomock.Any()),\n\t\tacceptor.EXPECT().Accept(hotstuff.Command(\"1\")).Return(true),\n\t\tacceptor.EXPECT().Proposed(hotstuff.Command(\"1\")),\n\t\tacceptor.EXPECT().Accept(hotstuff.Command(\"2\")).Return(true),\n\t\tacceptor.EXPECT().Proposed(hotstuff.Command(\"2\")),\n\t\tacceptor.EXPECT().Accept(hotstuff.Command(\"3\")).Return(true),\n\t\tacceptor.EXPECT().Proposed(hotstuff.Command(\"3\")),\n\t\tacceptor.EXPECT().Accept(hotstuff.Command(\"4\")).Return(true),\n\t)\n\n\ths.OnPropose(b1)\n\ths.OnPropose(b2)\n\ths.OnPropose(b3)\n\ths.OnPropose(b4)\n}", "func (s *BasevhdlListener) EnterTolerance_aspect(ctx *Tolerance_aspectContext) {}", "func TestService_Handle_Inviter(t *testing.T) {\n\tmockStore := &mockstorage.MockStore{Store: make(map[string]mockstorage.DBEntry)}\n\tstoreProv := mockstorage.NewCustomMockStoreProvider(mockStore)\n\tk := newKMS(t, storeProv)\n\tprov := &protocol.MockProvider{\n\t\tStoreProvider: storeProv,\n\t\tServiceMap: map[string]interface{}{\n\t\t\tmediator.Coordination: &mockroute.MockMediatorSvc{},\n\t\t},\n\t\tCustomKMS: k,\n\t\tKeyTypeValue: kms.ED25519Type,\n\t\tKeyAgreementTypeValue: kms.X25519ECDHKWType,\n\t}\n\n\tctx := &context{\n\t\toutboundDispatcher: prov.OutboundDispatcher(),\n\t\tcrypto: &tinkcrypto.Crypto{},\n\t\tkms: k,\n\t\tkeyType: kms.ED25519Type,\n\t\tkeyAgreementType: kms.X25519ECDHKWType,\n\t}\n\n\tverPubKey, encPubKey := newSigningAndEncryptionDIDKeys(t, ctx)\n\n\tctx.vdRegistry = &mockvdr.MockVDRegistry{CreateValue: 
createDIDDocWithKey(verPubKey, encPubKey)}\n\n\tconnRec, err := connection.NewRecorder(prov)\n\trequire.NoError(t, err)\n\trequire.NotNil(t, connRec)\n\n\tctx.connectionRecorder = connRec\n\n\tdoc, err := ctx.vdRegistry.Create(testMethod, nil)\n\trequire.NoError(t, err)\n\n\ts, err := New(prov)\n\trequire.NoError(t, err)\n\n\tactionCh := make(chan service.DIDCommAction, 10)\n\terr = s.RegisterActionEvent(actionCh)\n\trequire.NoError(t, err)\n\n\tstatusCh := make(chan service.StateMsg, 10)\n\terr = s.RegisterMsgEvent(statusCh)\n\trequire.NoError(t, err)\n\n\tcompletedFlag := make(chan struct{})\n\trespondedFlag := make(chan struct{})\n\n\tgo msgEventListener(t, statusCh, respondedFlag, completedFlag)\n\n\tgo func() { service.AutoExecuteActionEvent(actionCh) }()\n\n\tinvitation := &Invitation{\n\t\tType: InvitationMsgType,\n\t\tID: randomString(),\n\t\tLabel: \"Bob\",\n\t\tRecipientKeys: []string{verPubKey},\n\t\tServiceEndpoint: \"http://alice.agent.example.com:8081\",\n\t}\n\n\terr = ctx.connectionRecorder.SaveInvitation(invitation.ID, invitation)\n\trequire.NoError(t, err)\n\n\tthid := randomString()\n\n\t// Invitation was previously sent by Alice to Bob.\n\t// Bob now sends a did-exchange Invitation\n\tpayloadBytes, err := json.Marshal(\n\t\t&Request{\n\t\t\tType: RequestMsgType,\n\t\t\tID: thid,\n\t\t\tLabel: \"Bob\",\n\t\t\tThread: &decorator.Thread{\n\t\t\t\tPID: invitation.ID,\n\t\t\t},\n\t\t\tDID: doc.DIDDocument.ID,\n\t\t\tDocAttach: unsignedDocAttach(t, doc.DIDDocument),\n\t\t})\n\trequire.NoError(t, err)\n\tmsg, err := service.ParseDIDCommMsgMap(payloadBytes)\n\trequire.NoError(t, err)\n\t_, err = s.HandleInbound(msg, service.NewDIDCommContext(doc.DIDDocument.ID, \"\", nil))\n\trequire.NoError(t, err)\n\n\tselect {\n\tcase <-respondedFlag:\n\tcase <-time.After(2 * time.Second):\n\t\trequire.Fail(t, \"didn't receive post event responded\")\n\t}\n\t// Alice automatically sends exchange Response to Bob\n\t// Bob replies with an ACK\n\tpayloadBytes, err = 
json.Marshal(\n\t\t&model.Ack{\n\t\t\tType: AckMsgType,\n\t\t\tID: randomString(),\n\t\t\tStatus: \"OK\",\n\t\t\tThread: &decorator.Thread{ID: thid},\n\t\t})\n\trequire.NoError(t, err)\n\n\tdidMsg, err := service.ParseDIDCommMsgMap(payloadBytes)\n\trequire.NoError(t, err)\n\n\t_, err = s.HandleInbound(didMsg, service.NewDIDCommContext(doc.DIDDocument.ID, \"\", nil))\n\trequire.NoError(t, err)\n\n\tselect {\n\tcase <-completedFlag:\n\tcase <-time.After(2 * time.Second):\n\t\trequire.Fail(t, \"didn't receive post event complete\")\n\t}\n\n\tvalidateState(t, s, thid, findNamespace(AckMsgType), (&completed{}).Name())\n}", "func TestAcquireTargets(t *testing.T) {\n\ttype mockTarget struct {\n\t\tID string\n\t\tMessage string\n\t\tCreatedOn string\n\t}\n\n\tmockData := mockTarget{\n\t\tID: \"01EBP4DP4VECW8PHDJJFNEDVKE\",\n\t\tMessage: \"send a message\",\n\t\tCreatedOn: \"2020-06-25T16:23:37.720Z\",\n\t}\n\n\tres, err := NewClient().AcquireTargets(context.Background(), &pb.EventMessage{\n\t\tId: \"01EBP4DP4VECW8PHDJJFNEDVKE\",\n\t\tName: \"targets.acquired\",\n\t\tData: []*pb.TargetResponse{\n\t\t\t{\n\t\t\t\tId: \"01EBP4DP4VECW8PHDJJFNEDVKE\",\n\t\t\t\tMessage: mockData.Message,\n\t\t\t\tCreatedOn: mockData.CreatedOn,\n\t\t\t},\n\t\t},\n\t\tCreatedOn: \"2020-06-25T16:23:37.720Z\",\n\t})\n\tif err != nil {\n\t\tt.Errorf(\"Test failed with err %v\", err)\n\t}\n\tt.Log(res)\n}", "func (suite *FeeTestSuite) TestUpdateFees() {\n\t// this helper function creates two CDPs with id 1 and 2 respectively, each with zero fees\n\tsuite.createCdps()\n\n\t// move the context forward in time so that cdps will have fees accumulate if CalculateFees is called\n\t// note - time must be moved forward by a sufficient amount in order for additional\n\t// fees to accumulate, in this example 600 seconds\n\toldtime := suite.ctx.BlockTime()\n\tsuite.ctx = suite.ctx.WithBlockTime(suite.ctx.BlockTime().Add(time.Second * 600))\n\terr := suite.keeper.UpdateFeesForAllCdps(suite.ctx, 
\"xrp\")\n\tsuite.NoError(err) // check that we don't have any error\n\n\t// cdp we expect fees to accumulate for\n\tcdp1, found := suite.keeper.GetCDP(suite.ctx, \"xrp\", 1)\n\tsuite.True(found)\n\t// check fees are not zero\n\t// check that the fees have been updated\n\tsuite.False(cdp1.AccumulatedFees.IsZero())\n\t// now check that we have the correct amount of fees overall (22 JPYX for this scenario)\n\tsuite.Equal(sdk.NewInt(22), cdp1.AccumulatedFees.Amount)\n\tsuite.Equal(suite.ctx.BlockTime(), cdp1.FeesUpdated)\n\t// cdp we expect fees to not accumulate for because of rounding to zero\n\tcdp2, found := suite.keeper.GetCDP(suite.ctx, \"xrp\", 2)\n\tsuite.True(found)\n\t// check fees are zero\n\tsuite.True(cdp2.AccumulatedFees.IsZero())\n\tsuite.Equal(oldtime, cdp2.FeesUpdated)\n}", "func TestSynchronizerIntegration(t *testing.T) {\n\ttestutil.SkipIfShort(t)\n\t//TODO REACTIVATE THIS AND see if it is working for future\n\ttestutil.SkipIfDisabled(t)\n\tdefer testutil.EnableDebugForMethod()()\n\n\ta := assert.New(t)\n\n\tnode1 := newTestClusterNode(t, testClusterNodeConfig{\n\t\tHttpListen: \"localhost:8094\",\n\t\tNodeID: 1,\n\t\tNodePort: 11004,\n\t\tRemotes: \"localhost:11004\",\n\t})\n\ta.NotNil(node1)\n\tdefer node1.cleanup(true)\n\n\ttime.Sleep(2 * time.Second)\n\n\tclient1, err := node1.client(\"client1\", 10, true)\n\ta.NoError(err)\n\n\tclient1.Send(syncTopic, \"nobody\", \"\")\n\tclient1.Send(syncTopic, \"nobody\", \"\")\n\tclient1.Send(syncTopic, \"nobody\", \"\")\n\n\ttime.Sleep(2 * time.Second)\n\n\tnode2 := newTestClusterNode(t, testClusterNodeConfig{\n\t\tHttpListen: \"localhost:8095\",\n\t\tNodeID: 2,\n\t\tNodePort: 11005,\n\t\tRemotes: \"localhost:11004\",\n\t})\n\ta.NotNil(node2)\n\tdefer node2.cleanup(true)\n\n\tclient2, err := node2.client(\"client2\", 10, true)\n\ta.NoError(err)\n\n\tcmd := &protocol.Cmd{\n\t\tName: protocol.CmdReceive,\n\t\tArg: syncTopic + \" -3\",\n\t}\n\tdoneC := make(chan struct{})\n\tgo func() {\n\t\tfor 
{\n\t\t\tselect {\n\t\t\tcase m := <-client2.Messages():\n\t\t\t\tlog.WithField(\"m\", m).Error(\"Message received from first cluster\")\n\t\t\tcase e := <-client2.Errors():\n\t\t\t\tlog.WithField(\"clientError\", e).Error(\"Client error\")\n\t\t\tcase status := <-client2.StatusMessages():\n\t\t\t\tlog.WithField(\"status\", status).Error(\"Client status messasge\")\n\t\t\tcase <-doneC:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\tlog.Error(string(cmd.Bytes()))\n\tclient2.WriteRawMessage(cmd.Bytes())\n\ttime.Sleep(10 * time.Second)\n\tclose(doneC)\n}", "func requestRandomnessAndAssertRandomWordsRequestedEvent(\n\tt *testing.T,\n\tvrfConsumerHandle vrfConsumerContract,\n\tconsumerOwner *bind.TransactOpts,\n\tkeyHash common.Hash,\n\tsubID uint64,\n\tnumWords uint32,\n\tcbGasLimit uint32,\n\tuni coordinatorV2Universe,\n) (*big.Int, uint64) {\n\tminRequestConfirmations := uint16(2)\n\t_, err := vrfConsumerHandle.TestRequestRandomness(\n\t\tconsumerOwner,\n\t\tkeyHash,\n\t\tsubID,\n\t\tminRequestConfirmations,\n\t\tcbGasLimit,\n\t\tnumWords,\n\t)\n\trequire.NoError(t, err)\n\n\tuni.backend.Commit()\n\n\titer, err := uni.rootContract.FilterRandomWordsRequested(nil, nil, []uint64{subID}, nil)\n\trequire.NoError(t, err, \"could not filter RandomWordsRequested events\")\n\n\tevents := []*vrf_coordinator_v2.VRFCoordinatorV2RandomWordsRequested{}\n\tfor iter.Next() {\n\t\tevents = append(events, iter.Event)\n\t}\n\n\trequestID, err := vrfConsumerHandle.SRequestId(nil)\n\trequire.NoError(t, err)\n\n\tevent := events[len(events)-1]\n\trequire.Equal(t, event.RequestId, requestID, \"request ID in contract does not match request ID in log\")\n\trequire.Equal(t, keyHash.Bytes(), event.KeyHash[:], \"key hash of event (%s) and of request not equal (%s)\", hex.EncodeToString(event.KeyHash[:]), keyHash.String())\n\trequire.Equal(t, cbGasLimit, event.CallbackGasLimit, \"callback gas limit of event and of request not equal\")\n\trequire.Equal(t, minRequestConfirmations, 
event.MinimumRequestConfirmations, \"min request confirmations of event and of request not equal\")\n\trequire.Equal(t, numWords, event.NumWords, \"num words of event and of request not equal\")\n\n\treturn requestID, event.Raw.BlockNumber\n}", "func (a *ACS) countTruthyAgreements() int {\n\tn := 0\n\tfor _, ok := range a.bbaResults {\n\t\tif ok {\n\t\t\tn++\n\t\t}\n\t}\n\treturn n\n}", "func TestLeaderElectionReceiveMessages(t *testing.T) {\n\tp := newPaxos(&Config{ID: 1, NodeCount: 5})\n\n\tif p.progressTimer.isSet() {\n\t\tt.Fatalf(\"expected unset progress timer\")\n\t}\n\tif p.prepare != nil {\n\t\tt.Fatalf(\"expected nil *pb.Prepare\")\n\t}\n\tif exp, a := uint64(0), p.lastInstalled; a != exp {\n\t\tt.Fatalf(\"expected last installed view %d; found %d\", exp, a)\n\t}\n\tassertLeaderElectionState := func(expLastAttempted, expViewChanges int) {\n\t\tassertState(t, p, StateLeaderElection)\n\t\tif exp, a := uint64(expLastAttempted), p.lastAttempted; a != exp {\n\t\t\tt.Fatalf(\"expected last attempted view %d; found %d\", exp, a)\n\t\t}\n\t\tif exp, a := expViewChanges, len(p.viewChanges); a != exp {\n\t\t\tt.Fatalf(\"expected %d ViewChange message, found %d\", exp, a)\n\t\t}\n\t}\n\tassertLeaderElectionState(1, 1)\n\n\t// ViewChange message from node 0 should be applied successfully.\n\tvc := &pb.ViewChange{NodeId: 0, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 2)\n\n\t// Replayed ViewChange message should be ignored. 
Idempotent.\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 2)\n\n\t// ViewChange message from self should be ignored.\n\tvc = &pb.ViewChange{NodeId: 1, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 2)\n\n\t// ViewChange message for past view should be ignored.\n\tvc = &pb.ViewChange{NodeId: 2, AttemptedView: 0}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 2)\n\n\t// ViewChange message from node 2 should be applied successfully.\n\t// Leader election should complete, because quorum of 3 nodes achieved.\n\tvc = &pb.ViewChange{NodeId: 2, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 3)\n\tif exp, a := uint64(1), p.lastInstalled; a != exp {\n\t\tt.Fatalf(\"expected last installed view %d after leader election; found %d\", exp, a)\n\t}\n\tif !p.progressTimer.isSet() {\n\t\tt.Fatalf(\"expected progress timer to be set after completed leader election\")\n\t}\n\n\t// The rest of the ViewChange messages should be ignored.\n\tvc = &pb.ViewChange{NodeId: 3, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tvc = &pb.ViewChange{NodeId: 4, AttemptedView: 1}\n\tp.onViewChange(vc)\n\tassertLeaderElectionState(1, 3)\n}", "func (sm *State_Machine) FollTesting(t *testing.T) {\n\n\t//Creating a follower which has just joined the cluster.\n\tvar follTC TestCases\n\tfollTC.t = t\n\tvar cmdReq = []string{\"read test\", \"read cs733\"}\n\n\t//<<<|Id:1000|Status:follower|CurrTerm:2|LoggIng:1|votedFor:0|commitInd:0|>>>\n\n\t/*Sending an apped request*/\n\tfollTC.req = Append{Data: []byte(cmdReq[0])}\n\tfollTC.respExp = Commit{Data: []byte(\"5000\"), Err: []byte(\"I'm not leader\")}\n\tsm.ClientCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\n\t/*Sending appendEntry request*/\n\t//Suppose leader and follower are at same Term.\n\tentries1 := Logg{Logg: []MyLogg{{1, \"read test\"}}}\n\tfollTC.req = AppEntrReq{Term: 1, LeaderId: 5000, PreLoggInd: 0, PreLoggTerm: 2, Logg: entries1, LeaderCom: 
0}\n\tfollTC.respExp = Send{PeerId: 5000, Event: AppEntrResp{Term: 1, Succ: true}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh //expecting alarm signal\n\tfollTC.respExp = Alarm{T: 0}\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh //expecting LogStore\n\tfollTC.respExp = Alarm{T: 0}\n\tfollTC.expect()\n\n\t//<<<|Id:1000|Status:follower|CurrTerm:1|LoggInd:1|votedFor:0|commitInd:0|lastApp:0|>>>\n\n\t//Sending Multiple entries\n\tentries2 := Logg{Logg: []MyLogg{{1, \"read test\"}, {1, \"read cloud\"}, {1, \"read cs733\"}}}\n\tfollTC.req = AppEntrReq{Term: 1, LeaderId: 5000, PreLoggInd: 0, PreLoggTerm: 1, Logg: entries2, LeaderCom: 1}\n\tfollTC.respExp = Send{PeerId: 5000, Event: AppEntrResp{Term: 1, Succ: true}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh //expecting alarm signal\n\tfollTC.respExp = Alarm{T: 200}\n\tfollTC.expect()\n\n\t//<<<|Id:1000|Status:follower|CurrTerm:1|LoggInd:3|votedFor:0|commitInd:1|lastApp:0|>>>\n\n\t//Suppose leader has higher Term than follower.\n\tentries3 := Logg{Logg: []MyLogg{{1, \"read cs733\"}, {2, \"delete test\"}}}\n\tfollTC.req = AppEntrReq{Term: 2, LeaderId: 5000, PreLoggInd: 2, PreLoggTerm: 1, Logg: entries3, LeaderCom: 2}\n\tfollTC.respExp = Send{PeerId: 5000, Event: AppEntrResp{Term: 2, Succ: true}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh //expecting alarm signal\n\tfollTC.respExp = Alarm{T: 200}\n\tfollTC.expect()\n\n\t//<<<|Id:1000|Status:follower|CurrTerm:2|LoggIng:4|votedFor:0|commitInd:2|lastApp:0|>>>\n\n\t//Suppose follower has higher Term than leader.\n\tentries4 := Logg{Logg: []MyLogg{{1, \"read cs733\"}, {2, \"delete test\"}}}\n\tfollTC.req = AppEntrReq{Term: 1, LeaderId: 5000, PreLoggInd: 2, PreLoggTerm: 2, Logg: entries4, LeaderCom: 2}\n\tfollTC.respExp = 
Send{PeerId: 5000, Event: AppEntrResp{Term: 2, Succ: false}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh //expecting alarm signal\n\tfollTC.respExp = Alarm{T: 200}\n\tfollTC.expect()\n\n\t//Suppose prevIndex does not matches.\n\tentries4 = Logg{Logg: []MyLogg{{3, \"append test\"}, {3, \"cas test\"}}}\n\tfollTC.req = AppEntrReq{Term: 3, LeaderId: 5000, PreLoggInd: 5, PreLoggTerm: 3, Logg: entries4, LeaderCom: 2}\n\tfollTC.respExp = Send{PeerId: 5000, Event: AppEntrResp{Term: 2, Succ: false}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh //expecting alarm signal\n\tfollTC.respExp = Alarm{T: 200}\n\tfollTC.expect()\n\n\t//Suppose prevIndex matches, but entry doesnt.\n\tentries4 = Logg{Logg: []MyLogg{{3, \"append test\"}, {3, \"cas test\"}}}\n\tfollTC.req = AppEntrReq{Term: 3, LeaderId: 5000, PreLoggInd: 3, PreLoggTerm: 3, Logg: entries4, LeaderCom: 2}\n\tfollTC.respExp = Send{PeerId: 5000, Event: AppEntrResp{Term: 2, Succ: false}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh //expecting alarm signal\n\tfollTC.respExp = Alarm{T: 200}\n\tfollTC.expect()\n\n\t//Everything is ok, but no entry.\n\tfollTC.req = AppEntrReq{Term: 3, LeaderId: 5000, PreLoggInd: 3, PreLoggTerm: 3, LeaderCom: 2}\n\tfollTC.respExp = Alarm{T: 200}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\n\t/*Sending vote request*/\n\t//sending vote request with lower Term.\n\tfollTC.req = VoteReq{Term: 1, CandId: 2000, PreLoggInd: 1, PreLoggTerm: 1}\n\tfollTC.respExp = Send{PeerId: 2000, Event: VoteResp{Term: 2, VoteGrant: false}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\n\t//sending vote request with not updated Logg.\n\tfollTC.req = VoteReq{Term: 3, 
CandId: 2000, PreLoggInd: 1, PreLoggTerm: 1}\n\tfollTC.respExp = Send{PeerId: 2000, Event: VoteResp{Term: 2, VoteGrant: false}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\n\t//sending vote request with not updated Logg.\n\tfollTC.req = VoteReq{Term: 3, CandId: 2000, PreLoggInd: 5, PreLoggTerm: 4}\n\tfollTC.respExp = Send{PeerId: 2000, Event: VoteResp{Term: 3, VoteGrant: true}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\n\t//<<<|Id:1000|Status:follower|CurrTerm:3|LoggIng:4|votedFor:1|commitInd:2|lastApp:0|>>>\n\n\t//checking against duplicate vote rquest\n\tfollTC.req = VoteReq{Term: 3, CandId: 2000, PreLoggInd: 5, PreLoggTerm: 4}\n\tfollTC.respExp = Send{PeerId: 2000, Event: VoteResp{Term: 3, VoteGrant: false}}\n\tsm.NetCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.expect()\n\n\t/*Sending timeout*/\n\tfollTC.req = Timeout{}\n\tfollTC.respExp = Alarm{T: 150}\n\tsm.TimeoutCh <- follTC.req\n\tsm.EventProcess()\n\tfollTC.resp = <-sm.ActionCh //expecting alarm signal\n\tfollTC.expect()\n\tfollTC.resp = <-sm.ActionCh\n\tfollTC.respExp = Send{PeerId: 0, Event: VoteReq{Term: 4, CandId: 1000, PreLoggInd: 3, PreLoggTerm: 2}} //also the vote request\n\tfollTC.expect()\n\n}", "func TestChannelClientRollsBackPvtDataIfMvccReadConflict(t *testing.T) {\n\torgsContext := setupMultiOrgContext(t, mainSDK)\n\trequire.NoError(t, integration.EnsureChannelCreatedAndPeersJoined(t, mainSDK, orgChannelID, \"orgchannel.tx\", orgsContext))\n\t// private data collection used for test\n\tconst coll = \"collection1\"\n\t// collection key used for test\n\tconst key = \"collection_key\"\n\tccID := integration.GenerateExamplePvtID(true)\n\tcollConfig, err := newCollectionConfig(coll, \"OR('Org1MSP.member','Org2MSP.member','Org3MSP.member')\", 0, 2, 1000)\n\trequire.NoError(t, err)\n\trequire.NoError(t, integration.InstallExamplePvtChaincode(orgsContext, 
ccID))\n\trequire.NoError(t, integration.InstantiateExamplePvtChaincode(orgsContext, orgChannelID, ccID, \"OR('Org1MSP.member','Org2MSP.member','Org3MSP.member')\", collConfig))\n\tctxProvider := mainSDK.ChannelContext(orgChannelID, fabsdk.WithUser(org1User), fabsdk.WithOrg(org1Name))\n\tchClient, err := channel.New(ctxProvider)\n\trequire.NoError(t, err)\n\n\tvar errMtx sync.Mutex\n\terrs := multi.Errors{}\n\tvar wg sync.WaitGroup\n\n\t// test function; invokes a CC function that mutates the private data collection\n\tchangePvtData := func(amount int) {\n\t\tdefer wg.Done()\n\t\t_, err := chClient.Execute(\n\t\t\tchannel.Request{\n\t\t\t\tChaincodeID: ccID,\n\t\t\t\tFcn: \"addToInt\",\n\t\t\t\tArgs: [][]byte{[]byte(coll), []byte(key), []byte(strconv.Itoa(amount))},\n\t\t\t},\n\t\t)\n\t\tif err != nil {\n\t\t\terrMtx.Lock()\n\t\t\terrs = append(errs, err)\n\t\t\terrMtx.Unlock()\n\t\t\treturn\n\t\t}\n\t}\n\n\t// expected value at the end of the test\n\tconst expected = 10\n\n\twg.Add(2)\n\tgo changePvtData(expected)\n\tgo changePvtData(expected)\n\twg.Wait()\n\n\t// ensure the MVCC_READ_CONFLICT was reproduced\n\trequire.Truef(t, len(errs) > 0 && strings.Contains(errs[0].Error(), \"MVCC_READ_CONFLICT\"), \"could not reproduce MVCC_READ_CONFLICT\")\n\n\t// read current value of private data collection\n\tresp, err := chClient.Query(\n\t\tchannel.Request{\n\t\t\tChaincodeID: ccID,\n\t\t\tFcn: \"getprivate\",\n\t\t\tArgs: [][]byte{[]byte(coll), []byte(key)},\n\t\t},\n\t\tchannel.WithRetry(retry.TestRetryOpts),\n\t)\n\trequire.NoErrorf(t, err, \"error attempting to read private data\")\n\trequire.NotEmptyf(t, resp.Payload, \"reading private data returned empty response\")\n\n\tactual, err := strconv.Atoi(string(resp.Payload))\n\trequire.NoError(t, err)\n\n\tassert.Truef(t, actual == expected, \"Private data not rolled back during MVCC_READ_CONFLICT\")\n}", "func (suite *FeeTestSuite) TestUpdateFees() {\n\t// this helper function creates two CDPs with id 1 and 2 
respectively, each with zero fees\n\tsuite.createCdps()\n\n\t// move the context forward in time so that cdps will have fees accumulate if CalculateFees is called\n\t// note - time must be moved forward by a sufficient amount in order for additional\n\t// fees to accumulate, in this example 600 seconds\n\toldtime := suite.ctx.BlockTime()\n\tsuite.ctx = suite.ctx.WithBlockTime(suite.ctx.BlockTime().Add(time.Second * 600))\n\terr := suite.keeper.UpdateFeesForAllCdps(suite.ctx, \"xrp\")\n\tsuite.NoError(err) // check that we don't have any error\n\n\t// cdp we expect fees to accumulate for\n\tcdp1, _ := suite.keeper.GetCDP(suite.ctx, \"xrp\", 1)\n\t// check fees are not zero\n\t// check that the fees have been updated\n\tsuite.False(cdp1.AccumulatedFees.Empty())\n\t// now check that we have the correct amount of fees overall (22 USDX for this scenario)\n\tsuite.Equal(sdk.NewInt(22), cdp1.AccumulatedFees.AmountOf(\"usdx\"))\n\tsuite.Equal(suite.ctx.BlockTime(), cdp1.FeesUpdated)\n\t// cdp we expect fees to not accumulate for because of rounding to zero\n\tcdp2, _ := suite.keeper.GetCDP(suite.ctx, \"xrp\", 2)\n\n\t// check fees are zero\n\tsuite.True(cdp2.AccumulatedFees.Empty())\n\tsuite.Equal(oldtime, cdp2.FeesUpdated)\n}", "func (suite *KeeperTestSuite) TestOnAcknowledgementPacket() {\n\tdata := types.NewFungibleTokenPacketData(prefixCoins2, testAddr1.String(), testAddr2.String())\n\n\tsuccessAck := types.FungibleTokenPacketAcknowledgement{\n\t\tSuccess: true,\n\t}\n\tfailedAck := types.FungibleTokenPacketAcknowledgement{\n\t\tSuccess: false,\n\t\tError: \"failed packet transfer\",\n\t}\n\n\ttestCases := []struct {\n\t\tmsg string\n\t\tack types.FungibleTokenPacketAcknowledgement\n\t\tmalleate func()\n\t\tsource bool\n\t\tsuccess bool // success of ack\n\t}{\n\t\t{\"success ack causes no-op\", successAck,\n\t\t\tfunc() {}, true, true},\n\t\t{\"successful refund from source chain\", failedAck,\n\t\t\tfunc() {\n\t\t\t\tescrow := types.GetEscrowAddress(testPort1, 
testChannel1)\n\t\t\t\t_, err := suite.chainA.App.BankKeeper.AddCoins(suite.chainA.GetContext(), escrow, sdk.NewCoins(sdk.NewCoin(\"atom\", sdk.NewInt(100))))\n\t\t\t\tsuite.Require().NoError(err)\n\t\t\t}, true, false},\n\t\t{\"successful refund from external chain\", failedAck,\n\t\t\tfunc() {\n\t\t\t\tdata.Amount = prefixCoins\n\t\t\t}, false, false},\n\t}\n\n\tpacket := channeltypes.NewPacket(data.GetBytes(), 1, testPort1, testChannel1, testPort2, testChannel2, 100, 0)\n\n\tfor i, tc := range testCases {\n\t\ttc := tc\n\t\ti := i\n\t\tsuite.Run(fmt.Sprintf(\"Case %s\", tc.msg), func() {\n\t\t\tsuite.SetupTest() // reset\n\n\t\t\ttc.malleate()\n\n\t\t\tvar denom string\n\t\t\tif tc.source {\n\t\t\t\tprefix := types.GetDenomPrefix(packet.GetDestPort(), packet.GetDestChannel())\n\t\t\t\tdenom = prefixCoins2[0].Denom[len(prefix):]\n\t\t\t} else {\n\t\t\t\tdenom = data.Amount[0].Denom\n\t\t\t}\n\n\t\t\tpreCoin := suite.chainA.App.BankKeeper.GetBalance(suite.chainA.GetContext(), testAddr1, denom)\n\n\t\t\terr := suite.chainA.App.TransferKeeper.OnAcknowledgementPacket(suite.chainA.GetContext(), packet, data, tc.ack)\n\t\t\tsuite.Require().NoError(err, \"valid test case %d failed: %s\", i, tc.msg)\n\n\t\t\tpostCoin := suite.chainA.App.BankKeeper.GetBalance(suite.chainA.GetContext(), testAddr1, denom)\n\t\t\tdeltaAmount := postCoin.Amount.Sub(preCoin.Amount)\n\n\t\t\tif tc.success {\n\t\t\t\tsuite.Require().Equal(sdk.ZeroInt(), deltaAmount, \"successful ack changed balance\")\n\t\t\t} else {\n\t\t\t\tsuite.Require().Equal(prefixCoins2[0].Amount, deltaAmount, \"failed ack did not trigger refund\")\n\t\t\t}\n\t\t})\n\t}\n}", "func TestCommitmentSpendValidation(t *testing.T) {\n\tt.Parallel()\n\n\t// In the modern network, all channels use the new tweakless format,\n\t// but we also need to support older nodes that want to open channels\n\t// with the legacy format, so we'll test spending in both scenarios.\n\tfor _, tweakless := range []bool{true, false} {\n\t\ttweakless 
:= tweakless\n\t\tt.Run(fmt.Sprintf(\"tweak=%v\", tweakless), func(t *testing.T) {\n\t\t\ttestSpendValidation(t, tweakless)\n\t\t})\n\t}\n}", "func TestAcknowledgeMailHandler_ServeHTTP_OkReturned(t *testing.T) {\n\tmockedMailContext := new(MockedAcknowledgementMailContext)\n\tmockedMailContext.throwError = false\n\ttestee := AcknowledgeMailHandler{Logger: testhelpers.GetTestLogger(), MailContext: mockedMailContext}\n\n\tjsonData, _ := json.Marshal(getTestAcknowledgements())\n\n\treq, err := http.NewRequest(\"POST\", shared.AcknowledgmentPath, bytes.NewBuffer(jsonData))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\trr := httptest.NewRecorder()\n\thandler := http.HandlerFunc(testee.ServeHTTP)\n\n\thandler.ServeHTTP(rr, req)\n\tassert.Equal(t, 200, rr.Code, \"Status code 200 should be returned\")\n\n\tfor idx, ackData := range getTestAcknowledgements() {\n\t\tactual := mockedMailContext.ReceivedAcks[idx]\n\t\tassert.Equal(t, ackData, actual, \"Acknowledgment should be delivered\")\n\t}\n}", "func sendAssertionAnswer(section section.WithSig, query section.Section, token token.Token, s *Server) {\n\twaitTime := 10 * time.Millisecond\n\tdeadline := time.Now().Add(waitTime).UnixNano()\n\ts.caches.PendingQueries.AddAnswerByToken(section, token, deadline)\n\ttime.Sleep(waitTime)\n\tsectionSenders, answers := s.caches.PendingQueries.GetAndRemoveByToken(token, deadline)\n\tfor _, ss := range sectionSenders {\n\t\tsendSections(answers, ss.Token, ss.Sender, s)\n\t}\n}", "func TestReactorBroadcastEvidence(t *testing.T) {\n\tnumPeers := 7\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\t// create a stateDB for all test suites (nodes)\n\tstateDBs := make([]sm.Store, numPeers)\n\tval := types.NewMockPV()\n\n\t// We need all validators saved for heights at least as high as we have\n\t// evidence for.\n\theight := int64(numEvidence) + 10\n\tfor i := 0; i < numPeers; i++ {\n\t\tstateDBs[i] = initializeValidatorState(ctx, t, val, height)\n\t}\n\n\trts 
:= setup(ctx, t, stateDBs, 0)\n\n\trts.start(ctx, t)\n\n\t// Create a series of fixtures where each suite contains a reactor and\n\t// evidence pool. In addition, we mark a primary suite and the rest are\n\t// secondaries where each secondary is added as a peer via a PeerUpdate to the\n\t// primary. As a result, the primary will gossip all evidence to each secondary.\n\n\tprimary := rts.network.RandomNode()\n\tsecondaries := make([]*p2ptest.Node, 0, len(rts.network.NodeIDs())-1)\n\tsecondaryIDs := make([]types.NodeID, 0, cap(secondaries))\n\tfor id := range rts.network.Nodes {\n\t\tif id == primary.NodeID {\n\t\t\tcontinue\n\t\t}\n\n\t\tsecondaries = append(secondaries, rts.network.Nodes[id])\n\t\tsecondaryIDs = append(secondaryIDs, id)\n\t}\n\n\tevList := createEvidenceList(ctx, t, rts.pools[primary.NodeID], val, numEvidence)\n\n\t// Add each secondary suite (node) as a peer to the primary suite (node). This\n\t// will cause the primary to gossip all evidence to the secondaries.\n\tfor _, suite := range secondaries {\n\t\trts.peerChans[primary.NodeID] <- p2p.PeerUpdate{\n\t\t\tStatus: p2p.PeerStatusUp,\n\t\t\tNodeID: suite.NodeID,\n\t\t}\n\t}\n\n\t// Wait till all secondary suites (reactor) received all evidence from the\n\t// primary suite (node).\n\trts.waitForEvidence(t, evList, secondaryIDs...)\n\n\tfor _, pool := range rts.pools {\n\t\trequire.Equal(t, numEvidence, int(pool.Size()))\n\t}\n\n}", "func TestConfigChangeEvents(t *testing.T) {\n\tif err := testcert.GenerateCert(\"mail2.guerrillamail.com\", \"\", 365*24*time.Hour, false, 2048, \"P256\", \"./tests/\"); err != nil {\n\t\tt.Error(err)\n\t}\n\tdefer func() {\n\t\tif err := deleteIfExists(\"../tests/mail2.guerrillamail.com.cert.pem\"); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t\tif err := deleteIfExists(\"../tests/mail2.guerrillamail.com.key.pem\"); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t}()\n\n\toldconf := &AppConfig{}\n\tif err := oldconf.Load([]byte(configJsonA)); err != nil 
{\n\t\tt.Error(err)\n\t}\n\tlogger, _ := log.GetLogger(oldconf.LogFile, oldconf.LogLevel)\n\tbcfg := backends.BackendConfig{\"log_received_mails\": true}\n\tbackend, err := backends.New(bcfg, logger)\n\tif err != nil {\n\t\tt.Error(\"cannot create backend\", err)\n\t}\n\tapp, err := New(oldconf, backend, logger)\n\tif err != nil {\n\t\tt.Error(\"cannot create daemon\", err)\n\t}\n\t// simulate timestamp change\n\n\ttime.Sleep(time.Second + time.Millisecond*500)\n\tif err := os.Chtimes(oldconf.Servers[1].TLS.PrivateKeyFile, time.Now(), time.Now()); err != nil {\n\t\tt.Error(err)\n\t}\n\tif err := os.Chtimes(oldconf.Servers[1].TLS.PublicKeyFile, time.Now(), time.Now()); err != nil {\n\t\tt.Error(err)\n\t}\n\tnewconf := &AppConfig{}\n\tif err := newconf.Load([]byte(configJsonB)); err != nil {\n\t\tt.Error(err)\n\t}\n\tnewconf.Servers[0].LogFile = log.OutputOff.String() // test for log file change\n\tnewconf.LogLevel = log.InfoLevel.String()\n\tnewconf.LogFile = \"off\"\n\texpectedEvents := map[Event]bool{\n\t\tEventConfigPidFile: false,\n\t\tEventConfigLogFile: false,\n\t\tEventConfigLogLevel: false,\n\t\tEventConfigAllowedHosts: false,\n\t\tEventConfigServerNew: false, // 127.0.0.1:4654 will be added\n\t\tEventConfigServerRemove: false, // 127.0.0.1:9999 server removed\n\t\tEventConfigServerStop: false, // 127.0.0.1:3333: server (disabled)\n\t\tEventConfigServerLogFile: false, // 127.0.0.1:2526\n\t\tEventConfigServerLogReopen: false, // 127.0.0.1:2527\n\t\tEventConfigServerTimeout: false, // 127.0.0.1:2526 timeout\n\t\t//\"server_change:tls_config\": false, // 127.0.0.1:2526\n\t\tEventConfigServerMaxClients: false, // 127.0.0.1:2526\n\t\tEventConfigServerTLSConfig: false, // 127.0.0.1:2527 timestamp changed on certificates\n\t}\n\ttoUnsubscribe := map[Event]func(c *AppConfig){}\n\ttoUnsubscribeSrv := map[Event]func(c *ServerConfig){}\n\n\tfor event := range expectedEvents {\n\t\t// Put in anon func since range is overwriting event\n\t\tfunc(e Event) {\n\t\t\tif 
strings.Contains(e.String(), \"config_change\") {\n\t\t\t\tf := func(c *AppConfig) {\n\t\t\t\t\texpectedEvents[e] = true\n\t\t\t\t}\n\t\t\t\t_ = app.Subscribe(event, f)\n\t\t\t\ttoUnsubscribe[event] = f\n\t\t\t} else {\n\t\t\t\t// must be a server config change then\n\t\t\t\tf := func(c *ServerConfig) {\n\t\t\t\t\texpectedEvents[e] = true\n\t\t\t\t}\n\t\t\t\t_ = app.Subscribe(event, f)\n\t\t\t\ttoUnsubscribeSrv[event] = f\n\t\t\t}\n\n\t\t}(event)\n\t}\n\n\t// emit events\n\tnewconf.EmitChangeEvents(oldconf, app)\n\t// unsubscribe\n\tfor unevent, unfun := range toUnsubscribe {\n\t\t_ = app.Unsubscribe(unevent, unfun)\n\t}\n\tfor unevent, unfun := range toUnsubscribeSrv {\n\t\t_ = app.Unsubscribe(unevent, unfun)\n\t}\n\tfor event, val := range expectedEvents {\n\t\tif val == false {\n\t\t\tt.Error(\"Did not fire config change event:\", event)\n\t\t\tt.FailNow()\n\t\t}\n\t}\n\n\t// don't forget to reset\n\tif err := os.Truncate(oldconf.LogFile, 0); err != nil {\n\t\tt.Error(err)\n\t}\n}", "func TestAutoRevocations(t *testing.T) {\n\tt.Parallel()\n\n\t// Use a set of test chain parameters which allow for quicker vote\n\t// activation as compared to various existing network params.\n\tparams := quickVoteActivationParams()\n\n\t// Clone the parameters so they can be mutated, find the correct\n\t// deployment for the automatic ticket revocations agenda, and, finally,\n\t// ensure it is always available to vote by removing the time constraints to\n\t// prevent test failures when the real expiration time passes.\n\tconst voteID = chaincfg.VoteIDAutoRevocations\n\tparams = cloneParams(params)\n\tversion, deployment := findDeployment(t, params, voteID)\n\tremoveDeploymentTimeConstraints(deployment)\n\n\t// Shorter versions of useful params for convenience.\n\tcoinbaseMaturity := params.CoinbaseMaturity\n\tstakeValidationHeight := params.StakeValidationHeight\n\truleChangeInterval := int64(params.RuleChangeActivationInterval)\n\n\t// Create a test harness initialized with the 
genesis block as the tip.\n\tg := newChaingenHarness(t, params)\n\n\t// replaceAutoRevocationsVersions is a munge function which modifies the\n\t// provided block by replacing the block, stake, vote, and revocation\n\t// transaction versions with the versions associated with the automatic\n\t// ticket revocations deployment.\n\treplaceAutoRevocationsVersions := func(b *wire.MsgBlock) {\n\t\tchaingen.ReplaceBlockVersion(int32(version))(b)\n\t\tchaingen.ReplaceStakeVersion(version)(b)\n\t\tchaingen.ReplaceVoteVersions(version)(b)\n\t\tchaingen.ReplaceRevocationVersions(stake.TxVersionAutoRevocations)(b)\n\t}\n\n\t// ---------------------------------------------------------------------\n\t// Generate and accept enough blocks with the appropriate vote bits set to\n\t// reach one block prior to the automatic ticket revocations agenda becoming\n\t// active.\n\t// ---------------------------------------------------------------------\n\n\tg.AdvanceToStakeValidationHeight()\n\tg.AdvanceFromSVHToActiveAgendas(voteID)\n\tactiveAgendaHeight := uint32(stakeValidationHeight + ruleChangeInterval*3 - 1)\n\tg.AssertTipHeight(activeAgendaHeight)\n\n\t// Ensure the automatic ticket revocations agenda is active.\n\ttipHash := &g.chain.BestSnapshot().Hash\n\tgotActive, err := g.chain.IsAutoRevocationsAgendaActive(tipHash)\n\tif err != nil {\n\t\tt.Fatalf(\"error checking auto revocations agenda status: %v\", err)\n\t}\n\tif !gotActive {\n\t\tt.Fatal(\"expected auto revocations agenda to be active\")\n\t}\n\n\t// ---------------------------------------------------------------------\n\t// Generate enough blocks to have a known distance to the first mature\n\t// coinbase outputs for all tests that follow. These blocks continue to\n\t// purchase tickets to avoid running out of votes.\n\t//\n\t// ... -> bsv# -> bbm0 -> bbm1 -> ... 
-> bbm#\n\t// ---------------------------------------------------------------------\n\n\tfor i := uint16(0); i < coinbaseMaturity; i++ {\n\t\touts := g.OldestCoinbaseOuts()\n\t\tblockName := fmt.Sprintf(\"bbm%d\", i)\n\t\tg.NextBlock(blockName, nil, outs[1:], replaceAutoRevocationsVersions)\n\t\tg.SaveTipCoinbaseOuts()\n\t\tg.AcceptTipBlock()\n\t}\n\tg.AssertTipHeight(activeAgendaHeight + uint32(coinbaseMaturity))\n\n\t// Collect spendable outputs into two different slices. The outs slice is\n\t// intended to be used for regular transactions that spend from the output,\n\t// while the ticketOuts slice is intended to be used for stake ticket\n\t// purchases.\n\tvar outs []*chaingen.SpendableOut\n\tvar ticketOuts [][]chaingen.SpendableOut\n\tfor i := uint16(0); i < coinbaseMaturity; i++ {\n\t\tcoinbaseOuts := g.OldestCoinbaseOuts()\n\t\touts = append(outs, &coinbaseOuts[0])\n\t\tticketOuts = append(ticketOuts, coinbaseOuts[1:])\n\t}\n\n\t// Create a block that misses a vote and does not contain a revocation for\n\t// that missed vote.\n\t//\n\t// ...\n\t// \\-> b1(0)\n\tstartTip := g.TipName()\n\tg.NextBlock(\"b1\", outs[0], ticketOuts[0], g.ReplaceWithNVotes(4),\n\t\treplaceAutoRevocationsVersions)\n\tg.AssertTipNumRevocations(0)\n\tg.RejectTipBlock(ErrNoMissedTicketRevocation)\n\n\t// Create a block that misses a vote and contains a version 1 revocation\n\t// transaction.\n\t//\n\t// ...\n\t// \\-> b2(0)\n\tg.SetTip(startTip)\n\tg.NextBlock(\"b2\", outs[0], ticketOuts[0], g.ReplaceWithNVotes(4),\n\t\tg.CreateRevocationsForMissedTickets(), replaceAutoRevocationsVersions,\n\t\tchaingen.ReplaceRevocationVersions(1))\n\tg.AssertTipNumRevocations(1)\n\tg.RejectTipBlock(ErrInvalidRevocationTxVersion)\n\n\t// Create a block that misses a vote and contains a revocation with a\n\t// non-zero fee.\n\t//\n\t// ...\n\t// \\-> b3(0)\n\tg.SetTip(startTip)\n\tg.NextBlock(\"b3\", outs[0], ticketOuts[0], g.ReplaceWithNVotes(4),\n\t\tg.CreateRevocationsForMissedTickets(), 
replaceAutoRevocationsVersions,\n\t\tfunc(b *wire.MsgBlock) {\n\t\t\tfor _, stx := range b.STransactions {\n\t\t\t\tif !stake.IsSSRtx(stx) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t// Decrement the first output value to create a non-zero fee and\n\t\t\t\t// return so that only a single revocation transaction is\n\t\t\t\t// modified.\n\t\t\t\tstx.TxOut[0].Value--\n\t\t\t\treturn\n\t\t\t}\n\t\t})\n\tg.AssertTipNumRevocations(1)\n\t// Note that this will fail with ErrRegTxCreateStakeOut rather than hitting\n\t// the later error case of ErrBadPayeeValue since a revocation with a\n\t// non-zero fee will not be identified as a revocation if the automatic\n\t// ticket revocations agenda is active.\n\tg.RejectTipBlock(ErrRegTxCreateStakeOut)\n\n\t// Create a valid block that misses multiple votes and contains revocation\n\t// transactions for those votes.\n\t//\n\t// ... -> b4(0)\n\tg.SetTip(startTip)\n\tg.NextBlock(\"b4\", outs[0], ticketOuts[0], g.ReplaceWithNVotes(3),\n\t\tg.CreateRevocationsForMissedTickets(), replaceAutoRevocationsVersions)\n\tg.AssertTipNumRevocations(2)\n\tg.AcceptTipBlock()\n\n\t// Create a slice of the ticket hashes that revocations spent in the tip\n\t// block that was just connected.\n\trevocationTicketHashes := make([]chainhash.Hash, 0, params.TicketsPerBlock)\n\tfor _, stx := range g.Tip().STransactions {\n\t\t// Append revocation ticket hashes.\n\t\tif stake.IsSSRtx(stx) {\n\t\t\tticketHash := stx.TxIn[0].PreviousOutPoint.Hash\n\t\t\trevocationTicketHashes = append(revocationTicketHashes, ticketHash)\n\n\t\t\tcontinue\n\t\t}\n\t}\n\n\t// Validate that the revocations are now in the revoked ticket treap in the\n\t// ticket database.\n\ttipHash = &g.chain.BestSnapshot().Hash\n\tblockNode := g.chain.index.LookupNode(tipHash)\n\tstakeNode, err := g.chain.fetchStakeNode(blockNode)\n\tif err != nil {\n\t\tt.Fatalf(\"error fetching stake node: %v\", err)\n\t}\n\tfor _, revocationTicketHash := range revocationTicketHashes {\n\t\tif 
!stakeNode.ExistsRevokedTicket(revocationTicketHash) {\n\t\t\tt.Fatalf(\"expected ticket %v to exist in the revoked ticket treap\",\n\t\t\t\trevocationTicketHash)\n\t\t}\n\t}\n\n\t// Invalidate the previously connected block so that it is disconnected.\n\tg.InvalidateBlockAndExpectTip(\"b4\", nil, startTip)\n\n\t// Validate that the revocations from the disconnected block are now back in\n\t// the live ticket treap in the ticket database.\n\ttipHash = &g.chain.BestSnapshot().Hash\n\tblockNode = g.chain.index.LookupNode(tipHash)\n\tstakeNode, err = g.chain.fetchStakeNode(blockNode)\n\tif err != nil {\n\t\tt.Fatalf(\"error fetching stake node: %v\", err)\n\t}\n\tfor _, revocationTicketHash := range revocationTicketHashes {\n\t\tif !stakeNode.ExistsLiveTicket(revocationTicketHash) {\n\t\t\tt.Fatalf(\"expected ticket %v to exist in the live ticket treap\",\n\t\t\t\trevocationTicketHash)\n\t\t}\n\t}\n}", "func (s *Service) AttestAndScheduleAggregate(ctx context.Context, data interface{}) {\n\tstarted := time.Now()\n\tduty, ok := data.(*attester.Duty)\n\tif !ok {\n\t\tlog.Error().Msg(\"Passed invalid data\")\n\t\treturn\n\t}\n\tlog := log.With().Uint64(\"slot\", uint64(duty.Slot())).Logger()\n\n\tattestations, err := s.attester.Attest(ctx, duty)\n\tif err != nil {\n\t\tlog.Warn().Err(err).Msg(\"Failed to attest\")\n\t\treturn\n\t}\n\tlog.Trace().Dur(\"elapsed\", time.Since(started)).Msg(\"Attested\")\n\n\tif len(attestations) == 0 || attestations[0].Data == nil {\n\t\tlog.Debug().Msg(\"No attestations; nothing to aggregate\")\n\t\treturn\n\t}\n\n\tepoch := s.chainTimeService.SlotToEpoch(duty.Slot())\n\ts.subscriptionInfosMutex.Lock()\n\tsubscriptionInfoMap, exists := s.subscriptionInfos[epoch]\n\ts.subscriptionInfosMutex.Unlock()\n\tif !exists {\n\t\tlog.Debug().\n\t\t\tUint64(\"epoch\", uint64(epoch)).\n\t\t\tMsg(\"No subscription info for this epoch; not aggregating\")\n\t\treturn\n\t}\n\n\tfor _, attestation := range attestations {\n\t\tlog := 
log.With().Uint64(\"attestation_slot\", uint64(attestation.Data.Slot)).Uint64(\"committee_index\", uint64(attestation.Data.Index)).Logger()\n\t\tslotInfoMap, exists := subscriptionInfoMap[attestation.Data.Slot]\n\t\tif !exists {\n\t\t\tlog.Debug().Msg(\"No slot info; not aggregating\")\n\t\t\tcontinue\n\t\t}\n\t\t// Do not schedule aggregations for past slots.\n\t\tif attestation.Data.Slot < s.chainTimeService.CurrentSlot() {\n\t\t\tlog.Debug().Uint64(\"current_slot\", uint64(s.chainTimeService.CurrentSlot())).Msg(\"Aggregation in the past; not scheduling\")\n\t\t\tcontinue\n\t\t}\n\t\tinfo, exists := slotInfoMap[attestation.Data.Index]\n\t\tif !exists {\n\t\t\tlog.Debug().Uint64(\"committee_index\", uint64(attestation.Data.Index)).Msg(\"No committee info; not aggregating\")\n\t\t\tcontinue\n\t\t}\n\t\tif info.IsAggregator {\n\t\t\taccounts, err := s.validatingAccountsProvider.ValidatingAccountsForEpochByIndex(ctx, epoch, []phase0.ValidatorIndex{info.Duty.ValidatorIndex})\n\t\t\tif err != nil {\n\t\t\t\t// Don't return here; we want to try to set up as many aggregator jobs as possible.\n\t\t\t\tlog.Error().Err(err).Msg(\"Failed to obtain accounts\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif len(accounts) == 0 {\n\t\t\t\t// Don't return here; we want to try to set up as many aggregator jobs as possible.\n\t\t\t\tlog.Error().Msg(\"Failed to obtain account of attester\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tattestationDataRoot, err := attestation.Data.HashTreeRoot()\n\t\t\tif err != nil {\n\t\t\t\t// Don't return here; we want to try to set up as many aggregator jobs as possible.\n\t\t\t\tlog.Error().Err(err).Msg(\"Failed to obtain hash tree root of attestation\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\taggregatorDuty := &attestationaggregator.Duty{\n\t\t\t\tSlot: info.Duty.Slot,\n\t\t\t\tAttestationDataRoot: attestationDataRoot,\n\t\t\t\tValidatorIndex: info.Duty.ValidatorIndex,\n\t\t\t\tSlotSignature: info.Signature,\n\t\t\t}\n\t\t\tif err := 
s.scheduler.ScheduleJob(ctx,\n\t\t\t\tfmt.Sprintf(\"Beacon block attestation aggregation for slot %d committee %d\", attestation.Data.Slot, attestation.Data.Index),\n\t\t\t\ts.chainTimeService.StartOfSlot(attestation.Data.Slot).Add(s.slotDuration*2/3),\n\t\t\t\ts.attestationAggregator.Aggregate,\n\t\t\t\taggregatorDuty,\n\t\t\t); err != nil {\n\t\t\t\t// Don't return here; we want to try to set up as many aggregator jobs as possible.\n\t\t\t\tlog.Error().Err(err).Msg(\"Failed to schedule beacon block attestation aggregation job\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// We are set up as an aggregator for this slot and committee. It is possible that another validator has also been\n\t\t\t// assigned as an aggregator, but we're already carrying out the task so do not need to go any further.\n\t\t\treturn\n\t\t}\n\t}\n}", "func TestCommitterSuccess(t *testing.T) {\n\te := []*transformer.Envelope{\n\t\t&transformer.Envelope{},\n\t\t&transformer.Envelope{},\n\t\t&transformer.Envelope{},\n\t}\n\n\tok := false\n\tc := NewCommitter(&dumbWriter{}, func(envs []*transformer.Envelope) error {\n\t\tok = len(envs) == len(e)\n\t\tfor i := range e {\n\t\t\tok = ok && (e[i] == envs[i])\n\t\t}\n\t\treturn nil\n\t})\n\n\terr := c.Write(e...)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error %v\", err)\n\t}\n\tif !ok {\n\t\tt.Fatal(\"commit callback not invoked correctly\")\n\t}\n}", "func TestPutReplyInfo(t *testing.T) {\n\tConvey(\"TestPutReplyInfo put add reply\", t, WithService(func(s *Service) {\n\t\tSo(s.PutReplyInfo(context.TODO(), &model.ReplyEvent{\n\t\t\tMid: testReplyMid,\n\t\t\tAction: model.EventAdd,\n\t\t\tReply: &repmol.Reply{\n\t\t\t\tMid: testRefReplyMid,\n\t\t\t},\n\t\t}), ShouldBeNil)\n\t}))\n\tConvey(\"TestPutReplyInfo put add reply\", t, WithService(func(s *Service) {\n\t\tSo(s.PutReplyInfo(context.TODO(), &model.ReplyEvent{\n\t\t\tMid: testReplyMid,\n\t\t\tAction: model.EventLike,\n\t\t\tReply: &repmol.Reply{\n\t\t\t\tMid: testRefReplyMid,\n\t\t\t},\n\t\t}), 
ShouldBeNil)\n\t\tSo(s.PutReplyInfo(context.TODO(), &model.ReplyEvent{\n\t\t\tMid: testReplyMid,\n\t\t\tAction: model.EventLike,\n\t\t\tReply: &repmol.Reply{\n\t\t\t\tMid: testRefReplyMid,\n\t\t\t},\n\t\t}), ShouldBeNil)\n\t}))\n\tConvey(\"TestPutReplyInfo put add reply\", t, WithService(func(s *Service) {\n\t\tSo(s.PutReplyInfo(context.TODO(), &model.ReplyEvent{\n\t\t\tMid: testReplyMid,\n\t\t\tAction: model.EventLikeCancel,\n\t\t\tReply: &repmol.Reply{\n\t\t\t\tMid: testRefReplyMid,\n\t\t\t},\n\t\t}), ShouldBeNil)\n\t}))\n\tConvey(\"TestPutReplyInfo put hate\", t, WithService(func(s *Service) {\n\t\tSo(s.PutReplyInfo(context.TODO(), &model.ReplyEvent{\n\t\t\tMid: testReplyMid,\n\t\t\tAction: model.EventHate,\n\t\t\tReply: &repmol.Reply{\n\t\t\t\tMid: testRefReplyMid,\n\t\t\t},\n\t\t}), ShouldBeNil)\n\t}))\n\tConvey(\"TestPutReplyInfo put hate cancel\", t, WithService(func(s *Service) {\n\t\tSo(s.PutReplyInfo(context.TODO(), &model.ReplyEvent{\n\t\t\tMid: testReplyMid,\n\t\t\tAction: model.EventHateCancel,\n\t\t\tReply: &repmol.Reply{\n\t\t\t\tMid: testRefReplyMid,\n\t\t\t},\n\t\t}), ShouldBeNil)\n\t}))\n}", "func (et *explorerTester) testConsensusUpdates(t *testing.T) {\n\t// 20 here is arbitrary\n\tfor i := types.BlockHeight(0); i < 20; i++ {\n\t\tb, _ := et.miner.FindBlock()\n\t\terr := et.cs.AcceptBlock(b)\n\t\tif err != nil {\n\t\t\tet.t.Fatal(err)\n\t\t}\n\t}\n}", "func TestReactorBroadcastEvidence_Lagging(t *testing.T) {\n\tval := types.NewMockPV()\n\theight1 := int64(numEvidence) + 10\n\theight2 := int64(numEvidence) / 2\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\t// stateDB1 is ahead of stateDB2, where stateDB1 has all heights (1-20) and\n\t// stateDB2 only has heights 1-5.\n\tstateDB1 := initializeValidatorState(ctx, t, val, height1)\n\tstateDB2 := initializeValidatorState(ctx, t, val, height2)\n\n\trts := setup(ctx, t, []sm.Store{stateDB1, stateDB2}, 100)\n\trts.start(ctx, t)\n\n\tprimary := 
rts.nodes[0]\n\tsecondary := rts.nodes[1]\n\n\t// Send a list of valid evidence to the first reactor's, the one that is ahead,\n\t// evidence pool.\n\tevList := createEvidenceList(ctx, t, rts.pools[primary.NodeID], val, numEvidence)\n\n\t// Add each secondary suite (node) as a peer to the primary suite (node). This\n\t// will cause the primary to gossip all evidence to the secondaries.\n\trts.peerChans[primary.NodeID] <- p2p.PeerUpdate{\n\t\tStatus: p2p.PeerStatusUp,\n\t\tNodeID: secondary.NodeID,\n\t}\n\n\t// only ones less than the peers height should make it through\n\trts.waitForEvidence(t, evList[:height2], secondary.NodeID)\n\n\trequire.Equal(t, numEvidence, int(rts.pools[primary.NodeID].Size()))\n\trequire.Equal(t, int(height2), int(rts.pools[secondary.NodeID].Size()))\n}", "func TestClientSpacesSummaryJoinRules(t *testing.T) {\n\tdeployment := Deploy(t, b.BlueprintOneToOneRoom)\n\tdefer deployment.Destroy(t)\n\n\t// create the rooms\n\talice := deployment.Client(t, \"hs1\", \"@alice:hs1\")\n\troot := alice.CreateRoom(t, map[string]interface{}{\n\t\t\"preset\": \"public_chat\",\n\t\t\"name\": \"Root\",\n\t\t\"creation_content\": map[string]interface{}{\n\t\t\t\"type\": \"m.space\",\n\t\t},\n\t})\n\tr1 := alice.CreateRoom(t, map[string]interface{}{\n\t\t\"preset\": \"private_chat\",\n\t\t\"name\": \"R1\",\n\t})\n\tss1 := alice.CreateRoom(t, map[string]interface{}{\n\t\t\"preset\": \"private_chat\",\n\t\t\"name\": \"Sub-Space 1\",\n\t\t\"creation_content\": map[string]interface{}{\n\t\t\t\"type\": \"m.space\",\n\t\t},\n\t})\n\tr2 := alice.CreateRoom(t, map[string]interface{}{\n\t\t\"preset\": \"public_chat\",\n\t\t\"name\": \"R2\",\n\t\t\"initial_state\": []map[string]interface{}{\n\t\t\t{\n\t\t\t\t\"type\": \"m.room.history_visibility\",\n\t\t\t\t\"state_key\": \"\",\n\t\t\t\t\"content\": map[string]string{\n\t\t\t\t\t\"history_visibility\": \"world_readable\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t})\n\tr3 := alice.CreateRoom(t, 
map[string]interface{}{\n\t\t\"preset\": \"private_chat\",\n\t\t\"name\": \"R3\",\n\t})\n\n\t// create the links\n\trootToR1 := eventKey(root, r1, spaceChildEventType)\n\talice.SendEventSynced(t, root, b.Event{\n\t\tType: spaceChildEventType,\n\t\tStateKey: &r1,\n\t\tContent: map[string]interface{}{\n\t\t\t\"via\": []string{\"hs1\"},\n\t\t},\n\t})\n\trootToSS1 := eventKey(root, ss1, spaceChildEventType)\n\talice.SendEventSynced(t, root, b.Event{\n\t\tType: spaceChildEventType,\n\t\tStateKey: &ss1,\n\t\tContent: map[string]interface{}{\n\t\t\t\"via\": []string{\"hs1\"},\n\t\t},\n\t})\n\tss1ToR2 := eventKey(ss1, r2, spaceChildEventType)\n\talice.SendEventSynced(t, ss1, b.Event{\n\t\tType: spaceChildEventType,\n\t\tStateKey: &r2,\n\t\tContent: map[string]interface{}{\n\t\t\t\"via\": []string{\"hs1\"},\n\t\t},\n\t})\n\tss1ToR3 := eventKey(ss1, r3, spaceChildEventType)\n\talice.SendEventSynced(t, ss1, b.Event{\n\t\tType: spaceChildEventType,\n\t\tStateKey: &r3,\n\t\tContent: map[string]interface{}{\n\t\t\t\"via\": []string{\"hs1\"},\n\t\t},\n\t})\n\n\t// Querying is done by bob who is not yet in any of the rooms.\n\tbob := deployment.Client(t, \"hs1\", \"@bob:hs1\")\n\tbob.JoinRoom(t, root, []string{\"hs1\"})\n\n\tres := bob.MustDo(t, \"GET\", []string{\"_matrix\", \"client\", \"unstable\", \"org.matrix.msc2946\", \"rooms\", root, \"spaces\"}, nil)\n\tmust.MatchResponse(t, res, match.HTTPResponse{\n\t\tJSON: []match.JSON{\n\t\t\tmatch.JSONCheckOff(\"rooms\", []interface{}{\n\t\t\t\troot,\n\t\t\t}, func(r gjson.Result) interface{} {\n\t\t\t\treturn r.Get(\"room_id\").Str\n\t\t\t}, nil),\n\t\t\tmatch.JSONCheckOff(\"events\", []interface{}{\n\t\t\t\trootToR1, rootToSS1,\n\t\t\t}, func(r gjson.Result) interface{} {\n\t\t\t\treturn eventKey(r.Get(\"room_id\").Str, r.Get(\"state_key\").Str, r.Get(\"type\").Str)\n\t\t\t}, nil),\n\t\t},\n\t})\n\tres = bob.MustDo(t, \"GET\", []string{\"_matrix\", \"client\", \"unstable\", \"org.matrix.msc2946\", \"rooms\", root, \"hierarchy\"}, 
nil)\n\tmust.MatchResponse(t, res, match.HTTPResponse{\n\t\tJSON: []match.JSON{\n\t\t\tmatch.JSONCheckOff(\"rooms\", []interface{}{\n\t\t\t\troot,\n\t\t\t}, func(r gjson.Result) interface{} {\n\t\t\t\treturn r.Get(\"room_id\").Str\n\t\t\t}, nil),\n\t\t\tmatch.JSONCheckOff(\"rooms.#.children_state|@flatten\", []interface{}{\n\t\t\t\trootToR1, rootToSS1,\n\t\t\t}, func(r gjson.Result) interface{} {\n\t\t\t\treturn eventKey(r.Get(\"room_id\").Str, r.Get(\"state_key\").Str, r.Get(\"type\").Str)\n\t\t\t}, nil),\n\t\t},\n\t})\n\n\t// Invite to R1 and R3, querying again should only show R1 (since SS1 is not visible).\n\talice.InviteRoom(t, r1, bob.UserID)\n\talice.InviteRoom(t, r3, bob.UserID)\n\n\tres = bob.MustDo(t, \"GET\", []string{\"_matrix\", \"client\", \"unstable\", \"org.matrix.msc2946\", \"rooms\", root, \"spaces\"}, nil)\n\tmust.MatchResponse(t, res, match.HTTPResponse{\n\t\tJSON: []match.JSON{\n\t\t\tmatch.JSONCheckOff(\"rooms\", []interface{}{\n\t\t\t\troot, r1,\n\t\t\t}, func(r gjson.Result) interface{} {\n\t\t\t\treturn r.Get(\"room_id\").Str\n\t\t\t}, nil),\n\t\t\tmatch.JSONCheckOff(\"events\", []interface{}{\n\t\t\t\trootToR1, rootToSS1,\n\t\t\t}, func(r gjson.Result) interface{} {\n\t\t\t\treturn eventKey(r.Get(\"room_id\").Str, r.Get(\"state_key\").Str, r.Get(\"type\").Str)\n\t\t\t}, nil),\n\t\t},\n\t})\n\tres = bob.MustDo(t, \"GET\", []string{\"_matrix\", \"client\", \"unstable\", \"org.matrix.msc2946\", \"rooms\", root, \"hierarchy\"}, nil)\n\tmust.MatchResponse(t, res, match.HTTPResponse{\n\t\tJSON: []match.JSON{\n\t\t\tmatch.JSONCheckOff(\"rooms\", []interface{}{\n\t\t\t\troot, r1,\n\t\t\t}, func(r gjson.Result) interface{} {\n\t\t\t\treturn r.Get(\"room_id\").Str\n\t\t\t}, nil),\n\t\t\tmatch.JSONCheckOff(\"rooms.#.children_state|@flatten\", []interface{}{\n\t\t\t\trootToR1, rootToSS1,\n\t\t\t}, func(r gjson.Result) interface{} {\n\t\t\t\treturn eventKey(r.Get(\"room_id\").Str, r.Get(\"state_key\").Str, r.Get(\"type\").Str)\n\t\t\t}, 
nil),\n\t\t},\n\t})\n\n\t// Invite to SS1 and it now appears, as well as the rooms under it.\n\talice.InviteRoom(t, ss1, bob.UserID)\n\n\tres = bob.MustDo(t, \"GET\", []string{\"_matrix\", \"client\", \"unstable\", \"org.matrix.msc2946\", \"rooms\", root, \"spaces\"}, nil)\n\tmust.MatchResponse(t, res, match.HTTPResponse{\n\t\tJSON: []match.JSON{\n\t\t\tmatch.JSONCheckOff(\"rooms\", []interface{}{\n\t\t\t\troot, r1, ss1, r2, r3,\n\t\t\t}, func(r gjson.Result) interface{} {\n\t\t\t\treturn r.Get(\"room_id\").Str\n\t\t\t}, nil),\n\t\t\tmatch.JSONCheckOff(\"events\", []interface{}{\n\t\t\t\trootToR1, rootToSS1, ss1ToR2, ss1ToR3,\n\t\t\t}, func(r gjson.Result) interface{} {\n\t\t\t\treturn eventKey(r.Get(\"room_id\").Str, r.Get(\"state_key\").Str, r.Get(\"type\").Str)\n\t\t\t}, nil),\n\t\t},\n\t})\n\tres = bob.MustDo(t, \"GET\", []string{\"_matrix\", \"client\", \"unstable\", \"org.matrix.msc2946\", \"rooms\", root, \"hierarchy\"}, nil)\n\tmust.MatchResponse(t, res, match.HTTPResponse{\n\t\tJSON: []match.JSON{\n\t\t\tmatch.JSONCheckOff(\"rooms\", []interface{}{\n\t\t\t\troot, r1, ss1, r2, r3,\n\t\t\t}, func(r gjson.Result) interface{} {\n\t\t\t\treturn r.Get(\"room_id\").Str\n\t\t\t}, nil),\n\t\t\tmatch.JSONCheckOff(\"rooms.#.children_state|@flatten\", []interface{}{\n\t\t\t\trootToR1, rootToSS1, ss1ToR2, ss1ToR3,\n\t\t\t}, func(r gjson.Result) interface{} {\n\t\t\t\treturn eventKey(r.Get(\"room_id\").Str, r.Get(\"state_key\").Str, r.Get(\"type\").Str)\n\t\t\t}, nil),\n\t\t},\n\t})\n}", "func (suite *KeeperTestSuite) TestChanOpenAck() {\n\tvar (\n\t\tpath *ibctesting.Path\n\t\tcounterpartyChannelID string\n\t\tchannelCap *capabilitytypes.Capability\n\t\theightDiff uint64\n\t)\n\n\ttestCases := []testCase{\n\t\t{\"success\", func() {\n\t\t\tsuite.coordinator.SetupConnections(path)\n\t\t\tpath.SetChannelOrdered()\n\t\t\terr := path.EndpointA.ChanOpenInit()\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\terr = 
path.EndpointB.ChanOpenTry()\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\tchannelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)\n\t\t}, true},\n\t\t{\"success with empty stored counterparty channel ID\", func() {\n\t\t\tsuite.coordinator.SetupConnections(path)\n\t\t\tpath.SetChannelOrdered()\n\n\t\t\terr := path.EndpointA.ChanOpenInit()\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\terr = path.EndpointB.ChanOpenTry()\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\t// set the channel's counterparty channel identifier to empty string\n\t\t\tchannel := path.EndpointA.GetChannel()\n\t\t\tchannel.Counterparty.ChannelId = \"\"\n\n\t\t\t// use a different channel identifier\n\t\t\tcounterpartyChannelID = path.EndpointB.ChannelID\n\n\t\t\tsuite.chainA.App.GetIBCKeeper().ChannelKeeper.SetChannel(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, channel)\n\n\t\t\tchannelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)\n\t\t}, true},\n\t\t{\"channel doesn't exist\", func() {}, false},\n\t\t{\"channel state is not INIT or TRYOPEN\", func() {\n\t\t\t// create fully open channels on both chains\n\t\t\tsuite.coordinator.Setup(path)\n\t\t\tchannelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)\n\t\t}, false},\n\t\t{\"connection not found\", func() {\n\t\t\tsuite.coordinator.SetupConnections(path)\n\t\t\tpath.SetChannelOrdered()\n\t\t\terr := path.EndpointA.ChanOpenInit()\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\terr = path.EndpointB.ChanOpenTry()\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\tchannelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)\n\n\t\t\t// set the channel's connection hops to wrong connection ID\n\t\t\tchannel := path.EndpointA.GetChannel()\n\t\t\tchannel.ConnectionHops[0] = 
\"doesnotexist\"\n\t\t\tsuite.chainA.App.GetIBCKeeper().ChannelKeeper.SetChannel(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, channel)\n\t\t}, false},\n\t\t{\"connection is not OPEN\", func() {\n\t\t\tsuite.coordinator.SetupClients(path)\n\n\t\t\terr := path.EndpointA.ConnOpenInit()\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\t// create channel in init\n\t\t\tpath.SetChannelOrdered()\n\n\t\t\terr = path.EndpointA.ChanOpenInit()\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\tsuite.chainA.CreateChannelCapability(suite.chainA.GetSimApp().ScopedIBCMockKeeper, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)\n\t\t\tchannelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)\n\t\t}, false},\n\t\t{\"consensus state not found\", func() {\n\t\t\tsuite.coordinator.SetupConnections(path)\n\t\t\tpath.SetChannelOrdered()\n\n\t\t\terr := path.EndpointA.ChanOpenInit()\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\terr = path.EndpointB.ChanOpenTry()\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\tchannelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)\n\n\t\t\theightDiff = 3 // consensus state doesn't exist at this height\n\t\t}, false},\n\t\t{\"invalid counterparty channel identifier\", func() {\n\t\t\tsuite.coordinator.SetupConnections(path)\n\t\t\tpath.SetChannelOrdered()\n\n\t\t\terr := path.EndpointA.ChanOpenInit()\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\terr = path.EndpointB.ChanOpenTry()\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\tcounterpartyChannelID = \"otheridentifier\"\n\n\t\t\tchannelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)\n\t\t}, false},\n\t\t{\"channel verification failed\", func() {\n\t\t\t// chainB is INIT, chainA in TRYOPEN\n\t\t\tsuite.coordinator.SetupConnections(path)\n\t\t\tpath.SetChannelOrdered()\n\n\t\t\terr := 
path.EndpointB.ChanOpenInit()\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\terr = path.EndpointA.ChanOpenTry()\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\tchannelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)\n\t\t}, false},\n\t\t{\"channel capability not found\", func() {\n\t\t\tsuite.coordinator.SetupConnections(path)\n\t\t\tpath.SetChannelOrdered()\n\t\t\terr := path.EndpointA.ChanOpenInit()\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\tpath.EndpointB.ChanOpenTry()\n\n\t\t\tchannelCap = capabilitytypes.NewCapability(6)\n\t\t}, false},\n\t}\n\n\tfor _, tc := range testCases {\n\t\ttc := tc\n\t\tsuite.Run(fmt.Sprintf(\"Case %s\", tc.msg), func() {\n\t\t\tsuite.SetupTest() // reset\n\t\t\tcounterpartyChannelID = \"\" // must be explicitly changed in malleate\n\t\t\theightDiff = 0 // must be explicitly changed\n\t\t\tpath = ibctesting.NewPath(suite.chainA, suite.chainB)\n\n\t\t\ttc.malleate()\n\n\t\t\tif counterpartyChannelID == \"\" {\n\t\t\t\tcounterpartyChannelID = ibctesting.FirstChannelID\n\t\t\t}\n\n\t\t\tif path.EndpointA.ClientID != \"\" {\n\t\t\t\t// ensure client is up to date\n\t\t\t\terr := path.EndpointA.UpdateClient()\n\t\t\t\tsuite.Require().NoError(err)\n\t\t\t}\n\n\t\t\tchannelKey := host.ChannelKey(path.EndpointB.ChannelConfig.PortID, ibctesting.FirstChannelID)\n\t\t\tproof, proofHeight := suite.chainB.QueryProof(channelKey)\n\n\t\t\terr := suite.chainA.App.GetIBCKeeper().ChannelKeeper.ChanOpenAck(\n\t\t\t\tsuite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, channelCap, path.EndpointB.ChannelConfig.Version, counterpartyChannelID,\n\t\t\t\tproof, malleateHeight(proofHeight, heightDiff),\n\t\t\t)\n\n\t\t\tif tc.expPass {\n\t\t\t\tsuite.Require().NoError(err)\n\t\t\t} else {\n\t\t\t\tsuite.Require().Error(err)\n\t\t\t}\n\t\t})\n\t}\n}", "func TestFinalize(t *testing.T) {\n\tnumGRBefore := runtime.NumGoroutine()\n\t// Create a set of 100 agreement components, 
and finalize them immediately\n\tfor i := 0; i < 100; i++ {\n\t\tc, _ := agreement.WireAgreement(50)\n\t\tc.FinalizeRound()\n\t}\n\n\t// Ensure we have freed up all of the resources associated with these components\n\tnumGRAfter := runtime.NumGoroutine()\n\t// We should have roughly the same amount of goroutines\n\tassert.InDelta(t, numGRBefore, numGRAfter, 10.0)\n}", "func TestSimplePayment(t *testing.T) {\n\tvar update *idb.AlgoUpdate\n\tstate := GetAccounting()\n\tstate.AddTransaction(test.OpenMain)\n\n\tsenderBalance, senderRewards := getSenderAmounts(test.OpenMainStxn)\n\tupdate = state.AlgoUpdates[test.OpenMainStxn.Txn.Sender]\n\tassertUpdates(t, update, false, senderBalance, senderRewards)\n\n\treceiverBalance, receiverRewards := getReceiverAmounts(test.OpenMainStxn)\n\tupdate = state.AlgoUpdates[test.OpenMainStxn.Txn.Receiver]\n\tassertUpdates(t, update, false, receiverBalance, receiverRewards)\n}", "func verifyAccumulator(core *Core, exp uint64, t *testing.T) {\n\t// Swap the accumulator with register 14\n\tregPair := 7\n\trunOneCycle(core, uint64(instruction.XCH|(regPair<<1)), t)\n\t// Run the SRC command\n\t_, srcVal := runOneIOCycle(core, uint64(instruction.SRC|(regPair<<1)), t)\n\tif exp != srcVal {\n\t\tt.Errorf(\"Accumulator val %X was not equal to %X\", srcVal, exp)\n\t}\n}", "func (suite *EventLogTestSuite) TestWrite_GetEventCountWithUpdateResult() {\n\ttimeStamp1 := \"22:59:59\"\n\ttimeStamp2 := \"23:00:00\"\n\t//timeStamp3 := \"23:05:00\"\n\tinputFiles := []string{\n\t\tsuite.EventLog.eventLogName + suite.EventLog.fileDelimiter + \"2020-03-02\",\n\t}\n\tdefer func() {\n\t\tfor _, fileName := range inputFiles {\n\t\t\tos.Remove(filepath.Join(suite.EventLog.eventLogPath, fileName))\n\t\t}\n\t}()\n\tfile2Input := \"SchemaVersion=1\\n\" +\n\t\t\"EventType1 Test 2.3.2.0 \" + timeStamp1 +\n\t\t\"\\nEventType1 Test 2.3.3.1 \" + timeStamp1 +\n\t\t\"\\nagent_update_result UpdateError1-2.3.2.1 2.0.3.1 \" + timeStamp1 +\n\t\t\"\\nagent_update_result 
UpdateError2-2.3.2.2 2.0.3.2 \" + timeStamp1 +\n\t\t\"\\nagent_update_result UpdateError3-2.3.2.3 2.0.3.3 \" + timeStamp2 +\n\t\t\"\\n\"\n\tfor _, fileName := range inputFiles {\n\t\tsuite.EventLog.fileSystem.AppendToFile(filepath.Join(suite.EventLog.eventLogPath, fileName), file2Input, 0600)\n\t}\n\teventCounts, err := GetEventCounter()\n\tassert.Equal(suite.T(), 5, len(eventCounts))\n\tassert.Nil(suite.T(), err)\n\t_, ok := eventCounts[0].CountMap[\"UpdateError3-2.3.2.3\"]\n\tassert.Equal(suite.T(), true, ok)\n\tval, err := strconv.Atoi(eventCounts[0].SchemaVersion)\n\tassert.Equal(suite.T(), 1, val)\n}", "func TestChannelClientRollsBackPvtDataIfMvccReadConflict(t *testing.T) {\n\t// 'ApproveChaincodeDefinitionForMyOrg' failed: error validating chaincode definition: collection-name: collection1 -- collection member 'Org3MSP' is not part of the channel\n\tif metadata.CCMode == \"lscc\" {\n\t\torgsContext := setupMultiOrgContext(t, mainSDK)\n\t\trequire.NoError(t, integration.EnsureChannelCreatedAndPeersJoined(t, mainSDK, orgChannelID, \"orgchannel.tx\", orgsContext))\n\t\t// private data collection used for test\n\t\tconst coll = \"collection1\"\n\t\t// collection key used for test\n\t\tconst key = \"collection_key\"\n\t\tccID := integration.GenerateExamplePvtID(true)\n\t\tcollConfig, err := newCollectionConfig(coll, \"OR('Org1MSP.member','Org2MSP.member','Org3MSP.member')\", 0, 2, 1000)\n\t\trequire.NoError(t, err)\n\n\t\trequire.NoError(t, integration.InstallExamplePvtChaincode(orgsContext, ccID))\n\t\trequire.NoError(t, integration.InstantiateExamplePvtChaincode(orgsContext, orgChannelID, ccID, \"OR('Org1MSP.member','Org2MSP.member','Org3MSP.member')\", collConfig))\n\n\t\tctxProvider := mainSDK.ChannelContext(orgChannelID, fabsdk.WithUser(org1User), fabsdk.WithOrg(org1Name))\n\t\tchClient, err := channel.New(ctxProvider)\n\t\trequire.NoError(t, err)\n\n\t\tvar errMtx sync.Mutex\n\t\terrs := multi.Errors{}\n\t\tvar wg sync.WaitGroup\n\n\t\t// test function; 
invokes a CC function that mutates the private data collection\n\t\tchangePvtData := func(amount int) {\n\t\t\tdefer wg.Done()\n\t\t\t_, err := chClient.Execute(\n\t\t\t\tchannel.Request{\n\t\t\t\t\tChaincodeID: ccID,\n\t\t\t\t\tFcn: \"addToInt\",\n\t\t\t\t\tArgs: [][]byte{[]byte(coll), []byte(key), []byte(strconv.Itoa(amount))},\n\t\t\t\t},\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\terrMtx.Lock()\n\t\t\t\terrs = append(errs, err)\n\t\t\t\terrMtx.Unlock()\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t// expected value at the end of the test\n\t\tconst expected = 10\n\n\t\twg.Add(2)\n\t\tgo changePvtData(expected)\n\t\tgo changePvtData(expected)\n\t\twg.Wait()\n\n\t\t// ensure the MVCC_READ_CONFLICT was reproduced\n\t\trequire.Truef(t, len(errs) > 0 && strings.Contains(errs[0].Error(), \"MVCC_READ_CONFLICT\"), \"could not reproduce MVCC_READ_CONFLICT\")\n\n\t\t// read current value of private data collection\n\t\t//resp, err := chClient.Query(\n\t\t//\tchannel.Request{\n\t\t//\t\tChaincodeID: ccID,\n\t\t//\t\tFcn: \"getprivate\",\n\t\t//\t\tArgs: [][]byte{[]byte(coll), []byte(key)},\n\t\t//\t},\n\t\t//\tchannel.WithRetry(retry.TestRetryOpts),\n\t\t//)\n\t\tresp, err := retry.NewInvoker(retry.New(retry.TestRetryOpts)).Invoke(\n\t\t\tfunc() (interface{}, error) {\n\t\t\t\tb, e := chClient.Query(\n\t\t\t\t\tchannel.Request{\n\t\t\t\t\t\tChaincodeID: ccID,\n\t\t\t\t\t\tFcn: \"getprivate\",\n\t\t\t\t\t\tArgs: [][]byte{[]byte(coll), []byte(key)},\n\t\t\t\t\t},\n\t\t\t\t\tchannel.WithRetry(retry.TestRetryOpts),\n\t\t\t\t)\n\t\t\t\tif e != nil || strings.TrimSpace(string(b.Payload)) == \"\" {\n\t\t\t\t\treturn nil, status.New(status.TestStatus, status.GenericTransient.ToInt32(), fmt.Sprintf(\"getprivate data returned error: %v\", e), nil)\n\t\t\t\t}\n\t\t\t\treturn b, e\n\t\t\t},\n\t\t)\n\t\trequire.NoErrorf(t, err, \"error attempting to read private data\")\n\t\trequire.NotEmptyf(t, strings.TrimSpace(string(resp.(channel.Response).Payload)), \"reading private data returned empty 
response\")\n\n\t\tactual, err := strconv.Atoi(string(resp.(channel.Response).Payload))\n\t\trequire.NoError(t, err)\n\n\t\tassert.Truef(t, actual == expected, \"Private data not rolled back during MVCC_READ_CONFLICT\")\n\t}\n}", "func TestSuitableSubscribe(t *testing.T) {\n\tvar testSubscribe = testType{\"testing\"}\n\ttables := []struct {\n\t\tmethodName string\n\t\tisSubscribe bool\n\t\targNum int\n\t\thasCtx bool\n\t}{\n\t\t{\"method1\", true, 0, true},\n\t\t{\"method3\", false, 0, true},\n\t\t{\"method4\", false, 0, true},\n\t\t{\"method5\", true, 1, true},\n\t\t{\"method6\", false, 1, true},\n\t}\n\n\tfor _, table := range tables {\n\t\t_, subscriptions := suitableCallbacks(reflect.ValueOf(testSubscribe), reflect.TypeOf(testSubscribe))\n\t\tif table.isSubscribe {\n\t\t\tsubscription, ok := subscriptions[table.methodName]\n\t\t\tif !ok {\n\t\t\t\tt.Fatalf(\"Input Method: %s, should be subscripable\", table.methodName)\n\t\t\t}\n\t\t\tif len(subscription.argTypes) != table.argNum {\n\t\t\t\tt.Errorf(\"Input Method: %s, got arg nums: %d, expected: %d\",\n\t\t\t\t\ttable.methodName, len(subscription.argTypes), table.argNum)\n\t\t\t}\n\t\t\tif subscription.hasCtx != table.hasCtx {\n\t\t\t\tt.Errorf(\"Input Method: %s, got hasCtx %t, expected: %t\",\n\t\t\t\t\ttable.methodName, subscription.hasCtx, table.hasCtx)\n\t\t\t}\n\t\t\tif subscription.errPos != -1 {\n\t\t\t\tt.Errorf(\"Expected input method error position to be -1\")\n\t\t\t}\n\n\t\t} else {\n\t\t\t_, ok := subscriptions[table.methodName]\n\t\t\tif ok {\n\t\t\t\tt.Errorf(\"Input Method: %s, should not be subscripable\", table.methodName)\n\t\t\t}\n\t\t}\n\t}\n}" ]
[ "0.6576471", "0.65324676", "0.63587064", "0.61780435", "0.6098367", "0.59411633", "0.591464", "0.57962435", "0.57424355", "0.57177454", "0.5620648", "0.5585404", "0.55817324", "0.557412", "0.554213", "0.5497874", "0.5467166", "0.54497343", "0.5400443", "0.53578997", "0.53088003", "0.52572566", "0.52333844", "0.5206518", "0.52017164", "0.51844877", "0.5167232", "0.5160979", "0.5153498", "0.5153351", "0.51358825", "0.51318663", "0.5127421", "0.5098596", "0.508908", "0.50864553", "0.5076429", "0.50603575", "0.5057938", "0.5057457", "0.5046", "0.5038027", "0.50357497", "0.502908", "0.50206506", "0.50187427", "0.50129175", "0.5012267", "0.5008049", "0.5001103", "0.5000626", "0.49998665", "0.49966523", "0.4995638", "0.49806654", "0.49781182", "0.4964792", "0.496293", "0.49566317", "0.49555185", "0.49489358", "0.49403867", "0.4939365", "0.49371496", "0.49295726", "0.49225807", "0.49185416", "0.49025553", "0.48889804", "0.4886049", "0.48787904", "0.4859317", "0.48561752", "0.4856078", "0.48555437", "0.4853408", "0.4852424", "0.48419258", "0.48383912", "0.48376808", "0.4822871", "0.48177356", "0.4812194", "0.48055032", "0.48020852", "0.4786667", "0.47825187", "0.47737086", "0.47727785", "0.47720584", "0.4767572", "0.47667214", "0.47651944", "0.47626698", "0.47604004", "0.47491813", "0.47428086", "0.4735249", "0.47342438", "0.47294325" ]
0.68284917
0
Test that we properly clean up after calling Finalize. TODO: trap eventual errors
func TestFinalize(t *testing.T) { numGRBefore := runtime.NumGoroutine() // Create a set of 100 agreement components, and finalize them immediately for i := 0; i < 100; i++ { c, _ := agreement.WireAgreement(50) c.FinalizeRound() } // Ensure we have freed up all of the resources associated with these components numGRAfter := runtime.NumGoroutine() // We should have roughly the same amount of goroutines assert.InDelta(t, numGRBefore, numGRAfter, 10.0) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *SparkCoreAdaptor) Finalize() (errs []error) {\n\treturn\n}", "func Finalise() error {\n\n\tif !globalData.initialised {\n\t\treturn fault.NotInitialised\n\t}\n\n\tglobalData.log.Info(\"shutting down…\")\n\tglobalData.log.Flush()\n\n\t// finally...\n\tglobalData.initialised = false\n\n\tglobalData.log.Info(\"finished\")\n\tglobalData.log.Flush()\n\n\treturn nil\n}", "func (s *GCPCKMSSeal) Finalize(_ context.Context) error {\n\treturn nil\n}", "func (r *Reaper) Finalize() error {\n\tif r == nil || r.released {\n\t\treturn nil\n\t}\n\tif r.finalized {\n\t\treturn kerror.New(kerror.EIllegal, \"reaper has already called destructors\")\n\t}\n\tdefer func() {\n\t\tr.finalized = true\n\t}()\n\treturn reap(r.destructors...)\n}", "func (tool *CommandLineTool) Finalize() error {\n\treturn nil\n}", "func (kor *KubernetesOAMRouter) Finalize(canary *flaggerv1.Canary) error {\n\treturn fmt.Errorf(\"OAM router doesn't do finalize\")\n}", "func Finalise() error {\n\n\tif !globalData.initialised {\n\t\treturn fault.NotInitialised\n\t}\n\n\tglobalData.log.Info(\"shutting down…\")\n\tglobalData.log.Flush()\n\n\tSet(Stopped)\n\n\t// finally...\n\tglobalData.initialised = false\n\n\tglobalData.log.Info(\"finished\")\n\tglobalData.log.Flush()\n\n\treturn nil\n}", "func (dpos *DummyDpos) Finalize(*types.Block) error { return nil }", "func (b *BluetoothAdapter) Finalize() (errs []error) {\n\treturn\n}", "func Finalise() error {\n\tif !globalData.initialised {\n\t\treturn fault.NotInitialised\n\t}\n\n\tglobalData.log.Info(\"shutting down…\")\n\tglobalData.log.Flush()\n\n\t// stop background\n\tglobalData.background.Stop()\n\n\t// release message bus\n\tmessagebus.Bus.Announce.Release()\n\n\tglobalData.log.Info(\"start backing up peer data…\")\n\tif err := receptor.Backup(globalData.backupFile, globalData.receptors.Connectable()); err != nil {\n\t\tglobalData.log.Errorf(\"fail to backup peer data: %s\", err.Error())\n\t}\n\n\t// finally...\n\tglobalData.initialised = 
false\n\n\tglobalData.log.Info(\"finished\")\n\tglobalData.log.Flush()\n\n\treturn nil\n}", "func (m *MockFinalizer) Finalize() error {\n\tr0 := m.FinalizeFunc.nextHook()()\n\tm.FinalizeFunc.appendCall(FinalizerFinalizeFuncCall{r0})\n\treturn r0\n}", "func (p *Permit) Finalize() {\n\t// nothing to do here anymore, preserving for future use.\n}", "func onFinalize() {\n\t//\n}", "func Finalize() error {\n\tC.Py_Finalize()\n\treturn nil\n}", "func (a *models.App) Finalize() {\n\tvar err error\n\n\ta.DB, err = sql.Close(\"postgres\", connectionString)\n\tutils.ErrCheck(err)\n}", "func (c *Container) Finalize() {\n\tc.PrefixTags(\"Element\", \"Container\")\n\tc.Element.Finalize()\n}", "func (_m *ITestCase) CleanupTestCase() {\n\t_m.Called()\n}", "func (_Finalizable *FinalizableTransactor) Finalize(opts *bind.TransactOpts, fin bool) (*types.Transaction, error) {\n\treturn _Finalizable.contract.Transact(opts, \"finalize\", fin)\n}", "func (c *Controller) Finalize() error {\n\tif c.fixedNs {\n\t\treturn nil\n\t}\n\treturn c.client.Namespaces().Delete(c.namespace.Name, nil)\n}", "func (m *PooledWrapper) Finalize(context.Context, ...wrapping.Option) error {\n\treturn nil\n}", "func (t *Tag) Finalize() {\n\tif t.noFinalize {\n\t\treturn\n\t}\n\tif t.Name != nil {\n\t\tt.Name.Finalize()\n\t\tt.Name = nil\n\t}\n\tif t.Value != nil {\n\t\tt.Value.Finalize()\n\t\tt.Value = nil\n\t}\n}", "func (_FinalizableCrowdsaleImpl *FinalizableCrowdsaleImplTransactor) Finalize(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _FinalizableCrowdsaleImpl.contract.Transact(opts, \"finalize\")\n}", "func (cd *ConnectionDetails) Finalize() error {\n\tcd.Dialect = normalizeSynonyms(cd.Dialect)\n\n\tif cd.Options == nil { // for safety\n\t\tcd.Options = make(map[string]string)\n\t}\n\n\tif cd.URL != \"\" {\n\t\tif err := cd.withURL(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif fin, ok := finalizer[cd.Dialect]; ok {\n\t\tfin(cd)\n\t}\n\n\tif DialectSupported(cd.Dialect) 
{\n\t\tif cd.Database != \"\" || cd.URL != \"\" {\n\t\t\treturn nil\n\t\t}\n\t\treturn errors.New(\"no database or URL specified\")\n\t}\n\treturn errors.Errorf(\"unsupported dialect '%v'\", cd.Dialect)\n}", "func cleanup(ctx context.Context, didSignOut bool, tconn *chrome.TestConn) {\n\tif didSignOut {\n\t\treturn\n\t}\n\tif err := signOut(ctx, tconn); err != nil {\n\t\ttesting.ContextLog(ctx, \"Failed to sign out during cleanup: \", err)\n\t\ttesting.ContextLog(ctx, \"The above error is likely caused by an error occurred in test body\")\n\t}\n}", "func (c *Component) Finalize() {\n\tc.PrefixTags(\"Element\", \"Component\")\n\tc.Element.Finalize()\n}", "func Finalized(namespace *kapi.Namespace) bool {\n\tfor i := range namespace.Spec.Finalizers {\n\t\tif api.FinalizerOrigin == namespace.Spec.Finalizers[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (_CRLv0 *CRLv0Transactor) Finalize(opts *bind.TransactOpts, fin bool) (*types.Transaction, error) {\n\treturn _CRLv0.contract.Transact(opts, \"finalize\", fin)\n}", "func (kt *SharedKeybaseTransport) Finalize() {\n\tkt.mutex.Lock()\n\tdefer kt.mutex.Unlock()\n\tkt.transport = kt.stagedTransport\n\tkt.stagedTransport = nil\n}", "func (c *TelemetryConfig) Finalize() {\n\tif c == nil {\n\t\treturn\n\t}\n\n\td := DefaultTelemetryConfig()\n\n\tif c.MetricsPrefix == nil {\n\t\tc.MetricsPrefix = d.MetricsPrefix\n\t}\n\n\tc.Stdout.Finalize()\n\tc.DogStatsD.Finalize()\n\tc.Prometheus.Finalize()\n}", "func (p *Project) Finalize() error {\n\terrs := errors.AggregatedError{}\n\tp.Targets = make(TargetNameMap)\n\tfor name, t := range p.MasterFile.Targets {\n\t\tt.Initialize(name, p)\n\t\terrs.Add(p.Targets.Add(t))\n\t}\n\terrs.AddMany(\n\t\tp.Targets.BuildDeps(),\n\t\tp.Targets.CheckCyclicDeps(),\n\t)\n\n\treturn errs.Aggregate()\n}", "func (b *ClientAdaptor) Finalize() (err error) {\n\treturn b.Disconnect()\n}", "func (_OwnerProxyRegistry *OwnerProxyRegistryTransactor) Finalize(opts *bind.TransactOpts) 
(*types.Transaction, error) {\n\treturn _OwnerProxyRegistry.contract.Transact(opts, \"finalize\")\n}", "func (m *Manager) finalCleanup() {\n\tm.lock()\n\tfor m.state.cleaned != initiallyCleanedState {\n\t\tif m.state.cleaned == finallyCleanedState {\n\t\t\tm.unlock()\n\t\t\treturn // do not clean if already cleaned\n\t\t} else if m.state.cleaned == noCleanedState {\n\t\t\tm.state.cleanWait.Wait() // wait for wake up from `cleanup` or other `finalCleanup` method\n\t\t}\n\t}\n\n\tglog.Infof(\"%s %s has started a final cleanup\", cmn.DSortName, m.ManagerUUID)\n\tnow := time.Now()\n\n\tif err := m.cleanupStreams(); err != nil {\n\t\tglog.Error(err)\n\t}\n\n\tif err := m.dsorter.finalCleanup(); err != nil {\n\t\tglog.Error(err)\n\t}\n\n\t// The reason why this is not in regular cleanup is because we are only sure\n\t// that this can be freed once we cleanup streams - streams are asynchronous\n\t// and we may have race between in-flight request and cleanup.\n\tm.recManager.Cleanup()\n\n\tm.creationPhase.metadata.SendOrder = nil\n\tm.creationPhase.metadata.Shards = nil\n\n\tm.finishedAck.m = nil\n\n\t// Update clean state\n\tm.state.cleaned = finallyCleanedState\n\tm.state.cleanWait.Signal() // if there is another `finalCleanup` waiting it should be woken up to check the state and exit\n\tm.unlock()\n\n\tManagers.persist(m.ManagerUUID)\n\tglog.Infof(\"%s %s final cleanup has been finished in %v\", cmn.DSortName, m.ManagerUUID, time.Since(now))\n}", "func (h *HostExpr) Finalize() {\n\tif h.Variables == nil {\n\t\th.Variables = &AttributeExpr{Type: &Object{}}\n\t}\n}", "func (bench *Stopwatch) finalize() {\n\t// release the initial lock such that Wait can proceed.\n\tbench.wait.Unlock()\n}", "func (_CRLv0 *CRLv0TransactorSession) Finalize(fin bool) (*types.Transaction, error) {\n\treturn _CRLv0.Contract.Finalize(&_CRLv0.TransactOpts, fin)\n}", "func (setup *SimpleTestSetup) TearDown() 
{\n\tsetup.harnessPool.DisposeAll()\n\tsetup.harnessWalletPool.DisposeAll()\n\t//setup.nodeGoBuilder.Dispose()\n\tsetup.WorkingDir.Dispose()\n}", "func (a *I2cBusAdaptor) Finalize() error {\n\ta.mutex.Lock()\n\tdefer a.mutex.Unlock()\n\n\tvar err error\n\tfor _, bus := range a.buses {\n\t\tif bus != nil {\n\t\t\tif e := bus.Close(); e != nil {\n\t\t\t\terr = multierror.Append(err, e)\n\t\t\t}\n\t\t}\n\t}\n\ta.buses = nil\n\treturn err\n}", "func (s *Statement) Finalize() (e error) {\n\treturn SQLiteError(C.sqlite3_finalize(s.cptr))\n}", "func (sl *diskSessionLogger) Finalize() error {\n\tsl.Lock()\n\tdefer sl.Unlock()\n\tif sl.streamFile != nil {\n\t\tauditOpenFiles.Dec()\n\t\tlog.Infof(\"sessionLogger.Finalize(sid=%s)\", sl.sid)\n\t\tsl.streamFile.Close()\n\t\tsl.eventsFile.Close()\n\t\tsl.streamFile = nil\n\t\tsl.eventsFile = nil\n\t}\n\treturn nil\n}", "func (c *DedupConfig) Finalize() {\n\tif c.Enabled == nil {\n\t\tc.Enabled = Bool(false ||\n\t\t\tTimeDurationPresent(c.MaxStale) ||\n\t\t\tStringPresent(c.Prefix) ||\n\t\t\tTimeDurationPresent(c.TTL) ||\n\t\t\tTimeDurationPresent(c.BlockQueryWaitTime))\n\t}\n\n\tif c.MaxStale == nil {\n\t\tc.MaxStale = TimeDuration(DefaultDedupMaxStale)\n\t}\n\n\tif c.Prefix == nil {\n\t\tc.Prefix = String(DefaultDedupPrefix)\n\t}\n\n\tif c.TTL == nil {\n\t\tc.TTL = TimeDuration(DefaultDedupTTL)\n\t}\n\n\tif c.BlockQueryWaitTime == nil {\n\t\tc.BlockQueryWaitTime = TimeDuration(DefaultDedupBlockQueryWaitTime)\n\t}\n}", "func (_FinalizableCrowdsaleImpl *FinalizableCrowdsaleImplSession) Finalize() (*types.Transaction, error) {\n\treturn _FinalizableCrowdsaleImpl.Contract.Finalize(&_FinalizableCrowdsaleImpl.TransactOpts)\n}", "func (_Finalizable *FinalizableTransactorSession) Finalize(fin bool) (*types.Transaction, error) {\n\treturn _Finalizable.Contract.Finalize(&_Finalizable.TransactOpts, fin)\n}", "func (m *MockAccessPolicyFinalizer) FinalizeAccessPolicy(obj *v1alpha2.AccessPolicy) error {\n\tm.ctrl.T.Helper()\n\tret := 
m.ctrl.Call(m, \"FinalizeAccessPolicy\", obj)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (n Noop) CleanUp() error {\n\treturn nil\n}", "func (_RefundableCrowdsale *RefundableCrowdsaleSession) Finalize() (*types.Transaction, error) {\n\treturn _RefundableCrowdsale.Contract.Finalize(&_RefundableCrowdsale.TransactOpts)\n}", "func finalizer() {\n\tfmt.Println()\n\tfmt.Println(\"See ya!\")\n}", "func (_TransferProxyRegistry *TransferProxyRegistryTransactor) Finalize(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _TransferProxyRegistry.contract.Transact(opts, \"finalize\")\n}", "func (_RefundableCrowdsale *RefundableCrowdsaleTransactorSession) Finalize() (*types.Transaction, error) {\n\treturn _RefundableCrowdsale.Contract.Finalize(&_RefundableCrowdsale.TransactOpts)\n}", "func (_Crowdsale *CrowdsaleTransactor) Finalise(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _Crowdsale.contract.Transact(opts, \"finalise\")\n}", "func (c *PrometheusConfig) Finalize() {\n\tif c == nil {\n\t\treturn\n\t}\n\n\td := DefaultPrometheusConfig()\n\n\tif c.Port == nil {\n\t\tc.Port = d.Port\n\t}\n\n\tif c.CachePeriod == nil {\n\t\tc.CachePeriod = d.CachePeriod\n\t}\n}", "func (res *Results) Finalize() {\n res.Replies = len(res.Took)\n res.min()\n res.max()\n res.avg()\n res.med()\n res.pct()\n\n // Code counts\n for _, code := range res.Code {\n if code < 100 { // ignore\n } else if code < 200 {\n res.Code1xx++\n } else if code < 300 {\n res.Code2xx++\n } else if code < 400 {\n res.Code3xx++\n } else if code < 500 {\n res.Code4xx++\n } else if code < 600 {\n res.Code5xx++\n }\n }\n\n // Error counts\n res.ErrorsTotal = len(res.Errors)\n\n for _, err := range res.Errors {\n e := err.(*url.Error).Err.(*net.OpError).Error()\n if strings.Contains(e, \"connection refused\") {\n res.ErrorsConnRefused++\n } else if strings.Contains(e, \"connection reset\") {\n res.ErrorsConnReset++\n } else if strings.Contains(e, \"connection timed out\") {\n 
res.ErrorsConnTimeout++\n } else if strings.Contains(e, \"no free file descriptors\") {\n res.ErrorsFdUnavail++\n } else if strings.Contains(e, \"no such host\") {\n res.ErrorsAddrUnavail++\n } else {\n res.ErrorsOther++\n }\n }\n}", "func (t *Tags) Finalize() {\n\tif t.noFinalize {\n\t\treturn\n\t}\n\n\tvalues := t.values\n\tt.values = nil\n\n\tfor i := range values {\n\t\tvalues[i].Finalize()\n\t}\n\n\tif t.pool == nil {\n\t\treturn\n\t}\n\n\tt.pool.PutTags(Tags{values: values})\n}", "func (_CRLv0 *CRLv0Session) Finalize(fin bool) (*types.Transaction, error) {\n\treturn _CRLv0.Contract.Finalize(&_CRLv0.TransactOpts, fin)\n}", "func (_FinalizableCrowdsaleImpl *FinalizableCrowdsaleImplTransactorSession) Finalize() (*types.Transaction, error) {\n\treturn _FinalizableCrowdsaleImpl.Contract.Finalize(&_FinalizableCrowdsaleImpl.TransactOpts)\n}", "func (_RefundableCrowdsale *RefundableCrowdsaleTransactor) Finalize(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _RefundableCrowdsale.contract.Transact(opts, \"finalize\")\n}", "func Finalize(s *gtk.TreeSelection) {\n\tdefer cfg.Env.Trace(\"Finalize\")()\n\tswitch Action {\n\tcase u2f.Registered, u2f.Authenticated:\n\tcase u2f.MissingKey, u2f.RegistrationFailed, u2f.AuthenticationFailed:\n\t\tDeselect(s)\n\t}\n}", "func (m *CryptographyServiceMock) Finish() {\n\tm.MinimockFinish()\n}", "func Finalize() error {\n\tperr := C.MPI_Finalize()\n\tif perr != 0 {\n\t\treturn errors.New(\"Error initializing MPI\")\n\t}\n\treturn nil\n}", "func EnsureFinalizer(objectMeta *metav1.ObjectMeta, expectedFinalizer string) {\n\t// First check if the finalizer is already included in the object.\n\tfor _, finalizer := range objectMeta.Finalizers {\n\t\tif finalizer == expectedFinalizer {\n\t\t\treturn\n\t\t}\n\t}\n\n\t// If it doesn't exist, append the finalizer to the object meta.\n\tobjectMeta.Finalizers = append(objectMeta.Finalizers, expectedFinalizer)\n\n\treturn\n}", "func (_Finalizable *FinalizableSession) Finalize(fin 
bool) (*types.Transaction, error) {\n\treturn _Finalizable.Contract.Finalize(&_Finalizable.TransactOpts, fin)\n}", "func (ut *todoPayload) Finalize() {\n\tvar defaultCompleted = false\n\tif ut.Completed == nil {\n\t\tut.Completed = &defaultCompleted\n\t}\n\tvar defaultCreated, _ = time.Parse(time.RFC3339, \"1978-06-30T10:00:00+09:00\")\n\tif ut.Created == nil {\n\t\tut.Created = &defaultCreated\n\t}\n\tvar defaultModified, _ = time.Parse(time.RFC3339, \"1978-06-30T10:00:00+09:00\")\n\tif ut.Modified == nil {\n\t\tut.Modified = &defaultModified\n\t}\n}", "func (t *SubprocessTest) TearDown() {\n\terr := t.destroy()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func (m *MockServiceEntryFinalizer) FinalizeServiceEntry(obj *v1alpha3.ServiceEntry) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"FinalizeServiceEntry\", obj)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (ct *ConnectionTransportTLS) Finalize() {\n\tct.mutex.Lock()\n\tdefer ct.mutex.Unlock()\n\tif ct.transport != nil {\n\t\tct.transport.Close()\n\t}\n\tct.transport = ct.stagedTransport\n\tct.stagedTransport = nil\n\tct.srvRemote.Reset()\n}", "func (m *MockDaemonSetFinalizer) FinalizeDaemonSet(obj *v1.DaemonSet) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"FinalizeDaemonSet\", obj)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (a *APIManagedFinalizerRemover) Finalize(ctx context.Context, mg Managed) error {\n\tmeta.RemoveFinalizer(mg, managedFinalizerName)\n\treturn errors.Wrap(a.client.Update(ctx, mg), errUpdateManaged)\n}", "func (m *MockStatefulSetFinalizer) FinalizeStatefulSet(obj *v1.StatefulSet) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"FinalizeStatefulSet\", obj)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (ic *Context) Finalize() {\n\tfor _, f := range ic.cancelFuncs {\n\t\tf()\n\t}\n\tic.cancelFuncs = nil\n}", "func (_Crowdsale *CrowdsaleSession) Finalised() (bool, error) {\n\treturn _Crowdsale.Contract.Finalised(&_Crowdsale.CallOpts)\n}", 
"func (test *Test) Teardown() {\n\ttest.cleanup()\n}", "func (m *IndexCollectionAccessorMock) Finish() {\n\tm.MinimockFinish()\n}", "func (e *GT) Finalize() *GT {\n\tret := finalExponentiation(e.p)\n\te.p.Set(ret)\n\treturn e\n}", "func (ts *TestSetup) Cleanup() {\n\tif ts.Server != nil {\n\t\tts.Server.Stop()\n\t}\n\tif ts.NC != nil {\n\t\tts.NC.Close()\n\t}\n\tif ts.GNATSD != nil {\n\t\tts.GNATSD.Shutdown()\n\t}\n\n\tif ts.SystemUserCredsFile != \"\" {\n\t\tos.Remove(ts.SystemUserCredsFile)\n\t}\n\n\tif ts.SystemAccountJWTFile != \"\" {\n\t\tos.Remove(ts.SystemAccountJWTFile)\n\t}\n\n\tif ts.OperatorJWTFile != \"\" {\n\t\tos.Remove(ts.SystemUserCredsFile)\n\t}\n}", "func (c *ConsoleWrapper) Cleanup() {\n\terr := c.console.Close()\n\tassert.NoError(c.tester, err)\n}", "func (b *Block) Finalize(endorsements []*endorsement.Endorsement, ts time.Time) error {\n\tif len(b.endorsements) != 0 {\n\t\treturn errors.New(\"the block has been finalized\")\n\t}\n\tb.endorsements = endorsements\n\tb.commitTime = ts\n\n\treturn nil\n}", "func (m *SignatureKeyHolderMock) Finish() {\n\tm.MinimockFinish()\n}", "func (m *MockDestinationRuleFinalizer) FinalizeDestinationRule(obj *v1alpha3.DestinationRule) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"FinalizeDestinationRule\", obj)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func Finalize(w http.ResponseWriter, object interface{}) {\n\tfinalData := data{\n\t\tItem: object,\n\t}\n\tfinal := response{\n\t\tData: finalData,\n\t}\n\tjs, err := json.Marshal(final)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tresponseStatus := status{\n\t\tStatusCode: http.StatusOK,\n\t\tStatusText: http.StatusText(http.StatusOK),\n\t}\n\tsendOutJs(w, responseStatus, js)\n}", "func (e *engineImpl) Finalize(\n\tchain engine.ChainReader, header *block.Header,\n\tstate *state.DB, txs []*types.Transaction,\n\treceipts []*types.Receipt, outcxs []*types.CXReceipt,\n\tincxs 
[]*types.CXReceiptsProof, stks []*staking.StakingTransaction,\n) (*types.Block, *big.Int, error) {\n\n\t// Accumulate any block and uncle rewards and commit the final state root\n\t// Header seems complete, assemble into a block and return\n\tpayout, err := AccumulateRewards(\n\t\tchain, state, header, e.Rewarder(), e.Slasher(), e.Beaconchain(),\n\t)\n\tif err != nil {\n\t\treturn nil, nil, ctxerror.New(\"cannot pay block reward\").WithCause(err)\n\t}\n\n\t// Withdraw unlocked tokens to the delegators' accounts\n\t// Only do such at the last block of an epoch\n\tif header.ShardID() == shard.BeaconChainShardID && len(header.ShardState()) > 0 {\n\t\tvalidators, err := chain.ReadValidatorList()\n\t\tif err != nil {\n\t\t\treturn nil, nil, ctxerror.New(\"[Finalize] failed to read active validators\").WithCause(err)\n\t\t}\n\t\t// Payout undelegated/unlocked tokens\n\t\tfor _, validator := range validators {\n\t\t\twrapper := state.GetStakingInfo(validator)\n\t\t\tif wrapper != nil {\n\t\t\t\tfor i := range wrapper.Delegations {\n\t\t\t\t\tdelegation := &wrapper.Delegations[i]\n\t\t\t\t\ttotalWithdraw := delegation.RemoveUnlockedUndelegations(header.Epoch(), wrapper.LastEpochInCommittee)\n\t\t\t\t\tstate.AddBalance(delegation.DelegatorAddress, totalWithdraw)\n\t\t\t\t}\n\t\t\t\tif err := state.UpdateStakingInfo(validator, wrapper); err != nil {\n\t\t\t\t\treturn nil, nil, ctxerror.New(\"[Finalize] failed update validator info\").WithCause(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\terr = errors.New(\"[Finalize] validator came back empty \" + common2.MustAddressToBech32(validator))\n\t\t\t\treturn nil, nil, ctxerror.New(\"[Finalize] failed getting validator info\").WithCause(err)\n\t\t\t}\n\t\t}\n\n\t\t// Set the LastEpochInCommittee field for all external validators in the upcoming epoch.\n\t\tnewShardState, err := header.GetShardState()\n\t\tif err != nil {\n\t\t\treturn nil, nil, ctxerror.New(\"[Finalize] failed to read shard state\").WithCause(err)\n\t\t}\n\t\tprocessed 
:= make(map[common.Address]struct{})\n\t\tfor i := range newShardState.Shards {\n\t\t\tshard := newShardState.Shards[i]\n\t\t\tfor j := range shard.Slots {\n\t\t\t\tslot := shard.Slots[j]\n\t\t\t\tif slot.EffectiveStake != nil { // For external validator\n\t\t\t\t\t_, ok := processed[slot.EcdsaAddress]\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tprocessed[slot.EcdsaAddress] = struct{}{}\n\t\t\t\t\t\twrapper := state.GetStakingInfo(slot.EcdsaAddress)\n\t\t\t\t\t\twrapper.LastEpochInCommittee = newShardState.Epoch\n\n\t\t\t\t\t\tif err := state.UpdateStakingInfo(slot.EcdsaAddress, wrapper); err != nil {\n\t\t\t\t\t\t\treturn nil, nil, ctxerror.New(\"[Finalize] failed update validator info\").WithCause(err)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\theader.SetRoot(state.IntermediateRoot(chain.Config().IsS3(header.Epoch())))\n\treturn types.NewBlock(header, txs, receipts, outcxs, incxs, stks), payout, nil\n}", "func EnsureFinalizer(ctx context.Context, reader client.Reader, writer client.Writer, obj client.Object, finalizer string) error {\n\treturn tryPatchFinalizers(ctx, reader, writer, obj, controllerutil.AddFinalizer, finalizer)\n}", "func (c *DogStatsDConfig) Finalize() {\n\tif c == nil {\n\t\treturn\n\t}\n\n\td := DefaultDogStatsDConfig()\n\n\tif c.Address == nil {\n\t\tc.Address = d.Address\n\t}\n\n\tif c.Period == nil {\n\t\tc.Period = d.Period\n\t}\n}", "func (fc *FakeCompressor) CleanUp(_ string) error {\n\treturn errors.New(\"not implemented\")\n}", "func (env *TestEnv) Cleanup() {\n\tenv.Txmgr.Shutdown()\n\tenv.DBEnv.Cleanup()\n\tenv.TStoreEnv.Cleanup()\n}", "func (m *MySQL) Cleanup() {\n\tos.RemoveAll(\"/tmp/go-harness\")\n}", "func (_m *mockCopyCat) Shutdown() {\n\t_m.Called()\n}", "func (_CRLv0 *CRLv0Session) Finalized() (bool, error) {\n\treturn _CRLv0.Contract.Finalized(&_CRLv0.CallOpts)\n}", "func (_Crowdsale *CrowdsaleCaller) Finalised(opts *bind.CallOpts) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := 
_Crowdsale.contract.Call(opts, out, \"finalised\")\n\treturn *ret0, err\n}", "func (consensus *Consensus) Finalize(chain ChainReader, header *types.Header, state *state.DB, txs []*types.Transaction, receipts []*types.Receipt) (*types.Block, error) {\n\t// Accumulate any block and uncle rewards and commit the final state root\n\t// Header seems complete, assemble into a block and return\n\taccumulateRewards(chain.Config(), state, header)\n\theader.Root = state.IntermediateRoot(false)\n\treturn types.NewBlock(header, txs, receipts), nil\n}", "func TestCleanUp(t *testing.T) {\n\tvar dataSource DataSourceName\n\tdataSource.Init()\n\tdb := GetDBConnection(dataSource.DSNString(), logger.Silent)\n\tDropTables(db)\n}", "func assistantFinalizer(as *Assistant) {\n\truntime.SetFinalizer(as, func(as *Assistant) { gobject.Unref(as) })\n}", "func CleanupTestHarness() {\n\tcleanupCerts()\n}", "func (c *common) Finalized() bool {\n\tnumPreferences := c.preferences.Len()\n\tc.ctx.Log.Verbo(\"Conflict graph has %d preferred transactions\",\n\t\tnumPreferences)\n\treturn numPreferences == 0\n}", "func (r *Reaper) Finalized() bool {\n\tif r == nil {\n\t\treturn false\n\t}\n\treturn r.finalized\n}", "func (m *ConsensusNetworkMock) Finish() {\n\tm.MinimockFinish()\n}", "func AfterSuiteCleanup() {\n\tlogf.Log.Info(\"AfterSuiteCleanup\")\n}", "func (m *TesterMock) Finish() {\n\tm.MinimockFinish()\n}", "func (_CRLv0 *CRLv0CallerSession) Finalized() (bool, error) {\n\treturn _CRLv0.Contract.Finalized(&_CRLv0.CallOpts)\n}", "func (c *StdoutConfig) Finalize() {\n\tif c == nil {\n\t\treturn\n\t}\n\n\td := DefaultStdoutConfig()\n\n\tif c.Period == nil {\n\t\tc.Period = d.Period\n\t}\n\n\tif c.PrettyPrint == nil {\n\t\tc.PrettyPrint = d.PrettyPrint\n\t}\n\n\tif c.DoNotPrintTime == nil {\n\t\tc.DoNotPrintTime = d.DoNotPrintTime\n\t}\n}", "func finalizer(a *CasbinMenuAdapter) {\n}" ]
[ "0.6841336", "0.68049765", "0.6798049", "0.67779976", "0.67707664", "0.67355555", "0.66572475", "0.66430855", "0.6611207", "0.65803516", "0.657241", "0.6484667", "0.6480284", "0.6463848", "0.6461248", "0.6414478", "0.637417", "0.63708615", "0.6358206", "0.6301211", "0.628449", "0.6280816", "0.6227726", "0.6218942", "0.61854404", "0.61640936", "0.6150603", "0.61452234", "0.6142635", "0.6126742", "0.6120788", "0.61168593", "0.61016387", "0.6096624", "0.6091675", "0.6086056", "0.60810775", "0.6075026", "0.6072267", "0.6059542", "0.6052604", "0.60516816", "0.60349274", "0.60311866", "0.6019518", "0.6017871", "0.600075", "0.6000194", "0.5982505", "0.5973003", "0.59729177", "0.5971494", "0.5945915", "0.59453166", "0.59441125", "0.59367365", "0.59359914", "0.5909708", "0.59093297", "0.5901585", "0.58929914", "0.58802646", "0.5878452", "0.58699554", "0.5858963", "0.5851084", "0.5838953", "0.5833956", "0.581968", "0.58182585", "0.5798623", "0.578986", "0.5787433", "0.5774476", "0.5773021", "0.57721245", "0.5765125", "0.57578766", "0.5749807", "0.5745968", "0.5742677", "0.573728", "0.5729992", "0.5724586", "0.5714738", "0.57090205", "0.5703455", "0.5694653", "0.56907755", "0.56867236", "0.56858146", "0.5674521", "0.5668487", "0.56682444", "0.5666404", "0.5660132", "0.5658701", "0.56580675", "0.56560194", "0.56485665" ]
0.6605337
9
GitCommonDir returns commondir where contains "config" file
func (v Repository) GitCommonDir() string { return v.gitCommonDir }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (v Repository) CommonDir() string {\n\tdir := v.RepoDir()\n\tcommonDir := dir\n\tif path.IsFile(filepath.Join(dir, \"commondir\")) {\n\t\tf, err := os.Open(filepath.Join(dir, \"commondir\"))\n\t\tif err == nil {\n\t\t\ts := bufio.NewScanner(f)\n\t\t\tif s.Scan() {\n\t\t\t\tcommonDir = s.Text()\n\t\t\t\tif !filepath.IsAbs(commonDir) {\n\t\t\t\t\tcommonDir = filepath.Join(dir, commonDir)\n\t\t\t\t}\n\t\t\t}\n\t\t\tf.Close()\n\t\t}\n\t}\n\treturn commonDir\n}", "func FindGitConfig(dir string) (string, error) {\n\tvar err error\n\n\tif dir, err = findGitDir(dir); err != nil {\n\t\treturn \"\", err\n\t}\n\tif dir, err = getGitCommonDir(dir); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn filepath.Join(dir, \"config\"), nil\n}", "func getConfigFolderPath() string {\n\tsep := string(filepath.Separator)\n\twd, _ := os.Getwd()\n\n\twdPath := strings.Split(wd, sep)\n\tindexOfSrc := lastIndexOf(wdPath, \"src\")\n\tindexOfBin := lastIndexOf(wdPath, \"bin\")\n\n\tcfgPath := \"\"\n\tvar pathEl []string\n\tif indexOfBin > -1 && indexOfBin > indexOfSrc {\n\t\tpathEl = wdPath[:indexOfBin] // take up to bin (exclusive)\n\t} else if indexOfSrc > -1 {\n\t\tpathEl = wdPath[:indexOfSrc] // take up to src (exclusive)\n\t}\n\n\tif len(pathEl) > 0 {\n\t\tcfgPath = strings.Join(pathEl, sep) + sep\n\t\tcfgPath += \"config\" + sep\n\t}\n\n\treturn cfgPath\n}", "func (g *GitLocal) FindGitConfigDir(dir string) (string, string, error) {\n\treturn g.GitCLI.FindGitConfigDir(dir)\n}", "func ConfigDir() string {\n\treturn configDir\n}", "func configDir() (string, error) {\n\tif dc := os.Getenv(\"DOCKER_CONFIG\"); dc != \"\" {\n\t\treturn dc, nil\n\t}\n\tif h := dockerUserHomeDir(); h != \"\" {\n\t\treturn filepath.Join(dockerUserHomeDir(), \".docker\"), nil\n\t}\n\treturn \"\", errNoHomeDir\n}", "func (s *Scope) configDir() (string, error) {\n\tp, err := s.dataDir()\n\tif err != nil {\n\t\treturn p, err\n\t}\n\n\treturn filepath.Join(p, \"Config\"), nil\n}", "func gitPathDir() string 
{\n\tgcd := trim(cmdOutput(\"git\", \"rev-parse\", \"--git-path\", \".\"))\n\tresult, err := filepath.Abs(gcd)\n\tif err != nil {\n\t\tdief(\"%v\", err)\n\t}\n\treturn result\n}", "func GetConfigRootDir() string {\n\tconfigFile := viper.GetString(\"viper.config_file\")\n\tif configFile == \"\" {\n\t\tcwd, _ := os.Getwd()\n\t\treturn cwd\n\t}\n\n\treturn path.Dir(configFile)\n}", "func GetConfDir() string {\n\treturn fileutil.GetConfDir()\n}", "func ConfigDirPath(envContainer EnvContainer) (string, error) {\n\treturn xdgDirPath(envContainer, \"XDG_CONFIG_HOME\", \".config\")\n}", "func configDirPath() string {\n\tusr, err := user.Current()\n\tif err != nil {\n\t\tlogging.LogFatal(\"config/configDirPath() - Can't find current user: \", err)\n\t}\n\n\t// println(\"usr.HomeDir: \", usr.HomeDir)\n\tconfigDirPath := paths.GetFilePath(usr.HomeDir, configDirName)\n\n\treturn configDirPath\n}", "func mustGetConfigDir() string {\n\thomeDir, err := homedir.Dir()\n\tif err != nil {\n\t\tlog.Fatal(\"Unable to get the home directory\", err)\n\t}\n\treturn filepath.Join(homeDir, globalMnmConfigDir)\n}", "func getConfigPath() (string, error) {\n\thome, homeErr := os.UserHomeDir()\n\tif homeErr == nil {\n\t\tif _, err := os.Stat(filepath.Join(home, \".bin\", \"config.json\")); !os.IsNotExist(err) {\n\t\t\treturn filepath.Join(path.Join(home, \".bin\")), nil\n\t\t}\n\t}\n\n\tc := os.Getenv(\"XDG_CONFIG_HOME\")\n\tif _, err := os.Stat(c); !os.IsNotExist(err) {\n\t\treturn filepath.Join(c, \"bin\"), nil\n\t}\n\n\tif homeErr != nil {\n\t\treturn \"\", homeErr\n\t}\n\tc = filepath.Join(home, \".config\")\n\tif _, err := os.Stat(c); !os.IsNotExist(err) {\n\t\treturn filepath.Join(c, \"bin\"), nil\n\t}\n\n\treturn filepath.Join(home, \".bin\"), nil\n}", "func getConfigFilePath() string {\n\tpathList := [5]string{\n\t\t\"config.json\",\n\t\t\"../config.json\",\n\t\t\"../../config.json\",\n\t\t\"../../../config.json\",\n\t\t\"../../../../config.json\",\n\t}\n\n\t_, b, _, _ := 
runtime.Caller(0)\n\tfilePath := filepath.Dir(b)\n\tfilePath = filepath.Join(filePath, \"../config.json\")\n\n\tpath, err := os.Getwd()\n\tif err == nil {\n\t\tfor _, configPath := range pathList {\n\t\t\tprocessFilePath := filepath.Join(path, configPath)\n\t\t\texist, _ := exists(processFilePath)\n\t\t\tif exist == true {\n\t\t\t\tfilePath = processFilePath\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\treturn filePath\n}", "func (c *ConfigFile) ConfigDir() string {\n\trw, err := NewConfigReadWriter(c.version)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn rw.ConfigDir(c)\n}", "func GetGitDirectory() string {\n\tcurrentDirectory, _ := os.Getwd()\n\tvar projectDirectory = \"\"\n\tdirectoryParts := strings.Split(currentDirectory, string(os.PathSeparator))\n\n\tfor projectDirectory == \"\" {\n\t\tif _, err := os.Stat(filepath.Join(currentDirectory, \"/.git\")); err == nil {\n\t\t\treturn currentDirectory\n\t\t}\n\n\t\tif directoryParts[0]+\"\\\\\" == currentDirectory || currentDirectory == \"/\" {\n\t\t\treturn \"\"\n\t\t}\n\n\t\tcurrentDirectory = filepath.Dir(currentDirectory)\n\t}\n\n\treturn \"\"\n}", "func configDir() (string, error) {\n\t// At the moment (go1.8.3), os.user.Current() just produces: \"user: Current not implemented on YOUROS/YOURARCH\"\n\t// Apparently this is due to lack of support with cross-compiled binaries? 
Therefore we DIY it here.\n\n\t// DC/OS CLI allows users to manually override the config dir (default ~/.dcos) with a DCOS_DIR envvar:\n\tconfigDir := config.DcosConfigRootDir // proxy for DCOS_DIR envvar\n\tif len(configDir) != 0 {\n\t\treturn configDir, nil\n\t}\n\n\t// OSX/Linux: $HOME/.dcos/\n\thomeDir := os.Getenv(\"HOME\")\n\tif len(homeDir) != 0 {\n\t\treturn path.Join(homeDir, dcosConfigDirName), nil\n\t}\n\n\t// Windows: ${HOMEDRIVE}${HOMEPATH}/.dcos/ or $USERPROFILE/.dcos/\n\thomeDrive := os.Getenv(\"HOMEDRIVE\")\n\thomePath := os.Getenv(\"HOMEPATH\")\n\tif len(homeDrive) != 0 && len(homePath) != 0 {\n\t\treturn path.Join(homeDrive + homePath, dcosConfigDirName), nil\n\t}\n\thomeDir = os.Getenv(\"USERPROFILE\")\n\tif len(homeDir) != 0 {\n\t\treturn path.Join(homeDir, dcosConfigDirName), nil\n\t}\n\n\t// If we get here, it means that we couldn't figure out the user's home directory.\n\t// Shouldn't happen in practice.\n\treturn \"\", fmt.Errorf(\"Unable to resolve CLI config directory: DCOS_DIR, HOME, HOMEDRIVE+HOMEPATH, or USERPROFILE\")\n}", "func configPath() (string, error) {\n\thome, err := sys.GetHomeDir()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn filepath.Join(home, \".keeper\", \"config.json\"), nil\n}", "func GetConfigDirectory() string {\n\tconfigBase := os.Getenv(\"CONFIG_BASE\")\n\tif configBase == \"\" {\n\t\tdir, err := filepath.Abs(filepath.Dir(os.Args[0]))\n\t\tif err != nil {\n\t\t\tconfigBase = \".\"\n\t\t} else {\n\t\t\tconfigBase = filepath.Join(dir, \"..\", \"conf\")\n\t\t}\n\t}\n\n\treturn configBase\n}", "func GetSharedGameConfigFolder() string {\n\treturn filepath.Join(GetConfigFolder(), \"configs_shared\")\n}", "func (c Config) GetConfigDirPath() (string, error) {\n\t// Get home directory.\n\thome := os.Getenv(homeKey)\n\tif home != \"\" {\n\t\treturn filepath.Join(home, \".mstreamb0t\"), nil\n\t}\n\tu, err := user.Current()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn filepath.Join(u.HomeDir, 
\".mstreamb0t\"), nil\n}", "func ConfigDir() string {\n\treturn filepath.Join(userSettingsDir, \"kopia\")\n}", "func PathCfgDir() string {\n\tdir := os.Getenv(ENV_CFG_DIR)\n\tif dir != \"\" {\n\t\treturn dir\n\t}\n\thomeDir, err := Home()\n\tif err != nil {\n\t\tlog.Fatal(\"can not fetch home directory\")\n\t}\n\treturn filepath.Join(homeDir, DEFAULT_CFG_DIR)\n}", "func getMcConfigDir() (string, error) {\n\tif mcCustomConfigDir != \"\" {\n\t\treturn mcCustomConfigDir, nil\n\t}\n\tu, err := user.Current()\n\tif err != nil {\n\t\treturn \"\", NewIodine(iodine.New(err, nil))\n\t}\n\t// For windows the path is slightly different\n\tswitch runtime.GOOS {\n\tcase \"windows\":\n\t\treturn filepath.Join(u.HomeDir, mcConfigWindowsDir), nil\n\tdefault:\n\t\treturn filepath.Join(u.HomeDir, mcConfigDir), nil\n\t}\n}", "func GetConfigDir() string {\n\treturn configDir\n}", "func repoRoot() (string, error) {\n\trepoRootState.once.Do(func() {\n\t\tif wsDir := os.Getenv(\"BUILD_WORKSPACE_DIRECTORY\"); wsDir != \"\" {\n\t\t\trepoRootState.dir = wsDir\n\t\t\treturn\n\t\t}\n\t\tdir, err := os.Getwd()\n\t\tif err != nil {\n\t\t\trepoRootState.err = err\n\t\t\treturn\n\t\t}\n\t\tfor {\n\t\t\t_, err := os.Stat(filepath.Join(dir, \"WORKSPACE\"))\n\t\t\tif err == nil {\n\t\t\t\trepoRootState.dir = dir\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err != os.ErrNotExist {\n\t\t\t\trepoRootState.err = err\n\t\t\t\treturn\n\t\t\t}\n\t\t\tparent := filepath.Dir(dir)\n\t\t\tif parent == dir {\n\t\t\t\trepoRootState.err = errors.New(\"could not find workspace directory\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdir = parent\n\t\t}\n\t})\n\treturn repoRootState.dir, repoRootState.err\n}", "func (c *ConfHolder) Dir() string {\n\treturn os.Getenv(\"HOME\") + \"/.config/teonet/teoroom/\"\n}", "func ConfigDir() string {\n\tdir := \".\"\n\tswitch goos {\n\tcase \"darwin\":\n\t\tdir = path.Join(envFunc(\"HOME\"), \"Library\", \"Application Support\", \"shade\")\n\tcase \"linux\", \"freebsd\":\n\t\tdir = 
path.Join(envFunc(\"HOME\"), \".shade\")\n\tdefault:\n\t\tlog.Printf(\"TODO: ConfigDir on GOOS %q\", goos)\n\t}\n\treturn dir\n}", "func GetConfigPath(fileName string) string {\n\tif len(fileName) > 0 {\n\t\tfilepath, err := filepath.Abs(fileName)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"WARNING: No chef configuration file found at %s \\n\", fileName)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tif !doesDirExist(filepath) {\n\t\t\tfmt.Printf(\"WARNING: No chef configuration file found at %s \\n\", fileName)\n\t\t\tos.Exit(1)\n\t\t}\n\t\treturn filepath\n\t}\n\tex, err := os.Getwd()\n\tif err != nil {\n\t\treturn GetDefaultConfigPath()\n\t}\n\n\treturn filepath.Join(ex, configPath)\n\n}", "func (s *Scope) configDir() (string, error) {\n\tswitch s.Type {\n\tcase System:\n\t\treturn defaultConfigDirs[0], nil\n\n\tcase User:\n\t\tpath := os.Getenv(\"XDG_CONFIG_HOME\")\n\t\tif path == \"\" {\n\t\t\treturn expandUser(\"~/.config\"), nil\n\t\t}\n\t\treturn path, nil\n\n\tcase CustomHome:\n\t\treturn filepath.Join(s.CustomHome, \".config\"), nil\n\t}\n\n\treturn \"\", ErrInvalidScope\n}", "func configLocation() string {\n\tif configFileLocation == \"\" {\n\t\thome, err := homedir.Dir()\n\t\terrorExit(err)\n\t\treturn fmt.Sprintf(\"%s/%s\", home, configFileLocation)\n\t}\n\treturn configFileLocation\n}", "func GetConfigFolder() string {\n\treturn filepath.Join(helper.Home(), configPath)\n}", "func UserConfigDir() (string, error)", "func GetConfigPath() (string, error) {\n\tvar carriageReturn string\n\tconfigFile := \"/api.conf\"\n\tdir, err := filepath.Abs(filepath.Dir(os.Args[0]))\n\tif err != nil {\n\t\tos.Exit(EXIT_FLAG_ERROR)\n\t}\n\tconfigPath := dir + configFile\n\tif _, err := os.Stat(configPath); err == nil {\n\t\treturn configPath, nil\n\t}\n\tvar cmdName string\n\tif runtime.GOOS == \"windows\" {\n\t\tcmdName = \"where\"\n\t\tcarriageReturn = \"\\r\\n\"\n\t} else {\n\t\tcmdName = \"which\"\n\t\tcarriageReturn = \"\\n\"\n\t}\n\tresponse, err := GetCommandOutput(cmdName, 
2*time.Second, os.Args[0])\n\tpath := string(bytes.Split(response, []byte(carriageReturn))[0])\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\t// check if is a symlink\n\tfile, err := os.Lstat(path)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif file.Mode()&os.ModeSymlink == os.ModeSymlink {\n\t\t// This is a symlink\n\t\tpath, err = filepath.EvalSymlinks(path)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\treturn filepath.Dir(path) + configFile, nil\n}", "func Dir() string {\n\treturn configDir\n}", "func repoParent() string {\n\tcmd := exec.Command(\"go\", \"env\", \"GOPATH\")\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tlog.Fatalf(\"could not determine repo path: %v\", err)\n\t}\n\tgopath := strings.TrimSpace(string(out))\n\tlist := filepath.SplitList(gopath)\n\tif len(list) == 0 {\n\t\tlog.Fatalf(\"could not determine repo path: could not parse GOPATH=%q\", gopath)\n\t}\n\treturn filepath.Join(list[0], \"src\", \"golang.org\", \"x\")\n}", "func findConfigFile() (string, error) {\n\tdir, err := os.Getwd()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tfor {\n\t\tfp := filepath.Join(dir, ConfigFileName)\n\t\tif _, err := os.Stat(fp); err == nil {\n\t\t\treturn fp, nil\n\t\t} else if !os.IsNotExist(err) {\n\t\t\treturn \"\", err\n\t\t}\n\t\tparentDir := filepath.Dir(dir)\n\t\tif parentDir == dir {\n\t\t\treturn \"\", nil\n\t\t}\n\t\tdir = parentDir\n\t}\n}", "func FindGitDir() (string, error) {\n\twd, err := os.Getwd()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn findGitDirIn(wd, 0)\n}", "func getConfFilePath(root, clusterName string) string {\n\treturn fmt.Sprintf(\"%s/%s.config\", root, clusterName)\n}", "func repoRoot() string {\n\treturn filepath.Clean(trim(cmdOutput(\"git\", \"rev-parse\", \"--show-toplevel\")))\n}", "func GlobalConfigFile() (string, error) {\n\tvar (\n\t\tfile string\n\t\terr error\n\t)\n\n\tfile, err = xdgConfigHome(\"config\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// xdg config 
not exist, use ~/.gitconfig\n\tif _, err := os.Stat(file); err != nil {\n\t\tfile, err = expendHome(\".gitconfig\")\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\treturn file, nil\n}", "func getConfigFilePath() string {\n\t// get current system user\n\tu, err := user.Current()\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\n\treturn path.Join(u.HomeDir, configFileName)\n}", "func GetConfDir(lc logger.LoggingClient, configDir string) string {\n\tenvValue := os.Getenv(envConfDir)\n\tif len(envValue) > 0 {\n\t\tconfigDir = envValue\n\t\tlogEnvironmentOverride(lc, \"-c/-confdir\", envConfDir, envValue)\n\t}\n\n\tif len(configDir) == 0 {\n\t\tconfigDir = defaultConfDirValue\n\t}\n\n\treturn configDir\n}", "func findConfigDir() (string, error) {\n\n\tvar p []string = []string{} // The directories in which we looked, for error message\n\tvar configDir string // The directory in which we found the configuration\n\n\t// Look for the developer's private configuration.\n\tif dir, err := os.Getwd(); err == nil {\n\t\tcfgdir := path.Join(dir, kCONFIG_DIR_DEV)\n\t\tcfgpath := path.Join(cfgdir, kCONFIG_FILENAME)\n\t\tp = append(p, cfgdir)\n\t\tif _, err := os.Stat(cfgpath); err == nil {\n\t\t\tconfigDir = cfgdir\n\t\t}\n\t}\n\n\t// If not found, look for the production configuration.\n\tif configDir == \"\" {\n\t\tcfgdir := kCONFIG_DIR_PROD\n\t\tcfgpath := path.Join(cfgdir, kCONFIG_FILENAME)\n\t\tp = append(p, cfgdir)\n\t\tif _, err := os.Stat(cfgpath); err == nil {\n\t\t\tconfigDir = cfgdir\n\t\t}\n\t}\n\n\t// Report an error if no configuration file was found.\n\tif configDir == \"\" {\n\t\treturn \"\", fmt.Errorf(\"Unable to locate configuration file %q in path %s\",\n\t\t\tkCONFIG_FILENAME, strings.Join(p, \":\"))\n\t} else {\n\t\treturn configDir, nil\n\t}\n}", "func (a AppConfig) ConfigDir() string {\n\tconfigDirMutex.RLock()\n\tdefer configDirMutex.RUnlock()\n\treturn configDir\n}", "func UserConfigDir(tc Context) (string, error) {\n\treturn 
os.UserConfigDir()\n}", "func (b *Bucket) ConfDir() string {\n\treturn b.conf.Dir\n}", "func (u *userGitConfig) GetUserConfigBasePath() string {\n\treturn strings.TrimLeft(u.basePath, \"/\")\n}", "func ConfigDirectory() (dir string, e error) {\r\n\tdir, e = SystemDirectory()\r\n\tif s, ok := os.LookupEnv(\"PROGRAMDATA\"); ok {\r\n\t\tdir, e = s, nil\r\n\t}\r\n\treturn\r\n}", "func GetGameConfigFolder() string {\n\treturn filepath.Join(GetConfigFolder(), \"configs\")\n}", "func findGitDir(dir string) (string, error) {\n\tvar err error\n\n\tdir, err = absPath(dir)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfor {\n\t\t// Check if is in a bare repo\n\t\tif isGitDir(dir) {\n\t\t\treturn dir, nil\n\t\t}\n\n\t\t// Check .git\n\t\tgitdir := filepath.Join(dir, \".git\")\n\t\tfi, err := os.Stat(gitdir)\n\t\tif err != nil {\n\t\t\t// Test parent dir\n\t\t\toldDir := dir\n\t\t\tdir = filepath.Dir(dir)\n\t\t\tif oldDir == dir {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tcontinue\n\t\t} else if fi.IsDir() {\n\t\t\tif isGitDir(gitdir) {\n\t\t\t\treturn gitdir, nil\n\t\t\t}\n\t\t\treturn \"\", fmt.Errorf(\"corrupt git dir: %s\", gitdir)\n\t\t} else {\n\t\t\tf, err := os.Open(gitdir)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", fmt.Errorf(\"cannot open gitdir file '%s'\", gitdir)\n\t\t\t}\n\t\t\tdefer f.Close()\n\t\t\treader := bufio.NewReader(f)\n\t\t\tline, err := reader.ReadString('\\n')\n\t\t\tif strings.HasPrefix(line, \"gitdir:\") {\n\t\t\t\trealgit := strings.TrimSpace(strings.TrimPrefix(line, \"gitdir:\"))\n\t\t\t\tif !filepath.IsAbs(realgit) {\n\t\t\t\t\trealgit, err = absJoin(filepath.Dir(gitdir), realgit)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn \"\", err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif isGitDir(realgit) {\n\t\t\t\t\treturn realgit, nil\n\t\t\t\t}\n\t\t\t\treturn \"\", fmt.Errorf(\"gitdir '%s' points to corrupt git repo: %s\", gitdir, realgit)\n\t\t\t}\n\t\t\treturn \"\", fmt.Errorf(\"bad gitdir file '%s'\", gitdir)\n\t\t}\n\t}\n\treturn \"\", ErrNotInGitDir\n}", 
"func pluginCNIConfDir(conf *operv1.NetworkSpec) string {\n\tif *conf.DisableMultiNetwork {\n\t\treturn SystemCNIConfDir\n\t}\n\treturn MultusCNIConfDir\n}", "func getAppDir() (string, error) {\n\tusr, _ := user.Current()\n\tdir := path.Join(usr.HomeDir, \".lntop\")\n\t_, err := os.Stat(dir)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\terr := os.Mkdir(dir, 0700)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t\terr = ioutil.WriteFile(dir+\"/config.toml\",\n\t\t\t\t[]byte(DefaultFileContent()), 0644)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t} else {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\treturn dir, nil\n}", "func GetConfigFilePath() string {\n\tpath, _ := osext.ExecutableFolder()\n\tpath = fmt.Sprintf(\"%s/eremetic.yml\", path)\n\tif _, err := os.Open(path); err == nil {\n\t\treturn path\n\t}\n\tglobalPath := \"/etc/eremetic/eremetic.yml\"\n\tif _, err := os.Open(globalPath); err == nil {\n\t\treturn globalPath\n\t}\n\n\treturn \"\"\n}", "func findGitRoot(dir string) string {\n\torig := dir\n\tfor dir != \"\" && dir != \".\" && dir != \"/\" {\n\t\t_, err := os.Stat(filepath.Join(dir, \".git\"))\n\t\tif err == nil {\n\t\t\t// Found dir/.git, return dir.\n\t\t\treturn dir\n\t\t} else if !os.IsNotExist(err) {\n\t\t\t// Error finding .git, return original input.\n\t\t\treturn orig\n\t\t}\n\t\tdir, _ = filepath.Split(dir)\n\t\tdir = strings.TrimSuffix(dir, \"/\")\n\t}\n\treturn orig\n}", "func ConfigDirs() []string {\n\treturn []string{}\n}", "func (g *Generator) ConfigWorkingDir() string {\n\treturn g.image.Config.WorkingDir\n}", "func getMcConfigPath() (string, error) {\n\tdir, err := getMcConfigDir()\n\tif err != nil {\n\t\treturn \"\", NewIodine(iodine.New(err, nil))\n\t}\n\treturn filepath.Join(dir, mcConfigFile), nil\n}", "func getCfgPath(name string) string {\n\treturn configDir + \"/\" + name + \".conf\"\n}", "func configPath() string {\n\tenv := os.Getenv(configPathEnv)\n\tif env == \"\" {\n\t\treturn 
defaultConfigFile\n\t}\n\treturn env\n}", "func mustGetMcConfigDir() (configDir string) {\n\tconfigDir, err := getMcConfigDir()\n\tif err != nil {\n\t\tconsole.Fatalf(\"Unable to determine default configuration folder. %s\\n\", NewIodine(iodine.New(err, nil)))\n\t}\n\treturn configDir\n}", "func CephConfFilePath(configDir, clusterName string) string {\n\tconfFile := fmt.Sprintf(\"%s.config\", clusterName)\n\treturn path.Join(configDir, clusterName, confFile)\n}", "func (c *Config) commonWorkspaces() []string {\n\treturn []string{\n\t\tc.tastWorkspace(), // shared code\n\t\t\"/usr/lib/gopath\", // system packages\n\t}\n}", "func GetPathRelativeToConfig(configuration *viper.Viper, key string) string {\n\tconfigFile := configuration.ConfigFileUsed()\n\tp := configuration.GetString(key)\n\tif p == \"\" || filepath.IsAbs(p) {\n\t\treturn p\n\t}\n\treturn filepath.Clean(filepath.Join(filepath.Dir(configFile), p))\n}", "func (g GlobalCfg) RepoConfigFile(repoID string) string {\n\trepo := g.MatchingRepo(repoID)\n\tif repo != nil && repo.RepoConfigFile != \"\" {\n\t\treturn repo.RepoConfigFile\n\t}\n\treturn DefaultAtlantisFile\n}", "func DetermineConfigFilePath() string {\n\tdir := util.ExeDirectory()\n\tr := lookForConfigFile(dir)\n\tif len(r) != 0 {\n\t\treturn r\n\t}\n\n\tdir, err := homedir.Dir()\n\tif err != nil {\n\t\tlog.Printf(\"failed to get home dir: %v\\n\", err)\n\t\treturn \"\"\n\t}\n\treturn lookForConfigFile(dir)\n}", "func getKubeConfigPath() (string, error) {\n\tif envPath, ok := os.LookupEnv(\"KUBECONFIG\"); ok {\n\t\treturn envPath, nil\n\t}\n\tme, err := user.Current()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"cannot get current user information: %w\", err)\n\t}\n\treturn path.Join(me.HomeDir, \".kube/config\"), nil\n}", "func GetConfigFile() (string, string) {\n\tdefaultConfig := filepath.Join(file.UserHome(), config.DefaultConfigDir, config.DefaultConfig)\n\tdefaultAppDir, _ := filepath.Split(defaultConfig)\n\treturn defaultAppDir, 
defaultConfig\n}", "func getConfFile() string {\n\tconf := \"Chrystoki.conf\"\n\tconfPath := os.Getenv(\"ChrystokiConfigurationPath\")\n\tif confPath == \"\" {\n\t\tconfPath = \"/etc\"\n\t}\n\treturn filepath.Join(confPath, conf)\n}", "func getConfFilePath(profile string) string {\n\tpwd, e := os.Getwd()\n\tutil.LogPanic(e)\n\n\tpath := pwd + defaultConfPath\n\tif profile != \"\" {\n\t\tpath += \"-\" + profile\n\t}\n\treturn path + defaultConfFilesSuffix\n}", "func GetGitDirs(logger *zap.Logger, cdUp string, exitCode int, pwd string) (string, string) {\n\tif exitCode != 0 {\n\t\treturn \"\", \"\"\n\t}\n\tabsPath := filepath.Clean(filepath.Join(pwd, cdUp))\n\trealPath, err := filepath.EvalSymlinks(absPath)\n\tif err != nil {\n\t\tlogger.Error(\"Error while handling git dir paths\", zap.Error(err))\n\t\treturn \"\", \"\"\n\t}\n\treturn absPath, realPath\n}", "func getBasePath() string {\r\n\tdir, err := os.Getwd()\r\n\tif err != nil {\r\n\t\tpanic(err)\r\n\t}\r\n\tfor _, err := ioutil.ReadFile(filepath.Join(dir, \"go.mod\")); err != nil && len(dir) > 1; {\r\n\t\tdir = filepath.Dir(dir)\r\n\t\t_, err = ioutil.ReadFile(filepath.Join(dir, \"go.mod\"))\r\n\t}\r\n\tif len(dir) < 2 {\r\n\t\tpanic(\"No go.mod found\")\r\n\t}\r\n\treturn dir\r\n}", "func Dir() (string, error) {\n\tc, err := os.UserConfigDir()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn filepath.Join(c, appName), nil\n}", "func getProjectRoot(t *testing.T) (rootPath string) {\n\troot, err := os.Getwd()\n\trequire.NoError(t, err, \"could not get current working directory\")\n\tfor root != \"/\" { // Walk up path to find dir containing go.mod\n\t\tif _, err := os.Stat(filepath.Join(root, \"go.mod\")); os.IsNotExist(err) {\n\t\t\troot = filepath.Dir(root)\n\t\t} else {\n\t\t\treturn root\n\t\t}\n\t}\n\tt.Fatal(\"could not find project root\")\n\treturn\n}", "func GetConfigPath(configPath string) string {\n\tif configPath == \"docker\" {\n\t\treturn \"./config/config-docker\"\n\t}\n\treturn 
\"./config/config-local\"\n}", "func Dir() string {\n\tsrcdir := os.Getenv(\"TEST_SRCDIR\")\n\treturn filepath.Join(\n\t\tsrcdir, os.Getenv(\"TEST_WORKSPACE\"),\n\t\t\"go\", \"tools\", \"gazelle\", \"testdata\",\n\t)\n}", "func (cc *CollectdConfig) ManagedConfigDir() string {\n\treturn filepath.Join(cc.InstanceConfigDir(), \"managed_config\")\n}", "func FindRootRepoPath() (string, error) {\n\tpwd, err := os.Getwd()\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"Error getting pwd: \", err)\n\t\tos.Exit(1)\n\t}\n\n\tparts := strings.SplitAfter(pwd, string(os.PathSeparator))\n\tfor i, _ := range parts {\n\t\ttestPath := path.Join(parts[:i+1]...)\n\t\tif IsRepo(testPath) {\n\t\t\treturn testPath, nil\n\t\t}\n\t}\n\n\t// Return pwd in case we're cloning into pwd.\n\treturn pwd, fmt.Errorf(\"No .git found in %s or any parent dir.\", pwd)\n}", "func getConfigFilePath() (string, error) {\n var err error\n configPath := configdir.LocalConfig(\"road-trip\")\n err = configdir.MakePath(configPath) // Ensure it exists.\n if err != nil {\n return \"\", errors.New(\"Cannot access folder: '\" + configPath + \"' to store config file.\")\n }\n\n return configPath + string(os.PathSeparator) + \"player.yaml\", nil\n}", "func (config *Config) WorkingDir() string {\n\tconfigPath, err := filepath.Abs(config.configPath)\n\tif err != nil {\n\t\tlog.Fatalf(\"config error: unable to find config file (%s):\\n%v\\n\", config.configPath, err)\n\t}\n\n\tconfigPath, err = filepath.EvalSymlinks(configPath)\n\tif err != nil {\n\t\tlog.Fatalf(\"config error: unable to remove symbolic links for filepath (%s):\\n%v\\n\", configPath, err)\n\t}\n\n\treturn filepath.Join(filepath.Dir(configPath), config.Path)\n}", "func getRepoPath() (string, error) {\n\t// Set default base path and directory name\n\tdirectoryName := \".saturn\"\n\n\t// Join the path and directory name, then expand the home path\n\tfullPath, err := homedir.Expand(filepath.Join(\"~\", directoryName))\n\tif err != nil {\n\t\treturn 
\"\", err\n\t}\n\n\t// Return the shortest lexical representation of the path\n\treturn filepath.Clean(fullPath), nil\n}", "func configDiffs(base string) (map[string]struct{}, error) {\n\tbase, err := filepath.Abs(base)\n\tif !strings.HasSuffix(base, string(filepath.Separator)) {\n\t\tbase += string(filepath.Separator)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tout, err := runCmd(base, \"git\", \"rev-parse\", \"--show-toplevel\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tgitroot := strings.Trim(string(out), \"\\n\")\n\tlog.Infof(\"config git root %s\", gitroot)\n\n\tresults := map[string]struct{}{}\n\tout, err = runCmd(base, \"git\", \"status\", \"--porcelain\", \"-uall\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tscanner := bufio.NewScanner(bytes.NewReader(out))\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\tif !statusPattern.MatchString(line) {\n\t\t\tcontinue\n\t\t}\n\t\tpath := filepath.Join(gitroot, line[3:])\n\t\tif !strings.HasPrefix(path, base) {\n\t\t\tcontinue\n\t\t}\n\t\tresults[path[len(base):]] = struct{}{}\n\t}\n\treturn results, nil\n}", "func Path(sys string) (string, error) {\n\n\tvar paths []string\n\n\t// if CHEAT_CONFIG_PATH is set, return it\n\tif os.Getenv(\"CHEAT_CONFIG_PATH\") != \"\" {\n\n\t\t// expand ~\n\t\texpanded, err := homedir.Expand(os.Getenv(\"CHEAT_CONFIG_PATH\"))\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"failed to expand ~: %v\", err)\n\t\t}\n\n\t\treturn expanded, nil\n\n\t\t// OSX config paths\n\t} else if sys == \"darwin\" {\n\n\t\tpaths = []string{\n\t\t\tpath.Join(os.Getenv(\"XDG_CONFIG_HOME\"), \"/cheat/conf.yml\"),\n\t\t\tpath.Join(os.Getenv(\"HOME\"), \".config/cheat/conf.yml\"),\n\t\t\tpath.Join(os.Getenv(\"HOME\"), \".cheat/conf.yml\"),\n\t\t}\n\n\t\t// Linux config paths\n\t} else if sys == \"linux\" {\n\n\t\tpaths = []string{\n\t\t\tpath.Join(os.Getenv(\"XDG_CONFIG_HOME\"), \"/cheat/conf.yml\"),\n\t\t\tpath.Join(os.Getenv(\"HOME\"), 
\".config/cheat/conf.yml\"),\n\t\t\tpath.Join(os.Getenv(\"HOME\"), \".cheat/conf.yml\"),\n\t\t\t\"/etc/cheat/conf.yml\",\n\t\t}\n\n\t\t// Windows config paths\n\t} else if sys == \"windows\" {\n\n\t\tpaths = []string{\n\t\t\tfmt.Sprintf(\"%s/cheat/conf.yml\", os.Getenv(\"APPDATA\")),\n\t\t\tfmt.Sprintf(\"%s/cheat/conf.yml\", os.Getenv(\"PROGRAMDATA\")),\n\t\t}\n\n\t\t// Unsupported platforms\n\t} else {\n\t\treturn \"\", fmt.Errorf(\"unsupported os: %s\", sys)\n\t}\n\n\t// check if the config file exists on any paths\n\tfor _, p := range paths {\n\t\tif _, err := os.Stat(p); err == nil {\n\t\t\treturn p, nil\n\t\t}\n\t}\n\n\t// we can't find the config file if we make it this far\n\treturn \"\", fmt.Errorf(\"could not locate config file\")\n}", "func Init() string {\n\tdir, err := os.Getwd()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\terr = os.Mkdir(constants.GitDir, 0755)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn filepath.Join(dir, constants.GitDir)\n}", "func FindProjectConfigFilePath() (string, error) {\n\tif cwd, err := os.Getwd(); err == nil {\n\t\tvar configFilePath string\n\t\tfor cwd != \".\" && cwd != string(filepath.Separator) {\n\t\t\tfor _, filename := range [2]string{\"outrigger.yml\", \".outrigger.yml\"} {\n\t\t\t\tconfigFilePath = filepath.Join(cwd, filename)\n\t\t\t\tif _, e := os.Stat(configFilePath); !os.IsNotExist(e) {\n\t\t\t\t\treturn configFilePath, nil\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tcwd = filepath.Dir(cwd)\n\t\t}\n\t} else {\n\t\treturn \"\", err\n\t}\n\n\treturn \"\", errors.New(\"no outrigger configuration file found\")\n}", "func appConfigFilePaths(ctx context.Context) (paths []string) {\n\tif p := flag.GetAppConfigFilePath(ctx); p != \"\" {\n\t\tpaths = append(paths, p, filepath.Join(p, app.DefaultConfigFileName))\n\n\t\treturn\n\t}\n\n\twd := state.WorkingDirectory(ctx)\n\tpaths = append(paths, filepath.Join(wd, app.DefaultConfigFileName))\n\n\treturn\n}", "func getProjectRoot() (string, error) {\n\tdir, err := 
os.Getwd()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Error getting pwd: %s\", err)\n\t}\n\tfor {\n\t\tparent, name := filepath.Split(dir)\n\t\tif name == \"acr-builder\" {\n\t\t\tbreak\n\t\t}\n\t\tparent = filepath.Clean(parent)\n\t\tif parent == \"\" {\n\t\t\tpanic(\"no acr-builder directory find on pwd\")\n\t\t}\n\t\tdir = parent\n\t}\n\treturn dir, nil\n}", "func getCredentialsFilename(xdgCongifHome string) (string, error) {\n\tif xdgCongifHome == \"\" {\n\t\thome, err := os.UserHomeDir()\n\t\tif err != nil {\n\t\t\treturn \"\", errors.Wrapf(err, \"no $XDG_CONFIG_HOME set and failed to find user home directory\")\n\t\t}\n\t\treturn filepath.Join(home, \".git-credentials\"), nil\n\t}\n\n\twriteDirectory := filepath.Join(xdgCongifHome, \"git\")\n\texists, err := files.DirExists(writeDirectory)\n\tif err != nil {\n\t\treturn \"\", errors.Wrapf(err, \"failed to check if directory %s exists\", writeDirectory)\n\t}\n\tif !exists {\n\t\terr = os.MkdirAll(writeDirectory, os.ModePerm)\n\t\tif err != nil {\n\t\t\treturn \"\", errors.Wrapf(err, \"failed to create directory %s\", writeDirectory)\n\t\t}\n\t}\n\treturn filepath.Join(xdgCongifHome, \"git\", \"credentials\"), nil\n}", "func ConfigPath() string {\n\treturn defaultConfig.ConfigPath()\n}", "func (v Repository) RepoDir() string {\n\tif path.IsDir(v.DotGit) {\n\t\treturn v.DotGit\n\t}\n\treturn v.GitDir\n}", "func (b *Bucket) Cwd() string {\n\treturn b.cwd\n}", "func getConfigFile() (string, error) {\n\tconfigDir, err := os.UserConfigDir()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tappDir := configDir + \"/whatphone\"\n\tif _, err := os.Stat(appDir); os.IsNotExist(err) {\n\t\terr = os.Mkdir(appDir, 0744)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\treturn appDir + \"/config.json\", nil\n}", "func GetClusterConfigPath(cluster string) (string, error) {\n\thome, err := GetHomeDir()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn home + \"/.kcm/\" + cluster + \"/config\", 
nil\n}", "func GetCommonEnvironmentConfigurations() map[string]string {\n\n\tenvs := make(map[string]string)\n\tenvs[\"PATH\"], envs[\"USERNAME\"] = os.Getenv(\"PATH\"), os.Getenv(\"USERNAME\")\n\treturn envs\n}", "func GetConfigPath() (p string, err error) {\n\td, err := GetPassDir()\n\tif err == nil {\n\t\tp = filepath.Join(d, ConfigFileName)\n\t}\n\treturn\n}", "func SystemConfigFile() string {\n\tfile := os.Getenv(gitSystemConfigEnv)\n\tif file == \"\" {\n\t\tfile = \"/etc/gitconfig\"\n\t}\n\treturn file\n}", "func PathConfig(homeDir string) string {\n\treturn fmt.Sprintf(\"%s/%s\", homeDir, ConfigSubDir)\n}", "func gitPath() (string, error) {\n\tgitOnce.Do(func() {\n\t\tpath, err := exec.LookPath(\"git\")\n\t\tif err != nil {\n\t\t\tgitOnce.err = err\n\t\t\treturn\n\t\t}\n\t\tif runtime.GOOS == \"plan9\" {\n\t\t\tgitOnce.err = errors.New(\"plan9 git does not support the full git command line\")\n\t\t}\n\t\tgitOnce.path = path\n\t})\n\n\treturn gitOnce.path, gitOnce.err\n}", "func mockConfigDir() (string, error) {\n\treturn \"/tmp/CONFIG/datamaps/\", nil\n}" ]
[ "0.7268661", "0.698559", "0.67048705", "0.6670058", "0.64420474", "0.6358523", "0.6318514", "0.6305134", "0.629191", "0.6254382", "0.6215227", "0.6206227", "0.6198722", "0.6184039", "0.6167576", "0.61568576", "0.61452913", "0.6124343", "0.6116592", "0.6085793", "0.6062132", "0.60501266", "0.6042316", "0.60039276", "0.5998813", "0.595658", "0.5938422", "0.59223557", "0.5919555", "0.5913251", "0.5911764", "0.5886241", "0.58841753", "0.5876622", "0.58485204", "0.58387864", "0.5834103", "0.58278495", "0.58263355", "0.5825209", "0.58097047", "0.5807246", "0.578959", "0.5770676", "0.57529277", "0.5748568", "0.57377946", "0.5734286", "0.57284105", "0.5699466", "0.56926566", "0.5685172", "0.5674379", "0.5658118", "0.56543297", "0.56537694", "0.5652347", "0.56451637", "0.5637511", "0.5623499", "0.5622359", "0.56016225", "0.5591058", "0.558569", "0.5576658", "0.5575515", "0.55639094", "0.55631447", "0.55540335", "0.5549037", "0.55380523", "0.55358034", "0.5535442", "0.5532033", "0.5531434", "0.55282867", "0.5521389", "0.55013937", "0.54998577", "0.5483058", "0.54793096", "0.5477189", "0.54701495", "0.5453123", "0.5447785", "0.544479", "0.5442076", "0.543852", "0.5436953", "0.5427209", "0.54263026", "0.5420562", "0.5413273", "0.54105693", "0.54077756", "0.5403355", "0.53929144", "0.5392701", "0.5391471", "0.53881663" ]
0.7390736
0
IsBare indicates a repository is a bare repository.
func (v Repository) IsBare() bool { return v.workDir == "" }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func IsBareRepository(path string) bool {\n\n\tcmd := exec.Command(\"git\", fmt.Sprintf(\"--git-dir=%s\", path), \"rev-parse\", \"--is-bare-repository\")\n\tbody, err := cmd.Output()\n\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tstatus := strings.Trim(string(body), \"\\n \")\n\treturn status == \"true\"\n}", "func IsLocalNonBareGitRepository(fs fs.FileSystem, dir string) (bool, error) {\n\t_, err := fs.Stat(filepath.Join(dir, \".git\"))\n\tif os.IsNotExist(err) {\n\t\treturn false, nil\n\t}\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn true, nil\n}", "func cloneBareRepository(remote string, dest string) error {\n\tcmd := exec.Command(\"git\", \"clone\", \"--bare\", remote, dest)\n\terr := cmd.Run()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func CloneBare(URL string) (Repository, error) {\n\t// Git objects storer based on memory\n\tstorer := memory.NewStorage()\n\n\trepo, err := git.Clone(storer, nil, &git.CloneOptions{\n\t\tURL: URL,\n\t\tTags: git.TagMode(2),\n\t})\n\tif err != nil {\n\t\treturn Repository{}, err\n\t}\n\treturn Repository{repo}, nil\n}", "func InitBareRepository(path string) (*Repository, error) {\n\n\tpath, err := filepath.Abs(path)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not determine absolute path: %v\", err)\n\t}\n\n\tcmd := exec.Command(\"git\", \"init\", \"--bare\", path)\n\terr = cmd.Run()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Repository{Path: path}, nil\n}", "func setupBareGitRepo(t *testing.T) *repositories.GitRepository {\n\tt.Helper()\n\tassert := assert.New(t)\n\n\trepoDir, err := ioutil.TempDir(\"\", \"rb-gateway-bare-repo-\")\n\tassert.Nil(err)\n\n\t_, err = git.PlainInit(repoDir, true)\n\tassert.Nil(err)\n\n\treturn &repositories.GitRepository{\n\t\tRepositoryInfo: repositories.RepositoryInfo{\n\t\t\tName: \"upstream\",\n\t\t\tPath: repoDir,\n\t\t},\n\t}\n}", "func LocalNonBareGitRepositoryIsEmpty(fs fs.FileSystem, dir string) (bool, error) {\n\tgitPath := 
filepath.Join(dir, \".git\")\n\n\tfi, err := fs.Stat(gitPath)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tif !fi.IsDir() {\n\t\tgitPath, err = followGitSubmodule(fs, gitPath)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t}\n\n\t// Search for any file in .git/{objects,refs}. We don't just search the\n\t// base .git directory because of the hook samples that are normally\n\t// generated with `git init`\n\tfound := false\n\tfor _, dir := range []string{\"objects\", \"refs\"} {\n\t\terr := fs.Walk(filepath.Join(gitPath, dir), func(path string, info os.FileInfo, err error) error {\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif !info.IsDir() {\n\t\t\t\tfound = true\n\t\t\t}\n\n\t\t\tif found {\n\t\t\t\treturn filepath.SkipDir\n\t\t\t}\n\n\t\t\treturn nil\n\t\t})\n\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\tif found {\n\t\t\treturn false, nil\n\t\t}\n\t}\n\n\treturn true, nil\n}", "func BareMetal(name string) bool {\n\treturn name == None || name == Mock\n}", "func (r *Repo) IsRaw() (res bool) {\n\treturn r.WorkDir == \"\"\n}", "func IsRepo() bool {\n\treturn run.Silent(\"git rev-parse --git-dir >/dev/null 2>&1\")\n}", "func (this *Tidy) Bare(val bool) (bool, error) {\n\treturn this.optSetBool(C.TidyMakeBare, cBool(val))\n}", "func IsRepo() bool {\n\tout, err := Run(\"rev-parse\", \"--is-inside-work-tree\")\n\treturn err == nil && strings.TrimSpace(out) == \"true\"\n}", "func (p *cliModules) IsBoringBinary() bool {\n\treturn false\n}", "func (a *Application) checkRepoIsReal(name ...string) bool {\n\tvar fullname string\n\tswitch len(name) {\n\tcase 1:\n\t\tfullname = strings.TrimSpace(name[0])\n\t\tif fullname == \"\" || fullname == \"/\" {\n\t\t\treturn false\n\t\t}\n\tcase 2:\n\t\torg := strings.TrimSpace(name[0])\n\t\trepo := strings.TrimSpace(name[1])\n\t\tif org == \"\" || repo == \"\" {\n\t\t\treturn false\n\t\t}\n\t\tfullname = u.Format(\"%s/%s\", name[0], name[1])\n\tdefault:\n\t\tpanic(\"Youre doing this 
wrong\")\n\t}\n\turl := u.Format(\"https://github.com/%s\", fullname)\n\tif code, _, _, e := nt.HTTP(nt.HEAD, url, nt.NewHeaderBuilder().GetHeader(), nil); e != nil || code != 200 {\n\t\treturn false\n\t} else {\n\t\treturn true\n\t}\n}", "func (g *GitDriver) IsOpen() bool {\n\tif g.Repository == nil {\n\t\treturn false\n\t}\n\treturn true\n}", "func RepositoryBase_IsResource(construct awscdk.IConstruct) *bool {\n\t_init_.Initialize()\n\n\tvar returns *bool\n\n\t_jsii_.StaticInvoke(\n\t\t\"monocdk.aws_ecr.RepositoryBase\",\n\t\t\"isResource\",\n\t\t[]interface{}{construct},\n\t\t&returns,\n\t)\n\n\treturn returns\n}", "func (r *Repo) IsGitHubRepo() bool { return strings.HasPrefix(r.URI, \"github.com/\") }", "func (a *Address) Bare() *Address {\n\tb := *a\n\tb.ResourcePart = \"\"\n\treturn &b\n}", "func (cmd InspectCmd) RequiresRepo() bool {\n\treturn true\n}", "func (jid JID) Bare() JID {\n\tif i := strings.Index(string(jid), \"/\"); i != -1 {\n\t\treturn jid[0:i]\n\t}\n\treturn jid\n}", "func (s *GitlabSCM) RepositoryIsEmpty(ctx context.Context, opt *RepositoryOptions) bool {\n\t// TODO no implementation provided yet\n\treturn false\n}", "func (handler *InitHandler) isUsingRepositoryMember(r Repository) bool {\n\treturn len(r.Managers) > 0 || len(r.Developers) > 0 || len(r.Viewers) > 0 || len(r.Reporters) > 0\n}", "func (cmd ConfigCmd) RequiresRepo() bool {\n\treturn false\n}", "func (r *Repo) IsClean() (res bool, lines StatLines) {\n\tlines = r.mapStatus()\n\tres = len(lines) == 0\n\treturn\n}", "func Repository_IsResource(construct awscdk.IConstruct) *bool {\n\t_init_.Initialize()\n\n\tvar returns *bool\n\n\t_jsii_.StaticInvoke(\n\t\t\"monocdk.aws_ecr.Repository\",\n\t\t\"isResource\",\n\t\t[]interface{}{construct},\n\t\t&returns,\n\t)\n\n\treturn returns\n}", "func RepositoryHasStarterJSON(repoDirectory string) bool {\n\t_, err := os.Stat(repoDirectory)\n\tif err != nil || os.IsNotExist(err) {\n\t\treturn false\n\t}\n\t_, err = os.Stat(repoDirectory + 
\"starter.json\")\n\tif err != nil || os.IsNotExist(err) {\n\t\treturn false\n\t}\n\treturn true\n}", "func GitRepositoryReady(repository GitRepository, artifact Artifact, url, reason, message string) GitRepository {\n\trepository.Status.Artifact = &artifact\n\trepository.Status.URL = url\n\tmeta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionTrue, reason, message)\n\treturn repository\n}", "func IsGitRepository(path string) bool {\n\tvar out bytes.Buffer\n\tcmd := exec.Command(\"git\", \"-C\", options.Dir, \"rev-parse\", \"--is-inside-work-tree\")\n\tcmd.Stdout = &out\n\n\terr := cmd.Run()\n\tif err != nil {\n\t\tlog.Println(\"ERROR\", err)\n\t\treturn false\n\t}\n\n\tvar val bool\n\t_, err = fmt.Sscanf(out.String(), \"%t\", &val)\n\tif err != nil {\n\t\tlog.Println(\"ERROR\", err)\n\t\treturn false\n\t}\n\n\treturn val\n}", "func GitRepositoryNotReady(repository GitRepository, reason, message string) GitRepository {\n\tmeta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionFalse, reason, message)\n\treturn repository\n}", "func isTestgroundRepo(path string) bool {\n\tfi, err := os.Stat(path)\n\tif err != nil || !fi.IsDir() {\n\t\treturn false\n\t}\n\tf, err := os.Open(filepath.Join(path, \"go.mod\"))\n\tif err != nil {\n\t\treturn false\n\t}\n\ts := bufio.NewScanner(f)\n\tif !s.Scan() {\n\t\treturn false\n\t}\n\treturn s.Text() == gomodHeader\n}", "func isTestgroundRepo(path string) bool {\n\tfi, err := os.Stat(path)\n\tif err != nil || !fi.IsDir() {\n\t\treturn false\n\t}\n\tf, err := os.Open(filepath.Join(path, \"go.mod\"))\n\tif err != nil {\n\t\treturn false\n\t}\n\ts := bufio.NewScanner(f)\n\tif !s.Scan() {\n\t\treturn false\n\t}\n\treturn s.Text() == gomodHeader\n}", "func (s *AutograderService) IsEmptyRepo(ctx context.Context, in *pb.RepositoryRequest) (*pb.Void, error) {\n\tusr, scm, err := s.getUserAndSCMForCourse(ctx, in.GetCourseID())\n\tif err != nil {\n\t\ts.logger.Errorf(\"IsEmptyRepo failed: scm 
authentication error: %w\", err)\n\t\treturn nil, err\n\t}\n\n\tif !s.isTeacher(usr.GetID(), in.GetCourseID()) {\n\t\ts.logger.Error(\"IsEmptyRepo failed: user is not teacher\")\n\t\treturn nil, status.Errorf(codes.PermissionDenied, \"only teachers can access repository info\")\n\t}\n\n\tif err := s.isEmptyRepo(ctx, scm, in); err != nil {\n\t\ts.logger.Errorf(\"IsEmptyRepo failed: %w\", err)\n\t\tif contextCanceled(ctx) {\n\t\t\treturn nil, status.Error(codes.FailedPrecondition, ErrContextCanceled)\n\t\t}\n\t\tif ok, parsedErr := parseSCMError(err); ok {\n\t\t\treturn nil, parsedErr\n\t\t}\n\t\treturn nil, status.Errorf(codes.FailedPrecondition, \"group repository does not exist or not empty\")\n\t}\n\n\treturn &pb.Void{}, nil\n}", "func TestBareCommand(t *testing.T) {\n\n\t// Run a blank command\n\toutput := executeCommand()\n\n\t// We should have a subcommand required command and a complete usage dump\n\trequire.NotNil(t, executeError, \"there should have been an error\")\n\trequire.Equal(t, \"subcommand is required\", executeError.Error(), \"Expected subcommand required error\")\n\trequire.Contains(t, output,\n\t\t\"Slog is a CLI utility for reading and culling web access logs stored in S3\",\n\t\t\"Expected full usage display\")\n}", "func (j JID) Bare() JID {\n\treturn JID{\n\t\tlocallen: j.locallen,\n\t\tdomainlen: j.domainlen,\n\t\tdata: j.data[:j.domainlen+j.locallen],\n\t}\n}", "func RepositoryBase_IsConstruct(x interface{}) *bool {\n\t_init_.Initialize()\n\n\tvar returns *bool\n\n\t_jsii_.StaticInvoke(\n\t\t\"monocdk.aws_ecr.RepositoryBase\",\n\t\t\"isConstruct\",\n\t\t[]interface{}{x},\n\t\t&returns,\n\t)\n\n\treturn returns\n}", "func NewBare() *Proxier {\n\tp := &Proxier{}\n\tp.proxySources = map[proxy.ProxySource]bool{}\n\tp.ProxyTimeout = DefaultProxyDBTimeout\n\treturn p\n}", "func Repository_IsConstruct(x interface{}) *bool {\n\t_init_.Initialize()\n\n\tvar returns 
*bool\n\n\t_jsii_.StaticInvoke(\n\t\t\"monocdk.aws_ecr.Repository\",\n\t\t\"isConstruct\",\n\t\t[]interface{}{x},\n\t\t&returns,\n\t)\n\n\treturn returns\n}", "func isMainBranch(br string) bool {\n\treturn br == \"master\" || strings.HasPrefix(br, \"dev.\")\n}", "func RepositoryHasPrefix(r kappnavv1.Repository, prefix string) bool {\n\treturn strings.HasPrefix(string(r), prefix)\n}", "func NewBaremetalCommand(cfgFactory config.Factory) *cobra.Command {\n\toptions := &CommonOptions{}\n\tbaremetalRootCmd := &cobra.Command{\n\t\tUse: \"baremetal\",\n\t\tShort: \"Perform actions on baremetal hosts\",\n\t}\n\n\tbaremetalRootCmd.AddCommand(NewEjectMediaCommand(cfgFactory, options))\n\tbaremetalRootCmd.AddCommand(NewPowerOffCommand(cfgFactory, options))\n\tbaremetalRootCmd.AddCommand(NewPowerOnCommand(cfgFactory, options))\n\tbaremetalRootCmd.AddCommand(NewPowerStatusCommand(cfgFactory, options))\n\tbaremetalRootCmd.AddCommand(NewRebootCommand(cfgFactory, options))\n\tbaremetalRootCmd.AddCommand(NewRemoteDirectCommand(cfgFactory, options))\n\n\treturn baremetalRootCmd\n}", "func TestNewRepoOwnerKlone(t *testing.T) {\n\tpath := fmt.Sprintf(\"%s/klone-e2e-empty\", local.Home())\n\n\trepo, err := GitServer.GetRepoByOwner(GitServer.OwnerName(), \"klone-e2e-empty\")\n\tif err != nil && !strings.Contains(err.Error(), \"404 Not Found\") {\n\t\tt.Fatalf(\"Unable to attempt to search for repo: %v\", err)\n\t}\n\tif repo != nil && repo.Owner() == GitServer.OwnerName() {\n\t\t_, err := GitServer.DeleteRepo(\"klone-e2e-empty\")\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Unable to delete repo: %v\", err)\n\t\t}\n\t}\n\trepo, err = GitServer.NewRepo(\"klone-e2e-empty\", \"A throw-away repository created by Klone (@kris-nova)\")\n\tif err != nil {\n\t\tt.Fatalf(\"Unable to create new repo: %v\", err)\n\t}\n\terr = IdempotentKlone(path, \"klone-e2e-empty\")\n\tif err != nil {\n\t\tt.Fatalf(\"Error kloning: %v\", err)\n\t}\n\tr, err := git.PlainOpen(path)\n\tif err != nil 
{\n\t\tt.Fatalf(\"Error opening path: %v\", err)\n\t}\n\tremotes, err := r.Remotes()\n\tif err != nil {\n\t\tt.Fatalf(\"Error reading remotes: %v\", err)\n\t}\n\toriginOk := false\n\tfor _, remote := range remotes {\n\t\trspl := strings.Split(remote.String(), \"\\t\")\n\t\tif len(rspl) < 3 {\n\t\t\tt.Fatalf(\"Invalid remote string: %s\", remote.String())\n\t\t}\n\t\tname := rspl[0]\n\t\turl := rspl[1]\n\t\tif strings.Contains(name, \"origin\") && strings.Contains(url, fmt.Sprintf(\"[email protected]:%s/klone-e2e-empty.git\", GitServer.OwnerName())) {\n\t\t\toriginOk = true\n\t\t}\n\t\t//fmt.Println(name, url)\n\t}\n\tif originOk == false {\n\t\tt.Fatal(\"Error detecting remote [origin]\")\n\t}\n}", "func (s RepoSpec) IsZero() bool { return s.URI == \"\" }", "func (g *GitLocal) IsShallow(dir string) (bool, error) {\n\treturn g.GitCLI.IsShallow(dir)\n}", "func IsRepo(repoPath string) bool {\n\trp := path.Join(repoPath, \".git\")\n\tinfo, err := os.Stat(rp)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\treturn info.IsDir()\n}", "func (cmd LoginCmd) RequiresRepo() bool {\n\treturn false\n}", "func (v Repository) Raw() *git.Repository {\n\tvar (\n\t\terr error\n\t)\n\n\tif v.raw != nil {\n\t\treturn v.raw\n\t}\n\n\tv.raw, err = git.PlainOpen(v.CommonDir())\n\tif err != nil {\n\t\tlog.Errorf(\"cannot open git repo '%s': %s\", v.RepoDir(), err)\n\t\treturn nil\n\t}\n\treturn v.raw\n}", "func (v Repository) ObjectsRepository() *Repository {\n\tif v.ObjectsGitDir == \"\" {\n\t\treturn nil\n\t}\n\n\treturn &Repository{\n\t\tProject: v.Project,\n\n\t\tDotGit: \"\",\n\t\tGitDir: v.ObjectsGitDir,\n\t\tObjectsGitDir: \"\",\n\n\t\tIsBare: true,\n\t\tRemoteURL: v.RemoteURL,\n\t\tSettings: v.Settings,\n\t\tRemotes: nil,\n\t}\n}", "func (is *ObjectStorage) ValidateRepo(name string) (bool, error) {\n\tif !zreg.FullNameRegexp.MatchString(name) {\n\t\treturn false, zerr.ErrInvalidRepositoryName\n\t}\n\n\t// 
https://github.com/opencontainers/image-spec/blob/master/image-layout.md#content\n\t// at least, expect at least 3 entries - [\"blobs\", \"oci-layout\", \"index.json\"]\n\t// and an additional/optional BlobUploadDir in each image store\n\t// for objects storage we can not create empty dirs, so we check only against index.json and oci-layout\n\tdir := path.Join(is.rootDir, name)\n\tif fi, err := is.store.Stat(context.Background(), dir); err != nil || !fi.IsDir() {\n\t\treturn false, zerr.ErrRepoNotFound\n\t}\n\n\tfiles, err := is.store.List(context.Background(), dir)\n\tif err != nil {\n\t\tis.log.Error().Err(err).Str(\"dir\", dir).Msg(\"unable to read directory\")\n\n\t\treturn false, zerr.ErrRepoNotFound\n\t}\n\n\t//nolint:gomnd\n\tif len(files) < 2 {\n\t\treturn false, zerr.ErrRepoBadVersion\n\t}\n\n\tfound := map[string]bool{\n\t\tispec.ImageLayoutFile: false,\n\t\t\"index.json\": false,\n\t}\n\n\tfor _, file := range files {\n\t\t_, err := is.store.Stat(context.Background(), file)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\tfilename, err := filepath.Rel(dir, file)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\tfound[filename] = true\n\t}\n\n\tfor k, v := range found {\n\t\tif !v && k != storageConstants.BlobUploadDir {\n\t\t\treturn false, nil\n\t\t}\n\t}\n\n\tbuf, err := is.store.GetContent(context.Background(), path.Join(dir, ispec.ImageLayoutFile))\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tvar il ispec.ImageLayout\n\tif err := json.Unmarshal(buf, &il); err != nil {\n\t\treturn false, err\n\t}\n\n\tif il.Version != ispec.ImageLayoutVersion {\n\t\treturn false, zerr.ErrRepoBadVersion\n\t}\n\n\treturn true, nil\n}", "func isGitRepo (dir string) (bool, error) {\n\tfiles, err := ioutil.ReadDir(dir)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tfor _, file := range files {\n\t\tif file.IsDir() && file.Name() == \".git\" {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\treturn false, nil\n}", "func (r *RepoRef) IsEmpty() bool 
{\n\treturn r.URL == \"\" && r.Path == \"\"\n}", "func TestBareCommand(t *testing.T) {\n\n\t// Run a blank command\n\toutput := executeCommand()\n\n\t// We should a complete usage / help dump\n\trequire.Nil(t, executeError, \"there should not have been an error: \", executeError)\n\trequire.Contains(t, output,\n\t\t\"mafia token-code [flags]\",\n\t\t\"Expected usage display\")\n}", "func IsMeta(p string) bool {\n\tbase := path.Base(p)\n\n\t// https://wiki.debian.org/RepositoryFormat#Compression_of_indices\n\tswitch {\n\tcase strings.HasSuffix(base, \".gz\"):\n\t\tbase = base[0 : len(base)-3]\n\tcase strings.HasSuffix(base, \".bz2\"):\n\t\tbase = base[0 : len(base)-4]\n\tcase strings.HasSuffix(base, \".xz\"):\n\t\tbase = base[0 : len(base)-3]\n\tcase strings.HasSuffix(base, \".lzma\"):\n\t\tbase = base[0 : len(base)-5]\n\tcase strings.HasSuffix(base, \".lz\"):\n\t\tbase = base[0 : len(base)-3]\n\t}\n\n\tswitch base {\n\tcase \"Release\", \"Release.gpg\", \"InRelease\":\n\t\treturn true\n\tcase \"Packages\", \"Sources\", \"Index\":\n\t\treturn true\n\t}\n\n\treturn false\n}", "func NewBarePacket(src []byte) *Packet {\n\treturn &Packet{0, src, true, 0, 0, 0}\n}", "func NewBareDecoder(l []id.Item, in io.Reader, endian bool) Decoding {\n\tp := &Bare{}\n\tp.in = in\n\tp.syncBuffer = make([]byte, 0, defaultSize)\n\tp.lut = MakeLut(l)\n\tp.endian = endian\n\treturn p.Decoding\n}", "func NewBaremetalCommand(rootSettings *environment.AirshipCTLSettings) *cobra.Command {\n\tbaremetalRootCmd := &cobra.Command{\n\t\tUse: \"baremetal\",\n\t\tShort: \"Perform actions on baremetal hosts\",\n\t\tPersistentPreRun: func(cmd *cobra.Command, args []string) {\n\t\t\tlog.Init(rootSettings.Debug, cmd.OutOrStderr())\n\n\t\t\t// Load or Initialize airship Config\n\t\t\trootSettings.InitConfig()\n\t\t},\n\t}\n\n\tejectMediaCmd := NewEjectMediaCommand(rootSettings)\n\tbaremetalRootCmd.AddCommand(ejectMediaCmd)\n\n\tpowerOffCmd := 
NewPowerOffCommand(rootSettings)\n\tbaremetalRootCmd.AddCommand(powerOffCmd)\n\n\tpowerOnCmd := NewPowerOnCommand(rootSettings)\n\tbaremetalRootCmd.AddCommand(powerOnCmd)\n\n\tpowerStatusCmd := NewPowerStatusCommand(rootSettings)\n\tbaremetalRootCmd.AddCommand(powerStatusCmd)\n\n\trebootCmd := NewRebootCommand(rootSettings)\n\tbaremetalRootCmd.AddCommand(rebootCmd)\n\n\tremoteDirectCmd := NewRemoteDirectCommand(rootSettings)\n\tbaremetalRootCmd.AddCommand(remoteDirectCmd)\n\n\treturn baremetalRootCmd\n}", "func (o *DeployKey) GetRepositoryOk() (*Repository, bool) {\n\tif o == nil || o.Repository == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Repository, true\n}", "func OpenRepository(path string) (*Repository, error) {\n\n\tpath, err := filepath.Abs(path)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"git: could not determine absolute path\")\n\t}\n\n\tif !IsBareRepository(path) {\n\t\treturn nil, fmt.Errorf(\"git: not a bare repository\")\n\t}\n\n\treturn &Repository{Path: path}, nil\n}", "func IsBranchOfRepoRequirePullRequest(repoID int64, name string) bool {\n\tprotectBranch, err := GetProtectBranchOfRepoByName(repoID, name)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn protectBranch.Protected && protectBranch.RequirePullRequest\n}", "func NewBareMetalNoAuth(eo EndpointOpts) (*gophercloud.ServiceClient, error) {\n\tsc, err := initClientOpts(&gophercloud.ProviderClient{}, eo)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsc.Type = \"baremetal\"\n\n\treturn sc, nil\n}", "func (o *CreateRepository32Forbidden) IsSuccess() bool {\n\treturn false\n}", "func (o *CreateRepository28Forbidden) IsSuccess() bool {\n\treturn false\n}", "func (i Inventory) BaremetalInventory() (ifc.BaremetalInventory, error) {\n\tcfg, err := i.Factory()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmgmCfg, err := cfg.CurrentContextManagementConfig()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttargetPath, err := cfg.CurrentContextTargetPath()\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\n\tphaseDir, err := cfg.CurrentContextInventoryRepositoryName()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmetadataPath, err := cfg.CurrentContextMetadataPath()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmetadataBundle := filepath.Join(targetPath, phaseDir, metadataPath)\n\n\tmeta, err := metadata.Config(metadataBundle)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinventoryBundle := filepath.Join(targetPath, phaseDir, meta.InventoryPath)\n\tbundle, err := document.NewBundleByPath(inventoryBundle)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn baremetal.NewInventory(mgmCfg, bundle), nil\n}", "func (ctx *Context) IsHead() bool {\r\n\treturn ctx.Is(\"HEAD\")\r\n}", "func (r *RepoRef) IsRemote() bool {\n\treturn r.URL != \"\"\n}", "func (r *Repository) IsEmpty() (bool, error) {\n\titer, err := r.Refs()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tvar count int\n\treturn count == 0, iter.ForEach(func(r *plumbing.Reference) error {\n\t\tcount++\n\t\treturn nil\n\t})\n}", "func handleRepo(ctx context.Context, client *github.Client, repo *github.Repository) error {\n\topt := &github.ListOptions{\n\t\tPerPage: 100,\n\t}\n\n\tbranches, resp, err := client.Repositories.ListBranches(ctx, *repo.Owner.Login, *repo.Name, opt)\n\tif resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusForbidden {\n\t\treturn nil\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, branch := range branches {\n\t\tif branch.GetName() == \"master\" && in(orgs, *repo.Owner.Login) {\n\t\t\t// we must get the individual branch for the branch protection to work\n\t\t\tb, _, err := client.Repositories.GetBranch(ctx, *repo.Owner.Login, *repo.Name, branch.GetName())\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// return early if it is already protected\n\t\t\tif b.GetProtected() {\n\t\t\t\tfmt.Printf(\"[OK] %s:%s is already protected\\n\", *repo.FullName, b.GetName())\n\t\t\t\treturn 
nil\n\t\t\t}\n\n\t\t\tfmt.Printf(\"[UPDATE] %s:%s will be changed to protected\\n\", *repo.FullName, b.GetName())\n\t\t\tif dryrun {\n\t\t\t\t// return early\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\t// set the branch to be protected\n\t\t\tif _, _, err := client.Repositories.UpdateBranchProtection(ctx, *repo.Owner.Login, *repo.Name, b.GetName(), &github.ProtectionRequest{\n\t\t\t\tRequiredStatusChecks: &github.RequiredStatusChecks{\n\t\t\t\t\tStrict: false,\n\t\t\t\t\tContexts: []string{},\n\t\t\t\t},\n\t\t\t}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (o *CreateRepository8Forbidden) IsSuccess() bool {\n\treturn false\n}", "func IsGit(path string) bool {\n\tif strings.HasSuffix(path, \".git\") || strings.HasPrefix(path, \"git@\") {\n\t\treturn true\n\t}\n\turl, err := url.Parse(path)\n\tif err != nil {\n\t\treturn false\n\t}\n\tif url.Scheme == \"\" {\n\t\turl.Scheme = \"https\"\n\t}\n\tresp, err := http.Head(url.String() + \".git\")\n\tif err != nil {\n\t\treturn false\n\t}\n\tfor _, status := range []int{200, 301, 302, 401} {\n\t\tif resp.StatusCode == status {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (h BareTlfHandle) IsReader(user keybase1.UID) bool {\n\treturn h.IsPublic() || h.findUserInList(user, h.Readers) || h.IsWriter(user)\n}", "func IsBrokerRequired(mode *string) bool {\n\treturn *mode == Broker || *mode == Hybrid\n}", "func (s *GitRepoSyncer) IsCloneable(ctx context.Context, remoteURL *vcs.URL) error {\n\tif strings.ToLower(string(protocol.NormalizeRepo(api.RepoName(remoteURL.String())))) == \"github.com/sourcegraphtest/alwayscloningtest\" {\n\t\treturn nil\n\t}\n\tif testGitRepoExists != nil {\n\t\treturn testGitRepoExists(ctx, remoteURL)\n\t}\n\n\targs := []string{\"ls-remote\", remoteURL.String(), \"HEAD\"}\n\tctx, cancel := context.WithTimeout(ctx, shortGitCommandTimeout(args))\n\tdefer cancel()\n\n\tcmd := exec.CommandContext(ctx, \"git\", args...)\n\tout, err := runWithRemoteOpts(ctx, cmd, 
nil)\n\tif err != nil {\n\t\tif ctxerr := ctx.Err(); ctxerr != nil {\n\t\t\terr = ctxerr\n\t\t}\n\t\tif len(out) > 0 {\n\t\t\terr = errors.Errorf(\"%s (output follows)\\n\\n%s\", err, out)\n\t\t}\n\t\treturn err\n\t}\n\treturn nil\n}", "func TestBareReadCommand(t *testing.T) {\n\n\t// Run the command\n\toutput := executeCommand(\"read\")\n\n\t// We should have a bucket required error but no usage displayed\n\trequire.NotNil(t, executeError, \"there should have been an error\")\n\trequire.Equal(t, \"An S3 bucket name must be provided\", executeError.Error(), \"Expected S3 bucket name required error\")\n\trequire.Empty(t, output, \"Expected no usage display\")\n}", "func BareNamespace() *Namespace {\n\treturn &Namespace{\n\t\tDomains: make(map[string]string),\n\t\tContributors: map[string]*BaseContributor{},\n\t}\n}", "func TestIdenitfyRepositoryWithLeadingSlash(t *testing.T) {\n\taccount, repository, err := IdentifyRepository(\n\t\t[]string{\n\t\t\t\"deploy-keychain\",\n\t\t\t\"[email protected]\",\n\t\t\t\"git-upload-pack '/nint8835/deploy-keychain.git'\",\n\t\t},\n\t)\n\n\tif err != nil {\n\t\tt.Errorf(\"identifying repo returned unexpected error: %s\", err)\n\t}\n\n\tif account != \"nint8835\" {\n\t\tt.Errorf(\"identify repo returned unexpected account: %s\", account)\n\t}\n\n\tif repository != \"deploy-keychain\" {\n\t\tt.Errorf(\"identify repo returned unexpected repository: %s\", repository)\n\t}\n}", "func (o *CreateRepository38Forbidden) IsSuccess() bool {\n\treturn false\n}", "func createRepository(t *testing.T) *git.Repository {\n\t// Create the repo\n\tr, err := git.Init(memory.NewStorage(), memfs.New())\n\tif err != nil {\n\t\tt.Errorf(\"Unable to create repository for testing: %s\", err.Error())\n\t}\n\n\treturn r\n}", "func (repo Repository) Type() string {\n\treturn \"bolt\"\n}", "func (input *BeegoInput) IsHead() bool {\n\treturn input.Is(\"HEAD\")\n}", "func (b *Box) Available() bool {\n\treturn b.Status == StatusDeploying ||\n\t\tb.Status == 
StatusCreating ||\n\t\tb.Status == StatusError\n}", "func describeRepository(flags *pflag.FlagSet, image string) error {\n\torg, _, err := dockerhub.GetFlags(flags)\n\tif err != nil {\n\t\tcolor.Red(\"Error: %s\", err)\n\t}\n\n\trepoInfo, err := dockerhub.NewClient(org, \"\").DescribeRepository(image)\n\tif err != nil {\n\t\tcolor.Red(\"Error: %s\", err)\n\t}\n\n\tcolor.Blue(\"User: \" + repoInfo.User +\n\t\t\"\\nName: \" + repoInfo.Name +\n\t\t\"\\nNamespace: \" + repoInfo.Namespace +\n\t\t\"\\nRepositoryType: \" + repoInfo.RepositoryType +\n\t\t\"\\nStatus: \" + fmt.Sprintf(\"%d\", repoInfo.Status) +\n\t\t\"\\nDescription: \" + repoInfo.Description +\n\t\t\"\\nIsPrivate: \" + fmt.Sprintf(\"%t\", repoInfo.IsPrivate) +\n\t\t\"\\nIsAutomated: \" + fmt.Sprintf(\"%t\", repoInfo.IsAutomated) +\n\t\t\"\\nCanEdit: \" + fmt.Sprintf(\"%t\", repoInfo.CanEdit) +\n\t\t\"\\nStarCount: \" + fmt.Sprintf(\"%d\", repoInfo.StarCount) +\n\t\t\"\\nPullCount: \" + fmt.Sprintf(\"%d\", repoInfo.PullCount) +\n\t\t\"\\nLastUpdated: \" + fmt.Sprint(repoInfo.LastUpdated) +\n\t\t\"\\nIsMigrated: \" + fmt.Sprintf(\"%t\", repoInfo.IsMigrated) +\n\t\t\"\\nCollaboratorCount: \" + fmt.Sprintf(\"%d\", repoInfo.CollaboratorCount) +\n\t\t\"\\nAffiliation: \" + repoInfo.Affiliation +\n\t\t\"\\nHubUser: \" + repoInfo.HubUser)\n\n\treturn nil\n}", "func (s *PerforceDepotSyncer) IsCloneable(ctx context.Context, remoteURL *vcs.URL) error {\n\tusername, password, host, _, err := decomposePerforceRemoteURL(remoteURL)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"decompose\")\n\t}\n\n\t// FIXME: Need to find a way to determine if depot exists instead of a general ping to the Perforce server.\n\treturn p4pingWithTrust(ctx, host, username, password)\n}", "func (c *Contract) ReadyRepo(ctx TransactionContextInterface, jeweler string, paperNumber string, repurchaser string, readyDateTime string) (*InventoryFinancingPaper, error) {\r\n\tpaper, err := ctx.GetPaperList().GetPaper(jeweler, 
paperNumber)\r\n\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\r\n\tif paper.GetRepurchaser() == \"\" {\r\n\t\tpaper.SetRepurchaser(repurchaser)\r\n\t}\r\n\r\n\tif paper.IsEvaluated() {\r\n\t\tpaper.SetReadyREPO()\r\n\t}\r\n\r\n\tif !paper.IsReadyREPO() {\r\n\t\treturn nil, fmt.Errorf(\"inventory paper %q:%q is waiting for REPO's ready. Current state = %q\", jeweler, paperNumber, paper.GetState())\r\n\t}\r\n\r\n\terr = ctx.GetPaperList().UpdatePaper(paper)\r\n\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\tfmt.Printf(\"The repurchaser %q is ready to REPO the inventory financing paper %q:%q, the ready date is %q.\\nCurrent state = %q\", paper.GetRepurchaser(), jeweler, paperNumber, readyDateTime, paper.GetState())\r\n\treturn paper, nil\r\n}", "func IsRepoURL(value string) bool {\n\treturn Regex.Match([]byte(value))\n}", "func IsStaging() bool {\n\treturn strings.ToLower(env) == staging\n}", "func repackIfNeeded(ctx context.Context, repo *localrepo.Repo) (bool, RepackObjectsConfig, error) {\n\trepackNeeded, cfg, err := needsRepacking(repo)\n\tif err != nil {\n\t\treturn false, RepackObjectsConfig{}, fmt.Errorf(\"determining whether repo needs repack: %w\", err)\n\t}\n\n\tif !repackNeeded {\n\t\treturn false, RepackObjectsConfig{}, nil\n\t}\n\n\tif err := RepackObjects(ctx, repo, cfg); err != nil {\n\t\treturn false, RepackObjectsConfig{}, err\n\t}\n\n\treturn true, cfg, nil\n}", "func GitRepositoryReadyMessage(repository GitRepository) string {\n\tif c := apimeta.FindStatusCondition(repository.Status.Conditions, meta.ReadyCondition); c != nil {\n\t\tif c.Status == metav1.ConditionTrue {\n\t\t\treturn c.Message\n\t\t}\n\t}\n\treturn \"\"\n}", "func (r *Repository) Mode() borges.Mode {\n\treturn r.mode\n}", "func TestBlameFileInRepo(t *testing.T) {\n\tdefer tests.PrepareTestEnv(t)()\n\n\tsession := loginUser(t, \"user2\")\n\n\treq := NewRequest(t, \"GET\", \"/user2/repo1/blame/branch/master/README.md\")\n\tresp := session.MakeRequest(t, req, 
http.StatusOK)\n\n\thtmlDoc := NewHTMLParser(t, resp.Body)\n\tdescription := htmlDoc.doc.Find(\"#repo-desc\")\n\trepoTopics := htmlDoc.doc.Find(\"#repo-topics\")\n\trepoSummary := htmlDoc.doc.Find(\".repository-summary\")\n\n\tassert.EqualValues(t, 0, description.Length())\n\tassert.EqualValues(t, 0, repoTopics.Length())\n\tassert.EqualValues(t, 0, repoSummary.Length())\n}", "func EnsureRepository(repo skyhook.PytorchRepository) error {\n\thash := repo.Hash()\n\n\t// does it already exist?\n\tpath := filepath.Join(\"data/models\", hash)\n\tif _, err := os.Stat(path); err == nil {\n\t\treturn nil\n\t}\n\n\t// clone the repository\n\tlog.Printf(\"[pytorch] cloning repository %s@%s\", repo.URL, repo.Commit)\n\tcmd := exec.Command(\n\t\t\"git\", \"clone\", repo.URL, path,\n\t)\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tif err := cmd.Run(); err != nil {\n\t\treturn err\n\t}\n\n\tif repo.Commit != \"\" {\n\t\tcmd = exec.Command(\n\t\t\t\"git\", \"checkout\", repo.Commit,\n\t\t)\n\t\tcmd.Dir = path\n\t\tcmd.Stdout = os.Stdout\n\t\tcmd.Stderr = os.Stderr\n\t\tif err := cmd.Run(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func isTombstone(b []byte) bool {\n\treturn len(b) == markedRevBytesLen && b[markBytePosition] == markTombstone\n}", "func isTombstone(b []byte) bool {\n\treturn len(b) == markedRevBytesLen && b[markBytePosition] == markTombstone\n}", "func (g *GitStatusWidget) IsReady() bool {\n\treturn g.isReady\n}", "func (c *Config) IsGitLab() bool {\n\treturn c.GitLab.ClientID != \"\"\n}", "func (r RepositoryImpl) IsAlive() bool {\n\treturn r.conn.IsAlive()\n}", "func (r RepositoryImpl) IsAlive() bool {\n\treturn r.conn.IsAlive()\n}", "func GitRepositoryProgressing(repository GitRepository) GitRepository {\n\trepository.Status.ObservedGeneration = repository.Generation\n\trepository.Status.URL = \"\"\n\trepository.Status.Conditions = []metav1.Condition{}\n\tmeta.SetResourceCondition(&repository, meta.ReadyCondition, 
metav1.ConditionUnknown, meta.ProgressingReason, \"reconciliation in progress\")\n\treturn repository\n}", "func IsLatest(sha string) bool {\n\treturn sha == \"\" || sha == \"latest\"\n}", "func (o *CreateRepository37Forbidden) IsSuccess() bool {\n\treturn false\n}", "func pruneIfNeeded(ctx context.Context, repo *localrepo.Repo) (bool, error) {\n\t// Pool repositories must never prune any objects, or otherwise we may corrupt members of\n\t// that pool if they still refer to that object.\n\tif IsPoolPath(repo.GetRelativePath()) {\n\t\treturn false, nil\n\t}\n\n\t// Only count objects older than two weeks. Objects which are more recent than that wouldn't\n\t// get pruned anyway and thus cause us to prune all the time during the grace period.\n\tlooseObjectCount, err := estimateLooseObjectCount(repo, time.Now().AddDate(0, 0, -14))\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"estimating loose object count: %w\", err)\n\t}\n\n\t// We again use the same limit here as we do when doing an incremental repack. This is done\n\t// intentionally: if we determine that there's too many loose objects and try to repack, but\n\t// all of those loose objects are in fact unreachable, then we'd still have the same number\n\t// of unreachable objects after the incremental repack. We'd thus try to repack every single\n\t// time.\n\t//\n\t// Using the same limit here doesn't quite fix this case: the unreachable objects would only\n\t// be pruned after a grace period of two weeks. Because of that we only count objects which\n\t// are older than this grace period such that we don't prune if there aren't any old and\n\t// unreachable objects.\n\tif looseObjectCount <= looseObjectLimit {\n\t\treturn false, nil\n\t}\n\n\tif err := repo.ExecAndWait(ctx, git.SubCmd{\n\t\tName: \"prune\",\n\t\tFlags: []git.Option{\n\t\t\t// By default, this prunes all unreachable objects regardless of when they\n\t\t\t// have last been accessed. 
This opens us up for races when there are\n\t\t\t// concurrent commands which are just at the point of writing objects into\n\t\t\t// the repository, but which haven't yet updated any references to make them\n\t\t\t// reachable. We thus use the same two-week grace period as git-gc(1) does.\n\t\t\tgit.ValueFlag{Name: \"--expire\", Value: \"two.weeks.ago\"},\n\t\t},\n\t}); err != nil {\n\t\treturn false, fmt.Errorf(\"pruning objects: %w\", err)\n\t}\n\n\treturn true, nil\n}", "func (in *BaremetalStatus) DeepCopy() *BaremetalStatus {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(BaremetalStatus)\n\tin.DeepCopyInto(out)\n\treturn out\n}" ]
[ "0.80748403", "0.65109813", "0.6356934", "0.6285676", "0.6277902", "0.6159446", "0.58196324", "0.5621973", "0.5499171", "0.5424167", "0.5416235", "0.5323672", "0.51931185", "0.5172835", "0.5115312", "0.50820893", "0.5052068", "0.50374174", "0.49954063", "0.49940842", "0.49926752", "0.49635446", "0.4937245", "0.49292874", "0.4925992", "0.4921105", "0.49124902", "0.48891768", "0.48666596", "0.48567575", "0.48567575", "0.48491716", "0.48410812", "0.48269337", "0.4819063", "0.4812045", "0.48073274", "0.47734776", "0.47677588", "0.47478357", "0.4737495", "0.470533", "0.47001398", "0.46726277", "0.46726185", "0.46626666", "0.46576098", "0.45977724", "0.45942998", "0.45827124", "0.45765415", "0.45743668", "0.45719233", "0.45478636", "0.4545319", "0.45131502", "0.45122504", "0.45063767", "0.45030683", "0.45012262", "0.44995925", "0.44691196", "0.44624504", "0.445565", "0.44556135", "0.44489443", "0.44445226", "0.4440637", "0.4430025", "0.44197294", "0.44131047", "0.438274", "0.43815428", "0.43810308", "0.43693995", "0.43693", "0.43659407", "0.43587616", "0.43568295", "0.4353019", "0.43470633", "0.4342077", "0.43343556", "0.43232003", "0.43205655", "0.43139094", "0.43077576", "0.43054625", "0.43048844", "0.43020198", "0.43020198", "0.4299081", "0.42954198", "0.428541", "0.428541", "0.42829293", "0.42721137", "0.4267375", "0.42507547", "0.42497313" ]
0.8010365
1
Config returns git config object
func (v Repository) Config() GitConfig { return v.gitConfig }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (v Repository) Config() goconfig.GitConfig {\n\tcfg, err := goconfig.Load(v.configFile())\n\tif err != nil && err != goconfig.ErrNotExist {\n\t\tlog.Fatalf(\"fail to load config: %s: %s\", v.configFile(), err)\n\t}\n\tif cfg == nil {\n\t\tcfg = goconfig.NewGitConfig()\n\t}\n\treturn cfg\n}", "func (opt *Options) Config() (*config.Config, error) {\n\tw, err := os.Getwd()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tgitCmd, err := git.New(w)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc := &config.Config{\n\t\tGitCommand: gitCmd,\n\n\t\tRemote: opt.Remote,\n\t\tRef: opt.Ref,\n\n\t\tAddr: opt.Addr,\n\t\tGracefulShutdownTimeout: opt.GracefulShutdownTimeout,\n\n\t\tToken: opt.Token,\n\t}\n\n\treturn c, nil\n}", "func (r Repository) Config(opt *ConfigOptions) (map[string]string, error) {\n\tif opt == nil {\n\t\topt = &ConfigOptions{}\n\t}\n\ttext, err := r.run(nil, opt.Timeout, \"config\", \"-l\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlines := strings.Split(text, \"\\n\")\n\tout := make(map[string]string, len(lines))\n\tfor _, line := range lines {\n\t\tidx := strings.Index(line, \"=\")\n\t\tif idx > 0 {\n\t\t\tkey, value := line[:idx], line[idx+1:]\n\t\t\tout[key] = value\n\t\t}\n\t}\n\treturn out, nil\n}", "func GetConfig(connection *plugin.Connection) githubConfig {\n\tif connection == nil || connection.Config == nil {\n\t\treturn githubConfig{}\n\t}\n\tconfig, _ := connection.Config.(githubConfig)\n\treturn config\n}", "func ReadConfig(ctx context.Context, git *Tool) (*Config, error) {\n\tp, err := git.Start(ctx, \"config\", \"-z\", \"--list\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"read git config: %v\", err)\n\t}\n\tcfg, parseErr := parseConfig(p)\n\twaitErr := p.Wait()\n\tif waitErr != nil {\n\t\treturn nil, fmt.Errorf(\"read git config: %v\", waitErr)\n\t}\n\tif parseErr != nil {\n\t\treturn nil, fmt.Errorf(\"read git config: %v\", parseErr)\n\t}\n\treturn cfg, nil\n}", "func (b *Bridge) Config() (Config, error) {\n\tconfig := 
Config{}\n\tif !b.isAvailable() {\n\t\treturn config, ErrBridgeNotAvailable\n\t}\n\n\turl := b.baseURL.String() + \"api/\" + b.Username + \"/config\"\n\n\tres, err := http.Get(url)\n\tif err != nil {\n\t\treturn config, err\n\t}\n\n\terr = json.NewDecoder(res.Body).Decode(&config)\n\treturn config, err\n}", "func NewConfig() Config {\n\tc := Config{}\n\tc.GitTime = os.Getenv(\"GIT_TIME\")\n\tc.GitBranch = os.Getenv(\"GIT_BRANCH\")\n\tc.GitHash = os.Getenv(\"GIT_HASH\")\n\tc.BuildTime = convertTime(c.GitTime)\n\n\treturn c\n}", "func GetConfig() *Config {\n\tgithubRef := os.Getenv(\"GH_REF\")\n\tgithubRepo := os.Getenv(\"GH_REPO\")\n\ttoken := os.Getenv(\"GH_TOKEN\")\n\tdockerRepo := os.Getenv(\"DOCKER_REPO\")\n\tdockerTag := os.Getenv(\"DOCKER_TAG\")\n\n\tworkflowID := os.Getenv(\"GH_WORKFLOW\")\n\n\tcfg := &Config{\n\t\tGithubRef: githubRef,\n\t\tGithubRepo: githubRepo,\n\t\tGithubToken: token,\n\t\tGithubWorkflowID: workflowID,\n\t\tDockerRepo: dockerRepo,\n\t\tDockerTag: dockerTag,\n\t}\n\n\treturn cfg\n}", "func getConfig() Conf {\n\tfile, _ := os.Open(\"config/config.json\")\n\tdefer file.Close()\n\tdecoder := json.NewDecoder(file)\n\tConfig := defaultConfig()\n\terr := decoder.Decode(&Config)\n\tif err != nil {\n\t\tfmt.Println(\"error:\", err)\n\t}\n\treturn Config\n}", "func GetConfig() models.Config {\n\t// Use by default ./config.json\n\tconfig, err := loadConfiguration(\"./config.json\")\n\tif err != nil {\n\t\t// If it doesn't exist, take environnement variables\n\t\tconfig = models.Config{\n\t\t\tURI: os.Getenv(\"MONGODB_URI\"),\n\t\t\tDatabase: os.Getenv(\"DATABASE\"),\n\t\t\tSecret: os.Getenv(\"SECRET\"),\n\t\t\tGmailAddress: os.Getenv(\"GMAIL_ADDRESS\"),\n\t\t\tGmailPassword: os.Getenv(\"GMAIL_PASSWORD\"),\n\t\t\tStripeKey: os.Getenv(\"STRIPE_KEY\"),\n\t\t\tBucketName: os.Getenv(\"BUCKET_NAME\"),\n\t\t\tBucketPubURL: os.Getenv(\"BUCKET_PUB_URL\"),\n\t\t}\n\t\treturn config\n\t}\n\treturn config\n}", "func getConfig() (*config, error) {\n\tret := 
&config{}\n\tif err := c.Load(ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n}", "func (b *backend) Config(ctx context.Context, s logical.Storage) (*config, error) {\n\tentry, err := s.Get(ctx, \"config\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif entry == nil {\n\t\treturn nil, nil\n\t}\n\n\tvar result config\n\tif err := entry.DecodeJSON(&result); err != nil {\n\t\treturn nil, fmt.Errorf(\"error reading configuration: %s\", err)\n\t}\n\n\tif len(result.TokenPolicies) == 0 && len(result.Policies) > 0 {\n\t\tresult.TokenPolicies = result.Policies\n\t}\n\n\treturn &result, nil\n}", "func GetConfig() (config *Config, err error) {\n\tconfigpath := path.Join(os.Getenv(\"GOPATH\"), \"src\", \"hellofresh\", \"config.json\")\n\tconfigFile, err := os.Open(configpath)\n\tdefer configFile.Close()\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\tjsonParser := json.NewDecoder(configFile)\n\tif err = jsonParser.Decode(&config); err != nil {\n\t\treturn\n\t}\n\n\treturn\n}", "func (app *Application) GetConfig() *Config {\n return app.config\n}", "func config() (*Config, error) {\n\tif cfg != nil {\n\t\treturn cfg, nil\n\t}\n\treturn nil, ErrConfigNotLoaded\n}", "func (c *Command) GetConfig() *commonEthereum.Config {\n\treturn c.config\n}", "func ParseConfig() (models.SynchroGitSettings, error) {\n\n\tconfig := os.Getenv(\"CONFIG\");\n\tsynchroGitConfigFile, err := os.Open(config)\n\tsettings := new(models.SynchroGitSettings)\n\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Error while opening synchroGitSync.json : %s\\n\", err.Error())\n\t\tos.Exit(1)\n\t}\n\n\terr = json.NewDecoder(synchroGitConfigFile).Decode(&settings)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Error while parsing synchroGitSync.json : %s\\n\", err.Error())\n\t\tos.Exit(1)\n\t}\n\n\tfmt.Println(\"Successfully imported synchroGitSettings from synchroGitSync.json\")\n\tfmt.Printf(\"Detected %d repos to sync\\n\", len(settings.Syncs))\n\tsynchroGitConfigFile.Close()\n\treturn 
*settings, err\n}", "func Create(c *gin.Context) apierror.APIErrors {\n\tctx := c.Request.Context()\n\tlogger := requestctx.Logger(ctx)\n\n\tcluster, err := kubernetes.GetCluster(ctx)\n\tif err != nil {\n\t\treturn apierror.InternalError(err)\n\t}\n\n\tvar request models.GitconfigCreateRequest\n\terr = c.BindJSON(&request)\n\tif err != nil {\n\t\treturn apierror.NewBadRequestError(err.Error())\n\t}\n\n\tgitconfigName := request.ID\n\tif gitconfigName == \"\" {\n\t\treturn apierror.NewBadRequestError(\"name of gitconfig to create not found\")\n\t}\n\terrorMsgs := validation.IsDNS1123Subdomain(gitconfigName)\n\tif len(errorMsgs) > 0 {\n\t\treturn apierror.NewBadRequestErrorf(\"Git configurations' name must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character (e.g. 'my-name', or '123-abc').\")\n\t}\n\n\tmanager, err := gitbridge.NewManager(logger, cluster.Kubectl.CoreV1().Secrets(helmchart.Namespace()))\n\tif err != nil {\n\t\treturn apierror.InternalError(err, \"creating git configuration manager\")\n\t}\n\n\tgitconfigList := manager.Configurations\n\n\t// see delete.go\n\tgcNames := map[string]struct{}{}\n\tfor _, gitconfig := range gitconfigList {\n\t\tgcNames[gitconfig.ID] = struct{}{}\n\t}\n\n\t// already known ?\n\tif _, ok := gcNames[gitconfigName]; ok {\n\t\treturn apierror.NewConflictError(\"gitconfig\", gitconfigName)\n\t}\n\n\tsecret := gitbridge.NewSecretFromConfiguration(gitbridge.Configuration{\n\t\tID: request.ID,\n\t\tURL: request.URL,\n\t\tProvider: request.Provider,\n\t\tUsername: request.Username,\n\t\tPassword: request.Password,\n\t\tUserOrg: request.UserOrg,\n\t\tRepository: request.Repository,\n\t\tSkipSSL: request.SkipSSL,\n\t\tCertificate: request.Certificates,\n\t})\n\n\terr = cluster.CreateSecret(ctx, helmchart.Namespace(), secret)\n\tif err != nil {\n\t\treturn apierror.InternalError(err)\n\t}\n\n\terr = addGitconfigToUser(ctx, request.ID)\n\tif err != nil {\n\t\treturn 
apierror.InternalError(err)\n\t}\n\n\tresponse.Created(c)\n\treturn nil\n}", "func RepoConfig() (models.MultiRepoConfig, Error) {\n\n\trepoConfigFile := filepath.Join(\"./\", repoConfigFileName)\n\tymlRepoConfigFile := filepath.Join(\"./\", ymlRepoConfigFileName)\n\n\tif utils.Exists(repoConfigFile) {\n\t\tutils.LogDebug(fmt.Sprintf(\"Reading repo config file %s\", repoConfigFile))\n\n\t\tyamlFile, err := ioutil.ReadFile(repoConfigFile) // #nosec G304\n\n\t\tif err != nil {\n\t\t\tvar e Error\n\t\t\te.Err = err\n\t\t\te.Message = \"Unable to read doppler repo config file\"\n\t\t\treturn models.MultiRepoConfig{}, e\n\t\t}\n\n\t\tvar repoConfig models.MultiRepoConfig\n\n\t\tif err := yaml.Unmarshal(yamlFile, &repoConfig); err != nil {\n\t\t\t// Try parsing old repoConfig format (i.e., no slice) for backwards compatibility\n\t\t\tvar oldRepoConfig models.RepoConfig\n\t\t\tif err := yaml.Unmarshal(yamlFile, &oldRepoConfig); err != nil {\n\t\t\t\tvar e Error\n\t\t\t\te.Err = err\n\t\t\t\te.Message = \"Unable to parse doppler repo config file\"\n\t\t\t\treturn models.MultiRepoConfig{}, e\n\t\t\t} else {\n\t\t\t\trepoConfig.Setup = append(repoConfig.Setup, oldRepoConfig.Setup)\n\t\t\t\treturn repoConfig, Error{}\n\t\t\t}\n\t\t}\n\n\t\treturn repoConfig, Error{}\n\t} else if utils.Exists(ymlRepoConfigFile) {\n\t\tutils.LogWarning(fmt.Sprintf(\"Found %s file, please rename to %s for repo configuration\", ymlRepoConfigFile, repoConfigFileName))\n\t} else {\n\t\t// If no config file exists, then this is for an interactive setup, so\n\t\t// return a MultiRepoConfig object containing an empty ProjectConfig object\n\t\tvar repoConfig models.MultiRepoConfig\n\t\trepoConfig.Setup = append(repoConfig.Setup, models.ProjectConfig{Path: configuration.Scope})\n\t\treturn repoConfig, Error{}\n\t}\n\treturn models.MultiRepoConfig{}, Error{}\n}", "func (p *Platform) Config() interface{} {\n\treturn p.config\n}", "func (conf *Conf) OverwriteFromGit(repo *Repository) (err error) {\n\tbuf := 
bytes.NewBuffer(nil)\n\terr = repo.Git(context.Background(), nil, buf, \"config\", \"--get-regexp\", \"^bits\")\n\tif err != nil {\n\t\treturn nil //no bits conf, nothing to do\n\t}\n\n\ts := bufio.NewScanner(buf)\n\tfor s.Scan() {\n\t\tfields := strings.Fields(s.Text())\n\t\tif len(fields) < 2 {\n\t\t\treturn fmt.Errorf(\"unexpected configuration returned from git: %v\", s.Text())\n\t\t}\n\n\t\tswitch fields[0] {\n\t\tcase \"bits.deduplication-scope\":\n\t\t\tscope, err := strconv.ParseUint(fields[1], 10, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"unexpected format for configured dedup scope '%v', expected a base10 number\", fields[1])\n\t\t\t}\n\n\t\t\tconf.DeduplicationScope = scope\n\t\tcase \"bits.aws-s3-bucket-name\":\n\t\t\tconf.AWSS3BucketName = fields[1]\n\t\tcase \"bits.aws-access-key-id\":\n\t\t\tconf.AWSAccessKeyID = fields[1]\n\t\tcase \"bits.aws-secret-access-key\":\n\t\t\tconf.AWSSecretAccessKey = fields[1]\n\t\t}\n\t}\n\n\treturn nil\n}", "func (repo ConfigRepository) Get() (*domain.Config, error) {\n\trepo.ensureConfigFile()\n\tconfigFile, err := os.Open(config.ConfigFilePath)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer configFile.Close()\n\tconfigData, err := ioutil.ReadAll(configFile)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tvar config domain.Config\n\terr = json.Unmarshal(configData, &config)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn &config, nil\n}", "func initConfig() {\n\ttoken := rootCmd.Flag(\"token\").Value.String()\n\tgiturl := rootCmd.Flag(\"giturl\").Value.String()\n\tGitClient = gitlab.NewClient(nil, token)\n\tGitClient.SetBaseURL(giturl + \"/api/v4/\")\n}", "func GetConfig() *Config {\n\treturn &Config{\n\t\tDB: &DBConfig{\n\t\t\tUsername: \"postgres\",\n\t\t\tPassword: \"cristiano1994\",\n\t\t\tDatabase: \"spataro_visitas\",\n\t\t\tPort: 5432,\n\t\t\tHost: \"localhost\",\n\t\t},\n\t}\n}", "func GetConfig() interface{} {\n\treturn std.GetConfig()\n}", "func GetConfig() Config {\n\treturn cfg\n}", "func 
GetConfig() *Config {\n\treturn config\n}", "func Get() *Config {\n\treturn config\n}", "func initConfig() (*Config, error) {\n\tghEmail := os.Getenv(\"GH_EMAIL\")\n\tif ghEmail == \"\" {\n\t\tlog.Fatalf(\"GitHub user email is empty\")\n\t}\n\n\tghToken := os.Getenv(\"GH_TOKEN\")\n\tif ghToken == \"\" {\n\t\tlog.Fatalf(\"GitHub auth token is empty\")\n\t}\n\n\tif ghEmail != \"\" || ghToken != \"\" {\n\t\treturn &Config{\n\t\t\tGHAuthEmail: ghEmail,\n\t\t\tGHAuthToken: ghToken,\n\t\t}, nil\n\t}\n\treturn nil, fmt.Errorf(\"[ERR] No config loaded\")\n}", "func (o Ocs) GetConfig(w http.ResponseWriter, r *http.Request) {\n\tmustNotFail(render.Render(w, r, response.DataRender(&data.ConfigData{\n\t\tVersion: \"1.7\", // TODO get from env\n\t\tWebsite: \"ocis\", // TODO get from env\n\t\tHost: \"\", // TODO get from FRONTEND config\n\t\tContact: \"\", // TODO get from env\n\t\tSSL: \"true\", // TODO get from env\n\t})))\n}", "func (b *bot) Config() *config.Config {\n\treturn b.config\n}", "func (c *Config) Config() interface{} {\n\treturn c.config.ConfigTree\n}", "func getConfig() (*config, error) {\n\tret := new(config)\n\tif err := conf.Load(ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n}", "func (c *Conn) Config() *ConnConfig { return c.config.Copy() }", "func (backend *Backend) Config() *config.Config {\n\treturn backend.config\n}", "func (c *GinHttp) getConfig() *GinHttp {\n\tif _, err := toml.DecodeFile(ConfPath+c.getTomlFile(), &c); err != nil {\n\t\tfmt.Println(err)\n\t\treturn c\n\t}\n\treturn c\n}", "func GetConfig() (config *Config) {\n\tpwd, _ := os.Getwd()\n\tpath := path.Join(pwd, \"config.json\")\n\tconfigFile, err := os.Open(path)\n\tdefer configFile.Close()\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tjsonParser := json.NewDecoder(configFile)\n\tif err = jsonParser.Decode(&config); err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn\n}", "func GetConfig() (config *Config) {\n\tpwd, _ := os.Getwd()\n\tpath := path.Join(pwd, 
\"config.json\")\n\tconfigFile, err := os.Open(path)\n\tdefer configFile.Close()\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tjsonParser := json.NewDecoder(configFile)\n\tif err = jsonParser.Decode(&config); err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn\n}", "func config(c context.Context) *Config {\n\tcfg, _ := c.Value(configContextKey(0)).(*Config)\n\tif cfg == nil {\n\t\tpanic(\"impossible, configContextKey is not set\")\n\t}\n\treturn cfg\n}", "func GetConfig() Config {\n\tport, ok := os.LookupEnv(\"PORT\")\n\tif !ok {\n\t\tport = \"8080\"\n\t}\n\n\tenv, ok := os.LookupEnv(\"ENV\")\n\tif !ok {\n\t\tenv = \"development\"\n\t}\n\n\tpgHost, ok := os.LookupEnv(\"PG_HOST\")\n\tif !ok {\n\t\tpgHost = \"localhost\"\n\t}\n\n\tpgPort, ok := os.LookupEnv(\"PG_PORT\")\n\tif !ok {\n\t\tpgPort = \"5432\"\n\t}\n\n\tpgUser, ok := os.LookupEnv(\"PG_USER\")\n\tif !ok {\n\t\tpgUser = \"postgres\"\n\t}\n\n\tpgPassword, ok := os.LookupEnv(\"PG_PASSWORD\")\n\tif !ok {\n\t\tpgPassword = \"\"\n\t}\n\n\tpgDBName, ok := os.LookupEnv(\"PG_DB_NAME\")\n\tif !ok {\n\t\tpgDBName = \"ginexamples\"\n\t}\n\n\tlogFile, ok := os.LookupEnv(\"LOGFILE\")\n\tif !ok {\n\t\tlogFile = \"\"\n\t}\n\n\treturn Config{\n\t\tPort: port,\n\t\tEnv: env,\n\t\tPGHost: pgHost,\n\t\tPGPort: pgPort,\n\t\tPGUser: pgUser,\n\t\tPGPassword: pgPassword,\n\t\tPGDBName: pgDBName,\n\t\tLogFile: logFile,\n\t}\n}", "func GetConfig() Configuration{\n\tcurrentPath := files.GetCurrentDirectory()\n\tjsonFile, err := os.Open(currentPath + \"/config.json\")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tfmt.Println(currentPath)\n\tdefer jsonFile.Close()\n\tbyteValue, _ := ioutil.ReadAll(jsonFile)\n\tvar configuration Configuration\n\tjson.Unmarshal(byteValue, &configuration)\n\treturn configuration\n}", "func configDiffs(base string) (map[string]struct{}, error) {\n\tbase, err := filepath.Abs(base)\n\tif !strings.HasSuffix(base, string(filepath.Separator)) {\n\t\tbase += string(filepath.Separator)\n\t}\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\tout, err := runCmd(base, \"git\", \"rev-parse\", \"--show-toplevel\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tgitroot := strings.Trim(string(out), \"\\n\")\n\tlog.Infof(\"config git root %s\", gitroot)\n\n\tresults := map[string]struct{}{}\n\tout, err = runCmd(base, \"git\", \"status\", \"--porcelain\", \"-uall\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tscanner := bufio.NewScanner(bytes.NewReader(out))\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\tif !statusPattern.MatchString(line) {\n\t\t\tcontinue\n\t\t}\n\t\tpath := filepath.Join(gitroot, line[3:])\n\t\tif !strings.HasPrefix(path, base) {\n\t\t\tcontinue\n\t\t}\n\t\tresults[path[len(base):]] = struct{}{}\n\t}\n\treturn results, nil\n}", "func Get() *Config {\n\treturn &config\n}", "func Get() *Config {\n\treturn &config\n}", "func (_ GitConfig) Name() string {\n\treturn GitConfigName\n}", "func GetConfig() Config {\n\tif !loaded {\n\t\tbyteData, err := ioutil.ReadFile(\"config.json\")\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tjson.Unmarshal(byteData, &config)\n\t\tloaded = true\n\t}\n\treturn config\n}", "func (o *OAuth2) Config() *wx.AppConfig {\n\treturn o.config\n}", "func Config() models.ConfigStruct {\n\tconfig := models.ConfigStruct{}\n\tsource, err := ioutil.ReadFile(\"./config/config.yml\")\n\tif err != nil {\n\t\tsource, err = ioutil.ReadFile(\"../config/config.yml\")\n\t\tif err != nil {\n\t\t\tsource, err = ioutil.ReadFile(\"../../config/config.yml\")\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t}\n\t}\n\terr = yaml.Unmarshal(source, &config)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn config\n}", "func (s *Services) Config() *Configuration { return &s.config }", "func (s *configsService) Config(db *gorm.DB, app *App) (*Config, error) {\n\tr, err := releasesFind(db, ReleasesQuery{App: app})\n\tif err != nil {\n\t\tif err == gorm.RecordNotFound {\n\t\t\t// It's possible to have config without releases, this 
handles that.\n\t\t\tc, err := configsFind(db, ConfigsQuery{App: app})\n\t\t\tif err != nil {\n\t\t\t\tif err == gorm.RecordNotFound {\n\t\t\t\t\t// Return an empty config.\n\t\t\t\t\treturn &Config{\n\t\t\t\t\t\tAppID: app.ID,\n\t\t\t\t\t\tApp: app,\n\t\t\t\t\t\tVars: make(Vars),\n\t\t\t\t\t}, nil\n\t\t\t\t}\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\treturn c, nil\n\t\t}\n\n\t\treturn nil, err\n\t}\n\n\treturn r.Config, nil\n}", "func (serv *Server) Config() Config {\n return serv.config\n}", "func GetConfig() *Config {\n\treturn &cfg\n}", "func (margelet *Margelet) GetConfigRepository() *ChatConfigRepository {\n\treturn margelet.ChatConfigRepository\n}", "func GetCfg(appVersion string, gitRevision string) (Cfg, error) {\n\tversion := getopt.BoolLong(\"version\", 'V', \"Print version information and exit.\")\n\tlistPlugins := getopt.BoolLong(\"list-plugins\", 'l', \"Print the list of plugins.\")\n\thelp := getopt.BoolLong(\"help\", 'h', \"Print usage information and exit\")\n\trevalOnly := getopt.BoolLong(\"revalidate-only\", 'y', \"Whether to exclude files not named 'regex_revalidate.config'\")\n\tdir := getopt.StringLong(\"dir\", 'D', \"\", \"ATS config directory, used for config files without location parameters or with relative paths. May be blank. 
If blank and any required config file location parameter is missing or relative, will error.\")\n\tviaRelease := getopt.BoolLong(\"via-string-release\", 'r', \"Whether to use the Release value from the RPM package as a replacement for the ATS version specified in the build that is returned in the Via and Server headers from ATS.\")\n\tdnsLocalBind := getopt.BoolLong(\"dns-local-bind\", 'b', \"Whether to use the server's Service Addresses to set the ATS DNS local bind address.\")\n\tdisableParentConfigComments := getopt.BoolLong(\"disable-parent-config-comments\", 'c', \"Disable adding a comments to parent.config individual lines\")\n\tdefaultEnableH2 := getopt.BoolLong(\"default-client-enable-h2\", '2', \"Whether to enable HTTP/2 on Delivery Services by default, if they have no explicit Parameter. This is irrelevant if ATS records.config is not serving H2. If omitted, H2 is disabled.\")\n\tdefaultTLSVersionsStr := getopt.StringLong(\"default-client-tls-versions\", 'T', \"\", \"Comma-delimited list of default TLS versions for Delivery Services with no Parameter, e.g. '--default-tls-versions=1.1,1.2,1.3'. If omitted, all versions are enabled.\")\n\tnoOutgoingIP := getopt.BoolLong(\"no-outgoing-ip\", 'i', \"Whether to not set the records.config outgoing IP to the server's addresses in Traffic Ops. Default is false.\")\n\tatsVersion := getopt.StringLong(\"ats-version\", 'a', \"\", \"The ATS version, e.g. 9.1.2-42.abc123.el7.x86_64. If omitted, generation will attempt to get the ATS version from the Server Parameters, and fall back to lib/go-atscfg.DefaultATSVersion\")\n\tverbosePtr := getopt.CounterLong(\"verbose\", 'v', `Log verbosity. Logging is output to stderr. By default, errors are logged. To log warnings, pass '-v'. To log info, pass '-vv'. To omit error logging, see '-s'`)\n\tsilentPtr := getopt.BoolLong(\"silent\", 's', `Silent. Errors are not logged, and the 'verbose' flag is ignored. 
If a fatal error occurs, the return code will be non-zero but no text will be output to stderr`)\n\tcache := getopt.StringLong(\"cache\", 'C', \"ats\", \"Cache server type. Generate configuration files for specific cache server type, e.g. 'ats', 'varnish'.\")\n\n\tconst useStrategiesFlagName = \"use-strategies\"\n\tconst defaultUseStrategies = t3cutil.UseStrategiesFlagFalse\n\tuseStrategiesPtr := getopt.EnumLong(useStrategiesFlagName, 0, []string{string(t3cutil.UseStrategiesFlagTrue), string(t3cutil.UseStrategiesFlagCore), string(t3cutil.UseStrategiesFlagFalse), string(t3cutil.UseStrategiesFlagCore), \"\"}, \"\", \"[true | core| false] whether to generate config using strategies.yaml instead of parent.config. If true use the parent_select plugin, if 'core' use ATS core strategies, if false use parent.config.\")\n\n\tconst goDirectFlagName = \"go-direct\"\n\tgoDirectPtr := getopt.StringLong(goDirectFlagName, 'G', \"false\", \"[true|false|old] default will set go_direct to false, you can set go_direct true, or old will be based on opposite of parent_is_proxy directive.\")\n\n\tgetopt.Parse()\n\n\tif *version {\n\t\tcfg := &Cfg{Version: appVersion, GitRevision: gitRevision}\n\t\tfmt.Println(cfg.AppVersion())\n\t\tos.Exit(0)\n\t} else if *help {\n\t\tgetopt.PrintUsage(os.Stdout)\n\t\tos.Exit(0)\n\t} else if *listPlugins {\n\t\treturn Cfg{ListPlugins: true}, nil\n\t}\n\n\tlogLocationError := log.LogLocationStderr\n\tlogLocationWarn := log.LogLocationNull\n\tlogLocationInfo := log.LogLocationNull\n\tlogLocationDebug := log.LogLocationNull\n\tif *silentPtr {\n\t\tlogLocationError = log.LogLocationNull\n\t} else {\n\t\tif *verbosePtr >= 1 {\n\t\t\tlogLocationWarn = log.LogLocationStderr\n\t\t}\n\t\tif *verbosePtr >= 2 {\n\t\t\tlogLocationInfo = log.LogLocationStderr\n\t\t\tlogLocationDebug = log.LogLocationStderr // t3c only has 3 verbosity options: none (-s), error (default or --verbose=0), warning (-v), and info (-vv). 
Any code calling log.Debug is treated as Info.\n\t\t}\n\t}\n\n\tif *verbosePtr > 2 {\n\t\treturn Cfg{}, errors.New(\"Too many verbose options. The maximum log verbosity level is 2 (-vv or --verbose=2) for errors (0), warnings (1), and info (2)\")\n\t}\n\n\t// The flag takes the full version, for forward-compatibility in case we need it in the future,\n\t// but we only need the major version at the moment.\n\tatsMajorVersion := uint(0)\n\tif *atsVersion != \"\" {\n\t\terr := error(nil)\n\t\tatsMajorVersion, err = atscfg.GetATSMajorVersionFromATSVersion(*atsVersion)\n\t\tif err != nil {\n\t\t\treturn Cfg{}, errors.New(\"parsing ATS version '\" + *atsVersion + \"': \" + err.Error())\n\t\t}\n\t}\n\n\tdefaultTLSVersions := atscfg.DefaultDefaultTLSVersions\n\n\t*defaultTLSVersionsStr = strings.TrimSpace(*defaultTLSVersionsStr)\n\tif len(*defaultTLSVersionsStr) > 0 {\n\t\tdefaultTLSVersionsStrs := strings.Split(*defaultTLSVersionsStr, \",\")\n\n\t\tdefaultTLSVersions = []atscfg.TLSVersion{}\n\t\tfor _, tlsVersionStr := range defaultTLSVersionsStrs {\n\t\t\ttlsVersion := atscfg.StringToTLSVersion(tlsVersionStr)\n\t\t\tif tlsVersion == atscfg.TLSVersionInvalid {\n\t\t\t\treturn Cfg{}, errors.New(\"unknown TLS Version '\" + tlsVersionStr + \"' in '\" + *defaultTLSVersionsStr + \"'\")\n\t\t\t}\n\t\t\tdefaultTLSVersions = append(defaultTLSVersions, tlsVersion)\n\t\t}\n\t}\n\n\tif !getopt.IsSet(useStrategiesFlagName) {\n\t\t*useStrategiesPtr = defaultUseStrategies.String()\n\t}\n\n\tswitch *goDirectPtr {\n\tcase \"false\", \"true\", \"old\":\n\tdefault:\n\t\treturn Cfg{}, errors.New(goDirectFlagName + \" should be false, true, or old\")\n\t}\n\n\tcfg := Cfg{\n\t\tLogLocationErr: logLocationError,\n\t\tLogLocationWarn: logLocationWarn,\n\t\tLogLocationInfo: logLocationInfo,\n\t\tLogLocationDebug: logLocationDebug,\n\t\tListPlugins: *listPlugins,\n\t\tRevalOnly: *revalOnly,\n\t\tDir: *dir,\n\t\tViaRelease: *viaRelease,\n\t\tSetDNSLocalBind: *dnsLocalBind,\n\t\tNoOutgoingIP: 
*noOutgoingIP,\n\t\tATSMajorVersion: atsMajorVersion,\n\t\tParentComments: !(*disableParentConfigComments),\n\t\tDefaultEnableH2: *defaultEnableH2,\n\t\tDefaultTLSVersions: defaultTLSVersions,\n\t\tVersion: appVersion,\n\t\tGitRevision: gitRevision,\n\t\tUseStrategies: t3cutil.UseStrategiesFlag(*useStrategiesPtr),\n\t\tGoDirect: *goDirectPtr,\n\t\tCache: *cache,\n\t}\n\tif err := log.InitCfg(cfg); err != nil {\n\t\treturn Cfg{}, errors.New(\"Initializing loggers: \" + err.Error() + \"\\n\")\n\t}\n\treturn cfg, nil\n}", "func getConfig() (conf utils.Config) {\n\tconf = utils.Config{\n\t\tEndpoint: \"play.minio.io:9000\",\n\t\tAccessKey: \"minio\",\n\t\tSecretKey: \"minio123\",\n\t\tSecure: true,\n\t}\n\treturn\n}", "func GetConfig() Config {\n\tvar config Config\n\tfile := \"./config.json\"\n\tconfigFile, err := os.Open(file)\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t}\n\tdefer configFile.Close()\n\tjsonParser := json.NewDecoder(configFile)\n\tjsonParser.Decode(&config)\n\tCreateDir(config.DataDir)\n\treturn config\n}", "func Config() string {\n\treturn \"openblocks config\"\n}", "func GetConfig() Config {\n\tif value, ok := os.LookupEnv(\"APP_DEBUG\"); ok {\n\t\tvalue = strings.Trim(value, \" \\r\\n\")\n\t\tvalue = strings.ToLower(value)\n\t\tif value == \"true\" {\n\t\t\tconfig.Debug = true\n\t\t} else {\n\t\t\tgin.SetMode(gin.ReleaseMode)\n\t\t}\n\t}\n\n\tif value, ok := os.LookupEnv(\"GEO_IP_DB_PATH\"); ok {\n\t\tvalue = strings.Trim(value, \" \\r\\n\")\n\t\tconfig.GeoIP.DBPath = value\n\t} else {\n\t\t// log.Fatalln(\"GEO_IP_DB_PATH is not set\")\n\t}\n\n\tif value, ok := os.LookupEnv(\"REQUEST_ID_LENGTH\"); ok {\n\t\tif length, err := strconv.Atoi(value); err == nil {\n\t\t\tconfig.RequestID.Length = length\n\t\t}\n\t}\n\n\treturn config\n}", "func (conn *Conn) Config() *Config {\n\treturn conn.cfg\n}", "func GetConfig() config.Configuration {\n\tconfig := config.Configuration{\n\t\tWorkingDirectory: \"work\",\n\t}\n\treturn config\n}", "func (bs 
*bootstrap) Config() *Config {\n\treturn bs.config\n}", "func (c *EpinioClient) ShowGitconfig(gcName string) error {\n\tlog := c.Log.WithName(\"ShowGitconfig\").WithValues(\"Gitconfig\", gcName)\n\tlog.Info(\"start\")\n\tdefer log.Info(\"return\")\n\n\tc.ui.Note().\n\t\tWithStringValue(\"Name\", gcName).\n\t\tMsg(\"Showing gitconfig...\")\n\n\tgitconfig, err := c.API.GitconfigShow(gcName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc.ui.Success().WithTable(\"Key\", \"Value\").\n\t\tWithTableRow(\"Name\", gitconfig.Meta.Name).\n\t\t// WithTableRow(\"Created\", gitconfig.Meta.CreatedAt.String()).\n\t\tWithTableRow(\"Provider\", string(gitconfig.Provider)).\n\t\tWithTableRow(\"URL\", gitconfig.URL).\n\t\tWithTableRow(\"User/Org\", gitconfig.UserOrg).\n\t\tWithTableRow(\"Repository\", gitconfig.Repository).\n\t\tWithTableRow(\"Skip SSL\", fmt.Sprintf(\"%v\", gitconfig.SkipSSL)).\n\t\tWithTableRow(\"Username\", gitconfig.Username).\n\t\tMsg(\"Details:\")\n\n\treturn nil\n}", "func (crc *CommandsRunnerClient) GetConfig(extensionName string) (string, error) {\n\turl := \"config\"\n\tif extensionName != \"\" {\n\t\turl += \"?extension-name=\" + extensionName\n\t}\n\t//Call the rest API\n\tdata, errCode, err := crc.RestCall(http.MethodGet, global.BaseURL, url, nil, nil)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif errCode != http.StatusOK {\n\t\treturn \"\", errors.New(\"Unable to get config: \" + data + \",please check logs\")\n\t}\n\t//Convert to text otherwize return the json\n\tif crc.OutputFormat == \"text\" {\n\t\t//\tvar configAux config.Config\n\t\tcfg, jsonErr := config.ParseJson(data)\n\t\tif jsonErr != nil {\n\t\t\tfmt.Println(jsonErr.Error())\n\t\t\treturn \"\", jsonErr\n\t\t}\n\t\tps, jsonErr := cfg.Map(global.ConfigRootKey)\n\t\t//\t\tjsonErr = json.Unmarshal([]byte(data), &configAux)\n\t\tif jsonErr != nil {\n\t\t\tfmt.Println(jsonErr.Error())\n\t\t\treturn \"\", jsonErr\n\t\t}\n\t\tout := \"\"\n\t\tfor k, v := range ps {\n\t\t\tout += 
fmt.Sprintf(\"=>\\n\")\n\t\t\tout += fmt.Sprintf(\"Name : %s\\n\", k)\n\t\t\tout += fmt.Sprintf(\"Value : %s\\n\", v)\n\t\t}\n\t\treturn out, nil\n\t}\n\treturn crc.convertJSONOrYAML(data)\n}", "func GetConfig() *Config {\n\treturn &Config{\n\t\tDB: &DBConfig{\n\t\t\tDialect: \"mysql\",\n\t\t\tHost: os.Getenv(\"MYSQL_HOSTNAME\"),\n\t\t\tPort: porta,\n\t\t\tUsername: os.Getenv(\"MYSQL_USER\"),\n\t\t\tPassword: os.Getenv(\"MYSQL_PASSWORD\"),\n\t\t\tName: os.Getenv(\"MYSQL_DATABASE\"),\n\t\t\tCharset: \"utf8\",\n\t\t},\n\t}\n}", "func Config() aws.Config {\n\treturn config.Copy()\n}", "func GetConfig() basicauth.Config {\n\treturn cfg\n}", "func (d *Decoder) Config() *Config {\n\treturn d.cfg\n}", "func NewConfig() Config {\n\treturn Config{\n\t\tConfig: html.NewConfig(),\n\t\tStyle: \"github\",\n\t\tFormatOptions: []chromahtml.Option{},\n\t\tCSSWriter: nil,\n\t\tWrapperRenderer: nil,\n\t\tCodeBlockOptions: nil,\n\t}\n}", "func getRepositoryMappedConfig(imageID ImageID, config Config, repo string) Config {\n\tparts := strings.SplitN(repo, \"/\", 2)\n\tregistry, name := parts[0], parts[1]\n\n\tpullSpec := config.GetE2EImage()\n\n\th := sha256.New()\n\th.Write([]byte(pullSpec))\n\thash := base64.RawURLEncoding.EncodeToString(h.Sum(nil))[:16]\n\n\tshortName := reCharSafe.ReplaceAllLiteralString(pullSpec, \"-\")\n\tshortName = reDashes.ReplaceAllLiteralString(shortName, \"-\")\n\tmaxLength := 127 - 16 - 6 - 10\n\tif len(shortName) > maxLength {\n\t\tshortName = shortName[len(shortName)-maxLength:]\n\t}\n\tvar version string\n\tif imageID == None {\n\t\tversion = fmt.Sprintf(\"e2e-%s-%s\", shortName, hash)\n\t} else {\n\t\tversion = fmt.Sprintf(\"e2e-%d-%s-%s\", imageID, shortName, hash)\n\t}\n\n\treturn Config{\n\t\tregistry: registry,\n\t\tname: name,\n\t\tversion: version,\n\t}\n}", "func Config() (map[string]string, error) {\n\tfh, err := os.Open(gpgConfigLoc())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer func() {\n\t\t_ = fh.Close()\n\t}()\n\n\treturn 
parseGpgConfig(fh)\n}", "func (b *backend) Config(ctx context.Context, s logical.Storage) (*config, error) {\n\tentry, err := s.Get(ctx, \"config\")\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to get config from storage\")\n\t}\n\tif entry == nil || len(entry.Value) == 0 {\n\t\treturn nil, errors.New(\"no configuration in storage\")\n\t}\n\n\tvar result config\n\tif err := entry.DecodeJSON(&result); err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to decode configuration\")\n\t}\n\n\treturn &result, nil\n}", "func GetConfig(filename string) Config {\n\tconfig := Config{}\n\n\tdata, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\terr = json.Unmarshal(data, &config)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn config\n}", "func Config() *Configuration {\n\tif config == nil {\n\t\tconfig = getConfig()\n\t}\n\treturn config\n}", "func FindGitConfig(dir string) (string, error) {\n\tvar err error\n\n\tif dir, err = findGitDir(dir); err != nil {\n\t\treturn \"\", err\n\t}\n\tif dir, err = getGitCommonDir(dir); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn filepath.Join(dir, \"config\"), nil\n}", "func (m *Mutator) Config(ctx context.Context) (ispec.Image, error) {\n\tif err := m.cache(ctx); err != nil {\n\t\treturn ispec.Image{}, errors.Wrap(err, \"getting cache failed\")\n\t}\n\n\treturn *m.config, nil\n}", "func setConfig() error {\n\tviper.SetDefault(\"environment\", \"development\")\n\tviper.SetConfigName(\"config\")\n\tviper.AddConfigPath(\"$HOME/.git-grabber\")\n\treturn viper.ReadInConfig()\n}", "func Config() Configuration {\n\treturn NewConfig(\"config\", \".\", \"yml\")\n}", "func GetConfig() common.AresServerConfig {\n\treturn config\n}", "func (v Repository) GitConfigRemoteURL(name string) string {\n\treturn v.Config().Get(\"remote.\" + name + \".url\")\n}", "func Config() *GlobalConfig {\n\tconfigLock.RLock()\n\tdefer configLock.RUnlock()\n\treturn config\n}", "func GetConfig() Config 
{\n\tonce.Do(func() {\n\n\t\tconfigPath := os.Getenv(\"CONFIG\")\n\t\tif configPath == \"\" {\n\t\t\tlogger.Fatal(\"Error:Config environment variable not found\")\n\t\t}\n\t\tlogger.Info(\"Using config file at %v\", configPath)\n\t\tconfigFile, err := os.Open(configPath)\n\t\tif err != nil {\n\t\t\tlogger.Fatal(\"Error:Config file Path\", err)\n\t\t}\n\t\tdefer configFile.Close()\n\t\tjsonParser := json.NewDecoder(configFile)\n\t\tif err = jsonParser.Decode(&config); err == nil {\n\t\t\tlogger.Info(\"successfully parsed config %+v\", config)\n\t\t} else {\n\t\t\tlogger.Fatal(\"Error:\", err)\n\t\t}\n\t})\n\n\treturn config\n}", "func Get() (*Config, error) {\n\tif cfg != nil {\n\t\treturn cfg, nil\n\t}\n\n\tcfg = &Config{\n\t\tBindAddr: \"localhost:25100\",\n\t\tAwsRegion: \"eu-west-1\",\n\t\tUploadBucketName: \"dp-frontend-florence-file-uploads\",\n\t\tEncryptionDisabled: false,\n\t\tGracefulShutdownTimeout: 5 * time.Second,\n\t\tHealthCheckInterval: 30 * time.Second,\n\t\tHealthCheckCriticalTimeout: 90 * time.Second,\n\t\tVaultToken: \"\",\n\t\tVaultAddress: \"http://localhost:8200\",\n\t\tVaultPath: \"secret/shared/psk\",\n\t}\n\n\treturn cfg, envconfig.Process(\"\", cfg)\n}", "func getconfig(c *cli.Context) (result Config, err error) {\n\tif result, err = NewConfigFromFilename(c.String(\"config\")); err != nil {\n\t\terr = exitError(\"Config returned error: %s\", err)\n\t\treturn\n\t}\n\t// validate configuration\n\tif err = result.Validate(); err != nil {\n\t\terr = exitError(\"Config returned error: %s\", err)\n\t\treturn\n\t}\n\treturn\n}", "func Config() Provider {\n\treturn defaultConfig\n}", "func (s *Submodule) Config() *config.Submodule {\n\treturn s.c\n}", "func (b *Backend) GetConfig() string {\n\tvar sb strings.Builder\n\tsb.WriteString(\"name \" + b.Config.Name + \"\\n\")\n\tsb.WriteString(\"method \" + b.Config.Method + \"\\n\")\n\tsb.WriteString(\"realm \" + b.Config.Realm + \"\\n\")\n\tsb.WriteString(\"provider \" + b.Config.Provider)\n\treturn 
sb.String()\n}", "func Config() Info {\n\tinfoMutex.RLock()\n\tdefer infoMutex.RUnlock()\n\treturn info\n}", "func Config() Info {\n\tinfoMutex.RLock()\n\tdefer infoMutex.RUnlock()\n\treturn info\n}", "func Config() Provider {\r\n\treturn defaultConfig\r\n}", "func GetConfigRepo(aliasRepoPath string) error {\n\tlog.Printf(\"Cloning products repo to %s\", aliasRepoPath)\n\t_, err := git.PlainClone(aliasRepoPath, false, &git.CloneOptions{\n\t\tURL: helper.AliasRepo,\n\t\tProgress: os.Stdout,\n\t})\n\n\tif errors.Is(err, git.ErrRepositoryAlreadyExists) {\n\t\treturn pullConfigRepo(aliasRepoPath)\n\t}\n\treturn err\n}", "func readConfig() (*config, error) {\n\tcfgPath := *configPath\n\tuserSpecified := *configPath != \"\"\n\n\tvar homeDir string\n\tif testHomeDir != \"\" {\n\t\thomeDir = testHomeDir\n\t} else {\n\t\tu, err := user.Current()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\thomeDir = u.HomeDir\n\t}\n\n\tif !userSpecified {\n\t\tcfgPath = filepath.Join(homeDir, \"src-config.json\")\n\t} else if strings.HasPrefix(cfgPath, \"~/\") {\n\t\tcfgPath = filepath.Join(homeDir, cfgPath[2:])\n\t}\n\tdata, err := os.ReadFile(os.ExpandEnv(cfgPath))\n\tif err != nil && (!os.IsNotExist(err) || userSpecified) {\n\t\treturn nil, err\n\t}\n\tvar cfg config\n\tif err == nil {\n\t\tcfg.ConfigFilePath = cfgPath\n\t\tif err := json.Unmarshal(data, &cfg); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tenvToken := os.Getenv(\"SRC_ACCESS_TOKEN\")\n\tenvEndpoint := os.Getenv(\"SRC_ENDPOINT\")\n\n\tif userSpecified {\n\t\t// If a config file is present, either zero or both environment variables must be present.\n\t\t// We don't want to partially apply environment variables.\n\t\tif envToken == \"\" && envEndpoint != \"\" {\n\t\t\treturn nil, errConfigMerge\n\t\t}\n\t\tif envToken != \"\" && envEndpoint == \"\" {\n\t\t\treturn nil, errConfigMerge\n\t\t}\n\t}\n\n\t// Apply config overrides.\n\tif envToken != \"\" {\n\t\tcfg.AccessToken = envToken\n\t}\n\tif envEndpoint 
!= \"\" {\n\t\tcfg.Endpoint = envEndpoint\n\t}\n\tif cfg.Endpoint == \"\" {\n\t\tcfg.Endpoint = \"https://sourcegraph.com\"\n\t}\n\n\tcfg.AdditionalHeaders = parseAdditionalHeaders()\n\t// Ensure that we're not clashing additonal headers\n\t_, hasAuthorizationAdditonalHeader := cfg.AdditionalHeaders[\"authorization\"]\n\tif cfg.AccessToken != \"\" && hasAuthorizationAdditonalHeader {\n\t\treturn nil, errConfigAuthorizationConflict\n\t}\n\n\t// Lastly, apply endpoint flag if set\n\tif endpoint != nil && *endpoint != \"\" {\n\t\tcfg.Endpoint = *endpoint\n\t}\n\n\tcfg.Endpoint = cleanEndpoint(cfg.Endpoint)\n\n\treturn &cfg, nil\n}", "func getConfig() (Config, error) {\n\tvar configPath string\n\tvar err error\n\n\tif !configRead {\n\t\tconfigPath, err = getConfigPath()\n\t\tif err != nil {\n\t\t\treturn Config{}, err\n\t\t}\n\t\tconfig, err = parseConfig(configPath)\n\t\tif err != nil {\n\t\t\treturn Config{}, err\n\t\t}\n\t\tconfigRead = true\n\t}\n\n\treturn config, nil\n}", "func (n *Node) Config() *Config {\n\treturn n.config\n}", "func Get() *Config {\n\treturn configuration\n}", "func getConfig(c *gin.Context) {\n\tc.HTML(http.StatusOK, \"config_ui.html\", gin.H{\n\t\t\"config\": biConfig,\n\t})\n}", "func Get() *Config {\n\treturn &c\n}", "func GitserverPushConfig(repo *types.Repo, au auth.Authenticator) (*protocol.PushConfig, error) {\n\t// Empty authenticators are not allowed.\n\tif au == nil {\n\t\treturn nil, ErrNoPushCredentials{}\n\t}\n\n\tcloneURL, err := getCloneURL(repo)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"getting clone URL\")\n\t}\n\n\t// If the repo is cloned using SSH, we need to pass along a private key and passphrase.\n\tif cloneURL.IsSSH() {\n\t\tsshA, ok := au.(auth.AuthenticatorWithSSH)\n\t\tif !ok {\n\t\t\treturn nil, ErrNoSSHCredential\n\t\t}\n\t\tprivateKey, passphrase := sshA.SSHPrivateKey()\n\t\treturn &protocol.PushConfig{\n\t\t\tRemoteURL: cloneURL.String(),\n\t\t\tPrivateKey: privateKey,\n\t\t\tPassphrase: 
passphrase,\n\t\t}, nil\n\t}\n\n\textSvcType := repo.ExternalRepo.ServiceType\n\tswitch av := au.(type) {\n\tcase *auth.OAuthBearerTokenWithSSH:\n\t\tif err := setOAuthTokenAuth(cloneURL, extSvcType, av.Token); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\tcase *auth.OAuthBearerToken:\n\t\tif err := setOAuthTokenAuth(cloneURL, extSvcType, av.Token); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\tcase *auth.BasicAuthWithSSH:\n\t\tif err := setBasicAuth(cloneURL, extSvcType, av.Username, av.Password); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\tcase *auth.BasicAuth:\n\t\tif err := setBasicAuth(cloneURL, extSvcType, av.Username, av.Password); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\tdefault:\n\t\treturn nil, ErrNoPushCredentials{CredentialsType: fmt.Sprintf(\"%T\", au)}\n\t}\n\n\treturn &protocol.PushConfig{RemoteURL: cloneURL.String()}, nil\n}", "func GetConfig() *Config {\n\treturn &values\n}", "func (p *Provider) Config(url *url.URL) *oauth.Config {\n\treturn &oauth.Config{\n\t\tClientId: p.ClientID,\n\t\tClientSecret: p.ClientSecret,\n\t\tScope: p.Scope,\n\t\tAuthURL: p.AuthURL,\n\t\tTokenURL: p.TokenURL,\n\t\tRedirectURL: fmt.Sprintf(\"%s://%s/-/auth/%s/callback\", url.Scheme, url.Host,\n\t\t\tstrings.ToLower(p.Name)),\n\t}\n}", "func getConf() *config {\n\tviper.AddConfigPath(\".\")\n\tviper.SetConfigName(\"config\")\n\terr := viper.ReadInConfig()\n\n\tif err != nil {\n\t\tfmt.Printf(\"%v\", err)\n\t}\n\n\tconf := &config{}\n\terr = viper.Unmarshal(conf)\n\tif err != nil {\n\t\tfmt.Printf(\"unable to decode into config struct, %v\", err)\n\t}\n\n\tsd := conf.SearchDirectory\n\n\tif len(sd) == 0 {\n\t\tpanic(\"Please set a `searchDirectory` in config.yml\")\n\t}\n\n\tconf.SearchDirectory = appendTrailingSlashIfNotExist(conf.SearchDirectory)\n\tconf.RemotePath = appendTrailingSlashIfNotExist(conf.RemotePath)\n\tconf.RemoteOldPath = appendTrailingSlashIfNotExist(conf.RemoteOldPath)\n\n\treturn conf\n}" ]
[ "0.7744009", "0.7410123", "0.670251", "0.661456", "0.6592749", "0.65113205", "0.6497694", "0.64752066", "0.6394821", "0.63012666", "0.62980485", "0.6286497", "0.6229654", "0.6194471", "0.61894053", "0.61824733", "0.61724776", "0.61576897", "0.61292005", "0.6120914", "0.61203307", "0.6108714", "0.6101544", "0.6099184", "0.6094684", "0.60923207", "0.60900736", "0.60773087", "0.6075688", "0.6069534", "0.6068668", "0.60565215", "0.6055393", "0.6041729", "0.6041606", "0.60347265", "0.6024645", "0.6024645", "0.60236996", "0.6012785", "0.6012271", "0.598452", "0.598395", "0.598395", "0.59749186", "0.59680575", "0.5965576", "0.5965449", "0.5959824", "0.59530276", "0.59516525", "0.5945272", "0.59435564", "0.59408474", "0.5918277", "0.5912409", "0.5907594", "0.59019136", "0.59009665", "0.59007853", "0.58954203", "0.58942205", "0.58869535", "0.58790684", "0.5866915", "0.58646876", "0.5862412", "0.58591306", "0.5849063", "0.5846904", "0.58452433", "0.5841398", "0.5837154", "0.582207", "0.5816312", "0.58020484", "0.57991374", "0.57969826", "0.5795786", "0.57951874", "0.57948506", "0.57924014", "0.57923025", "0.5780534", "0.57724154", "0.57706326", "0.5767491", "0.5767491", "0.5766158", "0.57638144", "0.57617694", "0.57587767", "0.5749579", "0.5749241", "0.57450926", "0.57419807", "0.5739534", "0.5736875", "0.57327664", "0.5726707" ]
0.7850194
0
FindRepository locates repository object search from the given dir.
func FindRepository(dir string) (*Repository, error) { var ( gitDir string commonDir string workDir string gitConfig GitConfig err error ) gitDir, err = findGitDir(dir) if err != nil { return nil, err } commonDir, err = getGitCommonDir(gitDir) if err != nil { return nil, err } gitConfig, err = LoadFileWithDefault(filepath.Join(commonDir, "config")) if err != nil { return nil, err } if !gitConfig.GetBool("core.bare", false) { workDir, _ = getWorkTree(gitDir) } return &Repository{ gitDir: gitDir, gitCommonDir: commonDir, workDir: workDir, gitConfig: gitConfig, }, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func FindRepository(ctx context.Context, exec boil.ContextExecutor, iD string, selectCols ...string) (*Repository, error) {\n\trepositoryObj := &Repository{}\n\n\tsel := \"*\"\n\tif len(selectCols) > 0 {\n\t\tsel = strings.Join(strmangle.IdentQuoteSlice(dialect.LQ, dialect.RQ, selectCols), \",\")\n\t}\n\tquery := fmt.Sprintf(\n\t\t\"select %s from `repositories` where `id`=?\", sel,\n\t)\n\n\tq := queries.Raw(query, iD)\n\n\terr := q.Bind(ctx, exec, repositoryObj)\n\tif err != nil {\n\t\tif errors.Cause(err) == sql.ErrNoRows {\n\t\t\treturn nil, sql.ErrNoRows\n\t\t}\n\t\treturn nil, errors.Wrap(err, \"models: unable to select from repositories\")\n\t}\n\n\treturn repositoryObj, nil\n}", "func (f *RepoFinder) Find() error {\n\tif _, err := Exists(f.root); err != nil {\n\t\treturn err\n\t}\n\n\twalkOpts := &godirwalk.Options{\n\t\tErrorCallback: f.errorCb,\n\t\tCallback: f.walkCb,\n\t\t// Use Unsorted to improve speed because repos will be processed by goroutines in a random order anyway.\n\t\tUnsorted: true,\n\t}\n\n\terr := godirwalk.Walk(f.root, walkOpts)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(f.repos) == 0 {\n\t\treturn fmt.Errorf(\"no git repos found in root path %s\", f.root)\n\t}\n\n\treturn nil\n}", "func (r *LocalRegistry) findRepository(name *core.PackageName) *Repository {\n\t// find repository using package name\n\tfor _, repository := range r.Repositories {\n\t\tif repository.Repository == name.FullyQualifiedName() {\n\t\t\treturn repository\n\t\t}\n\t}\n\t// find repository using package Id\n\tfor _, repository := range r.Repositories {\n\t\tfor _, artie := range repository.Packages {\n\t\t\tif strings.Contains(artie.Id, name.Name) {\n\t\t\t\treturn repository\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (s *Service) MatchRepository(ctx context.Context, q *apiclient.RepositoryRequest) (*apiclient.RepositoryResponse, error) {\n\tvar repoResponse apiclient.RepositoryResponse\n\tconfig := s.initConstants.PluginConfig\n\tif 
config.Spec.Discover.FileName != \"\" {\n\t\tlog.Debugf(\"config.Spec.Discover.FileName is provided\")\n\t\tpattern := strings.TrimSuffix(q.Path, \"/\") + \"/\" + strings.TrimPrefix(config.Spec.Discover.FileName, \"/\")\n\t\tmatches, err := filepath.Glob(pattern)\n\t\tif err != nil || len(matches) == 0 {\n\t\t\tlog.Debugf(\"Could not find match for pattern %s. Error is %v.\", pattern, err)\n\t\t\treturn &repoResponse, err\n\t\t} else if len(matches) > 0 {\n\t\t\trepoResponse.IsSupported = true\n\t\t\treturn &repoResponse, nil\n\t\t}\n\t}\n\n\tif config.Spec.Discover.Find.Glob != \"\" {\n\t\tlog.Debugf(\"config.Spec.Discover.Find.Glob is provided\")\n\t\tpattern := strings.TrimSuffix(q.Path, \"/\") + \"/\" + strings.TrimPrefix(config.Spec.Discover.Find.Glob, \"/\")\n\t\t// filepath.Glob doesn't have '**' support hence selecting third-party lib\n\t\t// https://github.com/golang/go/issues/11862\n\t\tmatches, err := zglob.Glob(pattern)\n\t\tif err != nil || len(matches) == 0 {\n\t\t\tlog.Debugf(\"Could not find match for pattern %s. 
Error is %v.\", pattern, err)\n\t\t\treturn &repoResponse, err\n\t\t} else if len(matches) > 0 {\n\t\t\trepoResponse.IsSupported = true\n\t\t\treturn &repoResponse, nil\n\t\t}\n\t}\n\n\tlog.Debugf(\"Going to try runCommand.\")\n\tfind, err := runCommand(config.Spec.Discover.Find.Command, q.Path, os.Environ())\n\tif err != nil {\n\t\treturn &repoResponse, err\n\t}\n\n\tvar isSupported bool\n\tif find != \"\" {\n\t\tisSupported = true\n\t}\n\treturn &apiclient.RepositoryResponse{\n\t\tIsSupported: isSupported,\n\t}, nil\n}", "func TestRepositoryFind(t *testing.T) {\n\tdefer gock.Off()\n\n\tgock.New(\"https://gitlab.com\").\n\t\tGet(\"/api/v4/projects/diaspora/diaspora\").\n\t\tReply(200).\n\t\tType(\"application/json\").\n\t\tSetHeaders(mockHeaders).\n\t\tFile(\"testdata/repo.json\")\n\n\tclient := NewDefault()\n\tgot, res, err := client.Repositories.Find(context.Background(), \"diaspora/diaspora\")\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\twant := new(scm.Repository)\n\traw, _ := ioutil.ReadFile(\"testdata/repo.json.golden\")\n\tjson.Unmarshal(raw, want)\n\n\tif diff := cmp.Diff(got, want); diff != \"\" {\n\t\tt.Errorf(\"Unexpected Results\")\n\t\tt.Log(diff)\n\t}\n\n\tt.Run(\"Request\", testRequest(res))\n\tt.Run(\"Rate\", testRate(res))\n}", "func requestedRepository(repoName string) (repository.Repository, error) {\n\t/*\t_, repoName, err := parseGitCommand(sshcmd)\n\t\tif err != nil {\n\t\t\treturn repository.Repository{}, err\n\t\t}*/\n\tvar repo repository.Repository\n\tconn, err := db.Conn()\n\tif err != nil {\n\t\treturn repository.Repository{}, err\n\t}\n\tdefer conn.Close()\n\tif err := conn.Repository().Find(bson.M{\"_id\": repoName}).One(&repo); err != nil {\n\t\treturn repository.Repository{}, errors.New(\"Repository not found\")\n\t}\n\treturn repo, nil\n}", "func (r *RepoFinder) Find() ([]string, error) {\n\tif _, err := Exists(r.root); err != nil {\n\t\treturn nil, err\n\t}\n\n\twalkOpts := &godirwalk.Options{\n\t\tErrorCallback: 
r.errorCb,\n\t\tCallback: r.walkCb,\n\t\t// Use Unsorted to improve speed because repos will be processed by goroutines in a random order anyway.\n\t\tUnsorted: true,\n\t}\n\n\terr := godirwalk.Walk(r.root, walkOpts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(r.repos) == 0 {\n\t\treturn nil, fmt.Errorf(\"no git repos found in root path %s\", r.root)\n\t}\n\n\treturn r.repos, nil\n}", "func (u *Updater) FindRepositories() error {\n\treturn filepath.Walk(u.root, func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !info.IsDir() {\n\t\t\treturn err\n\t\t}\n\t\tif info.Name() != \".git\" {\n\t\t\t// Skip\n\t\t\treturn nil\n\t\t}\n\t\tgitPath := filepath.Dir(path)\n\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"path\": gitPath,\n\t\t}).Info(\"found git repository\")\n\t\tu.repositories = append(u.repositories, gitPath)\n\t\treturn nil\n\t})\n}", "func (d DefaultGuesser) Find(dir string) Default {\n\tfor _, candidate := range d {\n\t\tif candidate.Test(dir) {\n\t\t\treturn candidate\n\t\t}\n\t}\n\treturn nil\n}", "func Find(dir string, rootDir string, classifier Classifier) (string, error) {\n\tdir, err := filepath.Abs(dir)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\trootDir, err = filepath.Abs(rootDir)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif !strings.HasPrefix(dir, rootDir) {\n\t\treturn \"\", fmt.Errorf(\"licenses.Find: rootDir %s should contain dir %s\", rootDir, dir)\n\t}\n\tfound, err := findUpwards(dir, licenseRegexp, rootDir, func(path string) bool {\n\t\t// TODO(RJPercival): Return license details\n\t\tif _, err := classifier.Identify(path); err != nil {\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t})\n\tif err != nil {\n\t\tif errors.Is(err, errNotFound) {\n\t\t\treturn \"\", fmt.Errorf(\"cannot find a known open source license for %q whose name matches regexp %s and locates up until %q\", dir, licenseRegexp, rootDir)\n\t\t}\n\t\treturn \"\", fmt.Errorf(\"finding a known open 
source license: %w\", err)\n\t}\n\treturn found, nil\n}", "func (c *TestContext) FindRepoRoot() string {\n\tgoMod := c.findRepoFile(\"go.mod\")\n\treturn filepath.Dir(goMod)\n}", "func findAnyRepo(importPath string) RemoteRepo {\n\tfor _, v := range vcsMap {\n\t\ti := strings.Index(importPath+\"/\", v.suffix+\"/\")\n\t\tif i < 0 {\n\t\t\tcontinue\n\t\t}\n\t\tif !strings.Contains(importPath[:i], \"/\") {\n\t\t\tcontinue // don't match vcs suffix in the host name\n\t\t}\n\t\treturn &anyRepo{\n\t\t\tbaseRepo{\n\t\t\t\troot: importPath[:i] + v.suffix,\n\t\t\t\tvcs: v,\n\t\t\t},\n\t\t\timportPath[:i],\n\t\t}\n\t}\n\treturn nil\n}", "func (f *RepoFinder) Find(ctx context.Context, filter RepoFilter) ([]*github.Repository, error) {\n\tif filter.NoPrivate && filter.NoPublic {\n\t\treturn nil, nil // Nothing to do.\n\t}\n\n\towner, _, err := f.Client.Users.Get(ctx, filter.Owner)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"can't read owner information: %s\", err)\n\t}\n\n\t// A single repository. 
No other criteria apply.\n\tif filter.Repo != \"\" {\n\t\trepo, _, err := f.Client.Repositories.Get(ctx, filter.Owner, filter.Repo)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"can't read repository: %s\", err)\n\t\t}\n\t\treturn []*github.Repository{repo}, nil\n\t}\n\n\tvar repos []*github.Repository\n\tswitch t := owner.GetType(); t {\n\tcase \"User\":\n\t\trepos, err = f.userRepos(ctx, filter)\n\tcase \"Organization\":\n\t\trepos, err = f.orgRepos(ctx, filter)\n\tdefault:\n\t\terr = fmt.Errorf(\"unknown owner type %s\", t)\n\t}\n\n\treturn repos, err\n}", "func Open(path string) (repo *Repo, err error) {\n\tif path == \"\" {\n\t\tpath = \".\"\n\t}\n\tpath, err = filepath.Abs(path)\n\tbasepath := path\n\tif err != nil {\n\t\treturn\n\t}\n\tfor {\n\t\tfound, gitdir, workdir := findRepo(path)\n\t\tif found {\n\t\t\trepo = new(Repo)\n\t\t\trepo.GitDir = gitdir\n\t\t\trepo.WorkDir = workdir\n\t\t\treturn\n\t\t}\n\t\tparent := filepath.Dir(path)\n\t\tif parent == path {\n\t\t\tbreak\n\t\t}\n\t\tpath = parent\n\t}\n\treturn nil, errors.New(fmt.Sprintf(\"Could not find a Git repository in %s or any of its parents!\", basepath))\n}", "func FindGitDir() (string, error) {\n\twd, err := os.Getwd()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn findGitDirIn(wd, 0)\n}", "func SearchDir(dirPath string, cb func(comment *Comment)) error {\n\terr := godirwalk.Walk(dirPath, &godirwalk.Options{\n\t\tCallback: func(path string, de *godirwalk.Dirent) error {\n\t\t\tlocalPath, err := filepath.Rel(dirPath, path)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tpathComponents := strings.Split(localPath, string(os.PathSeparator))\n\t\t\t// let's ignore git directories TODO: figure out a more generic way to set ignores\n\t\t\tmatched, err := filepath.Match(\".git\", pathComponents[0])\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif matched {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tif de.IsRegular() {\n\t\t\t\tp, err := filepath.Abs(path)\n\t\t\t\tif 
err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tf, err := os.Open(p)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\terr = SearchFile(localPath, f, cb)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tf.Close()\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t\tUnsorted: true,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func ScanForRepositories(directoryPath string) ([]string, error) {\n repoFolders := make([]string, 0)\n files, err := ioutil.ReadDir(directoryPath)\n if err == nil {\n for _, f := range files {\n if f.IsDir() {\n repoFolders = append(repoFolders, filepath.Join(directoryPath, f.Name()))\n }\n }\n }\n return repoFolders, err\n}", "func findInDir(dir, name string, recurse bool) string {\n\tfis, err := ioutil.ReadDir(dir)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\n\tvar revisions []string\n\tmname := strings.TrimSuffix(name, \".yang\")\n\tfor _, fi := range fis {\n\t\tswitch {\n\t\tcase !fi.IsDir():\n\t\t\tif fn := fi.Name(); fn == name {\n\t\t\t\treturn filepath.Join(dir, name)\n\t\t\t} else if strings.HasPrefix(fn, mname) && revisionDateSuffixRegex.MatchString(strings.TrimPrefix(fn, mname)) {\n\t\t\t\trevisions = append(revisions, fn)\n\t\t\t}\n\t\tcase recurse:\n\t\t\tif n := findInDir(filepath.Join(dir, fi.Name()), name, recurse); n != \"\" {\n\t\t\t\treturn n\n\t\t\t}\n\t\t}\n\t}\n\tif len(revisions) == 0 {\n\t\treturn \"\"\n\t}\n\tsort.Strings(revisions)\n\treturn filepath.Join(dir, revisions[len(revisions)-1])\n}", "func (rs *RepositoriesService) Find(opt *RepositoryListOptions) ([]Repository, *http.Response, error) {\n\tu, err := urlWithOptions(\"/repos\", opt)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treq, err := rs.client.NewRequest(\"GET\", u, nil, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar reposResp listRepositoriesResponse\n\tresp, err := rs.client.Do(req, &reposResp)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn 
reposResp.Repositories, resp, err\n}", "func DiscoverRepository() (*Repository, error) {\n\tcmd := exec.Command(\"git\", \"rev-parse\", \"--git-dir\")\n\tdata, err := cmd.Output()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpath := strings.Trim(string(data), \"\\n \")\n\treturn &Repository{Path: path}, nil\n}", "func (g *GitLocal) FindGitConfigDir(dir string) (string, string, error) {\n\treturn g.GitCLI.FindGitConfigDir(dir)\n}", "func FindIn(fsys fs.FS, dir, name string) (parent string, err error) {\n\tif dir == \"\" {\n\t\treturn \"\", errors.New(\"dir cannot be empty\")\n\t}\n\tif name == \"\" {\n\t\treturn \"\", errors.New(\"name cannot be empty\")\n\t}\n\tdir = filepath.Clean(dir)\n\tfor {\n\t\tcandidate := filepath.Join(dir, name)\n\t\tif !fs.ValidPath(candidate) {\n\t\t\treturn \"\", fmt.Errorf(\"invalid path: %q\", candidate)\n\t\t}\n\t\t_, err := fs.Stat(fsys, candidate)\n\t\tif err == nil {\n\t\t\treturn dir, nil\n\t\t}\n\t\tif dir == \".\" || dir == \"/\" {\n\t\t\t// Hit root.\n\t\t\treturn \"\", os.ErrNotExist\n\t\t}\n\t\t// Pop up a directory.\n\t\tdir = filepath.Dir(dir)\n\t}\n}", "func (r *LocalRegistry) findRepositoryByPackageId(id string) []Repository {\n\tvar repos []Repository\n\tfor _, repository := range r.Repositories {\n\t\tfor _, p := range repository.Packages {\n\t\t\tif p.Id == id {\n\t\t\t\trepos = append(repos, *repository)\n\t\t\t}\n\t\t}\n\t}\n\treturn repos\n}", "func (c *TestContext) findRepoFile(wantFile string) string {\n\td := c.GetTestDefinitionDirectory()\n\tfor {\n\t\tif foundFile, ok := c.hasChild(d, wantFile); ok {\n\t\t\treturn foundFile\n\t\t}\n\n\t\td = filepath.Dir(d)\n\t\tif d == \".\" || d == \"\" || d == filepath.Dir(d) {\n\t\t\tc.T.Fatalf(\"could not find %s\", wantFile)\n\t\t}\n\t}\n}", "func InitializeRepo(dir string) (e *RepoEntity, err error) {\n\te, err = FastInitializeRepo(dir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// need nothing extra but loading additional components\n\treturn e, 
e.loadComponents(true)\n}", "func (r *LocalRegistry) findDanglingRepo() *Repository {\n\tfor _, r := range r.Repositories {\n\t\tif strings.Contains(r.Repository, \"none\") {\n\t\t\treturn r\n\t\t}\n\t}\n\t// if the dangling repo does not exist, it creates one\n\tdanglingRepo := &Repository{\n\t\tRepository: \"<none>\",\n\t\tPackages: []*Package{},\n\t}\n\t// adds it to the collection of repos of the registry\n\tr.Repositories = append(r.Repositories, danglingRepo)\n\t// return the repo\n\treturn danglingRepo\n}", "func Find(dir, name string) (parent string, err error) {\n\t// There's a lot of duplication here with FindIn.\n\t// But the amount of work to get os.DirFS to work correctly across platforms\n\t// is more than the amount of work required to just duplicate the code.\n\tif dir == \"\" {\n\t\treturn \"\", errors.New(\"dir cannot be empty\")\n\t}\n\tif name == \"\" {\n\t\treturn \"\", errors.New(\"name cannot be empty\")\n\t}\n\tdir = filepath.Clean(dir)\n\tfor {\n\t\tcandidate := filepath.Join(dir, name)\n\t\t_, err := os.Stat(candidate)\n\t\tif err == nil {\n\t\t\treturn dir, nil\n\t\t}\n\t\tparent := filepath.Dir(dir)\n\t\tif parent == dir {\n\t\t\t// Hit root.\n\t\t\treturn \"\", os.ErrNotExist\n\t\t}\n\t\t// Pop up a directory.\n\t\tdir = parent\n\t}\n}", "func resolveRepository(ctx context.Context, repoID api.RepoID) (*graphqlbackend.RepositoryResolver, error) {\n\trepo, err := backend.Repos.Get(ctx, repoID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn graphqlbackend.NewRepositoryResolver(repo), nil\n}", "func NewRepoFinder(root string) *RepoFinder {\n\treturn &RepoFinder{\n\t\troot: root,\n\t}\n}", "func SearchRepo(terms []string) (*RepoSearchResult, error) {\n\tq := url.QueryEscape(strings.Join(terms, \" \"))\n\tif q != \"\" {\n\t\tq = \"?q=topic:\" + q\n\t}\n\tresp, err := http.Get(RepoURL + q)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t//check the status code\n\tif resp.StatusCode != http.StatusOK 
{\n\t\tresp.Body.Close()\n\t\treturn nil, fmt.Errorf(\"search query Faild %s\", resp.Status)\n\t}\n\tvar result RepoSearchResult\n\tif err := json.NewDecoder(resp.Body).Decode(&result); err != nil {\n\t\tresp.Body.Close()\n\t\treturn nil, err\n\t}\n\tresp.Body.Close()\n\treturn &result, nil\n}", "func (r *RepositoryStruct) Find(obj interface{}, conds ...interface{}) (interface{}, error) {\n\tvar result *gorm.DB\n\n\tif conds == nil {\n\t\tresult = r.Connection.Find(obj)\n\t} else {\n\t\tresult = r.Connection.Find(obj, conds)\n\t}\n\n\tif result.Error != nil {\n\t\treturn obj, result.Error\n\t}\n\treturn obj, nil\n}", "func findGitDir(dir string) (string, error) {\n\tvar err error\n\n\tdir, err = absPath(dir)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfor {\n\t\t// Check if is in a bare repo\n\t\tif isGitDir(dir) {\n\t\t\treturn dir, nil\n\t\t}\n\n\t\t// Check .git\n\t\tgitdir := filepath.Join(dir, \".git\")\n\t\tfi, err := os.Stat(gitdir)\n\t\tif err != nil {\n\t\t\t// Test parent dir\n\t\t\toldDir := dir\n\t\t\tdir = filepath.Dir(dir)\n\t\t\tif oldDir == dir {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tcontinue\n\t\t} else if fi.IsDir() {\n\t\t\tif isGitDir(gitdir) {\n\t\t\t\treturn gitdir, nil\n\t\t\t}\n\t\t\treturn \"\", fmt.Errorf(\"corrupt git dir: %s\", gitdir)\n\t\t} else {\n\t\t\tf, err := os.Open(gitdir)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", fmt.Errorf(\"cannot open gitdir file '%s'\", gitdir)\n\t\t\t}\n\t\t\tdefer f.Close()\n\t\t\treader := bufio.NewReader(f)\n\t\t\tline, err := reader.ReadString('\\n')\n\t\t\tif strings.HasPrefix(line, \"gitdir:\") {\n\t\t\t\trealgit := strings.TrimSpace(strings.TrimPrefix(line, \"gitdir:\"))\n\t\t\t\tif !filepath.IsAbs(realgit) {\n\t\t\t\t\trealgit, err = absJoin(filepath.Dir(gitdir), realgit)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn \"\", err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif isGitDir(realgit) {\n\t\t\t\t\treturn realgit, nil\n\t\t\t\t}\n\t\t\t\treturn \"\", fmt.Errorf(\"gitdir '%s' points to corrupt git 
repo: %s\", gitdir, realgit)\n\t\t\t}\n\t\t\treturn \"\", fmt.Errorf(\"bad gitdir file '%s'\", gitdir)\n\t\t}\n\t}\n\treturn \"\", ErrNotInGitDir\n}", "func (c *config) Repo(u *model.User, owner, name string) (*model.Repo, error) {\n\trepo, err := c.newClient(u).FindRepo(owner, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn convertRepo(repo), nil\n}", "func openRepository(path string) (repo *git.Repository, repositoryPath string, err error) {\n\n\twd, err := os.Getwd()\n\tif err != nil {\n\t\treturn nil, \"\", err\n\t}\n\tfor {\n\t\trepo, err = git.OpenRepository(path)\n\t\tif err != nil {\n\t\t\tpath = popLastDirectory(path)\n\t\t\t// Root hit\n\t\t\tif path == \"/\" {\n\t\t\t\tpath = wd\n\t\t\t\tlog.Println(\"Output directory is not in a git repository. Creating one in \" + path)\n\t\t\t\trepo, err = git.InitRepository(wd, false)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, \"\", errors.Wrap(err, \"Could not initialize git repository\")\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn repo, path, nil\n}", "func FindDir(dir string) (string, bool) {\n\tfound := FindPath(dir, commonBaseSearchPaths, func(fileInfo os.FileInfo) bool {\n\t\treturn fileInfo.IsDir()\n\t})\n\tif found == \"\" {\n\t\treturn \"./\", false\n\t}\n\n\treturn found, true\n}", "func FindGithubRepo(ctx context.Context, file, githubInstance, remoteName string) (string, error) {\n\tif remoteName == \"\" {\n\t\tremoteName = \"origin\"\n\t}\n\n\turl, err := findGitRemoteURL(ctx, file, remoteName)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\t_, slug, err := findGitSlug(url, githubInstance)\n\treturn slug, err\n}", "func (s *tagStore) Repository(ctx context.Context, t *models.Tag) (*models.Repository, error) {\n\tdefer metrics.InstrumentQuery(\"tag_repository\")()\n\tq := 
`SELECT\n\t\t\tid,\n\t\t\ttop_level_namespace_id,\n\t\t\tname,\n\t\t\tpath,\n\t\t\tparent_id,\n\t\t\tcreated_at,\n\t\t\tupdated_at\n\t\tFROM\n\t\t\trepositories\n\t\tWHERE\n\t\t\ttop_level_namespace_id = $1\n\t\t\tAND id = $2`\n\trow := s.db.QueryRowContext(ctx, q, t.NamespaceID, t.RepositoryID)\n\n\treturn scanFullRepository(row)\n}", "func (is *ImageStoreLocal) GetNextRepository(repo string) (string, error) {\n\tvar lockLatency time.Time\n\n\tdir := is.rootDir\n\n\tis.RLock(&lockLatency)\n\tdefer is.RUnlock(&lockLatency)\n\n\t_, err := os.ReadDir(dir)\n\tif err != nil {\n\t\tis.log.Error().Err(err).Msg(\"failure walking storage root-dir\")\n\n\t\treturn \"\", err\n\t}\n\n\tfound := false\n\tstore := \"\"\n\terr = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif !info.IsDir() {\n\t\t\treturn nil\n\t\t}\n\n\t\trel, err := filepath.Rel(is.rootDir, path)\n\t\tif err != nil {\n\t\t\treturn nil //nolint:nilerr // ignore paths not relative to root dir\n\t\t}\n\n\t\tok, err := is.ValidateRepo(rel)\n\t\tif !ok || err != nil {\n\t\t\treturn nil //nolint:nilerr // ignore invalid repos\n\t\t}\n\n\t\tif repo == \"\" && ok && err == nil {\n\t\t\tstore = rel\n\n\t\t\treturn io.EOF\n\t\t}\n\n\t\tif found {\n\t\t\tstore = rel\n\n\t\t\treturn io.EOF\n\t\t}\n\n\t\tif rel == repo {\n\t\t\tfound = true\n\t\t}\n\n\t\treturn nil\n\t})\n\n\treturn store, err\n}", "func NewRepoFinder(root string) *RepoFinder {\n\treturn &RepoFinder{\n\t\troot: root,\n\t\tmaxWorkers: maxWorkers,\n\t}\n}", "func getRepo(repos []config.Repository, repoName string) (config.Repository, bool) {\n\tfor _, repo := range repos {\n\t\tif repo.Name == repoName {\n\t\t\treturn repo, true\n\t\t}\n\t}\n\treturn config.Repository{}, false\n}", "func (g GlobalCfg) MatchingRepo(repoID string) *Repo {\n\tfor i := len(g.Repos) - 1; i >= 0; i-- {\n\t\trepo := g.Repos[i]\n\t\tif repo.IDMatches(repoID) {\n\t\t\treturn &repo\n\t\t}\n\t}\n\treturn 
nil\n}", "func OpenRepository(opts GlobalOptions) (*repository.Repository, error) {\n\tif opts.Repo == \"\" {\n\t\treturn nil, errors.Fatal(\"Please specify repository location (-r)\")\n\t}\n\n\tbe, err := open(opts.Repo, opts, opts.extended)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbe = backend.NewRetryBackend(be, 10, func(msg string, err error, d time.Duration) {\n\t\tWarnf(\"%v returned error, retrying after %v: %v\\n\", msg, d, err)\n\t})\n\n\ts := repository.New(be)\n\n\tpasswordTriesLeft := 1\n\tif stdinIsTerminal() && opts.password == \"\" {\n\t\tpasswordTriesLeft = 3\n\t}\n\n\tfor ; passwordTriesLeft > 0; passwordTriesLeft-- {\n\t\topts.password, err = ReadPassword(opts, \"enter password for repository: \")\n\t\tif err != nil && passwordTriesLeft > 1 {\n\t\t\topts.password = \"\"\n\t\t\tfmt.Printf(\"%s. Try again\\n\", err)\n\t\t}\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\terr = s.SearchKey(opts.ctx, opts.password, maxKeys, opts.KeyHint)\n\t\tif err != nil && passwordTriesLeft > 1 {\n\t\t\topts.password = \"\"\n\t\t\tfmt.Printf(\"%s. 
Try again\\n\", err)\n\t\t}\n\t}\n\tif err != nil {\n\t\tif errors.IsFatal(err) {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, errors.Fatalf(\"%s\", err)\n\t}\n\n\tif stdoutIsTerminal() && !opts.JSON {\n\t\tid := s.Config().ID\n\t\tif len(id) > 8 {\n\t\t\tid = id[:8]\n\t\t}\n\t\tif !opts.JSON {\n\t\t\tVerbosef(\"repository %v opened successfully, password is correct\\n\", id)\n\t\t}\n\t}\n\n\tif opts.NoCache {\n\t\treturn s, nil\n\t}\n\n\tc, err := cache.New(s.Config().ID, opts.CacheDir)\n\tif err != nil {\n\t\tWarnf(\"unable to open cache: %v\\n\", err)\n\t\treturn s, nil\n\t}\n\n\tif c.Created && !opts.JSON {\n\t\tVerbosef(\"created new cache in %v\\n\", c.Base)\n\t}\n\n\t// start using the cache\n\ts.UseCache(c)\n\n\toldCacheDirs, err := cache.Old(c.Base)\n\tif err != nil {\n\t\tWarnf(\"unable to find old cache directories: %v\", err)\n\t}\n\n\t// nothing more to do if no old cache dirs could be found\n\tif len(oldCacheDirs) == 0 {\n\t\treturn s, nil\n\t}\n\n\t// cleanup old cache dirs if instructed to do so\n\tif opts.CleanupCache {\n\t\tPrintf(\"removing %d old cache dirs from %v\\n\", len(oldCacheDirs), c.Base)\n\n\t\tfor _, item := range oldCacheDirs {\n\t\t\tdir := filepath.Join(c.Base, item.Name())\n\t\t\terr = fs.RemoveAll(dir)\n\t\t\tif err != nil {\n\t\t\t\tWarnf(\"unable to remove %v: %v\\n\", dir, err)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tif stdoutIsTerminal() {\n\t\t\tVerbosef(\"found %d old cache directories in %v, run `restic cache --cleanup` to remove them\\n\",\n\t\t\t\tlen(oldCacheDirs), c.Base)\n\t\t}\n\t}\n\n\treturn s, nil\n}", "func (p *Plugin) Find(ctx context.Context, droneRequest *config.Request) (*drone.Config, error) {\n\tsomeUuid := uuid.New()\n\tlogrus.Infof(\"%s %s/%s started\", someUuid, droneRequest.Repo.Namespace, droneRequest.Repo.Name)\n\tdefer logrus.Infof(\"%s finished\", someUuid)\n\n\t// connect to scm\n\tclient, err := p.NewScmClient(ctx, someUuid, droneRequest.Repo)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq := 
request{\n\t\tRequest: droneRequest,\n\t\tUUID: someUuid,\n\t\tClient: client,\n\t}\n\n\t// make sure this plugin is enabled for the requested repo slug\n\tif ok := p.allowlisted(&req); !ok {\n\t\t// do the default behavior by returning nil, nil\n\t\treturn nil, nil\n\t}\n\n\t// avoid running for jsonnet or starlark configurations\n\tif !strings.HasSuffix(droneRequest.Repo.Config, \".yaml\") && !strings.HasSuffix(droneRequest.Repo.Config, \".yml\") {\n\t\treturn nil, nil\n\t}\n\n\t// load the considerFile entries, if configured for considerFile\n\tif req.ConsiderData, err = p.newConsiderDataFromRequest(ctx, &req); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn p.getConfig(ctx, &req)\n}", "func Find() (string, error) {\n\tp, err := os.Getwd()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Cannot get working dir: %v\", err)\n\t}\n\n\treturn findRecursiveGoMod(p)\n}", "func findInDir(file, dir string, env environ.Env) (string, error) {\n\tvar exts []string\n\tif x, ok := env.Lookup(`PATHEXT`); ok {\n\t\tfor _, e := range strings.Split(strings.ToLower(x), `;`) {\n\t\t\tif e == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif e[0] != '.' 
{\n\t\t\t\te = \".\" + e\n\t\t\t}\n\t\t\texts = append(exts, e)\n\t\t}\n\t} else {\n\t\texts = []string{\".com\", \".exe\", \".bat\", \".cmd\"}\n\t}\n\n\tif f, err := findExecutable(filepath.Join(dir, file), exts); err == nil {\n\t\treturn f, nil\n\t}\n\treturn \"\", errors.New(\"not found\")\n}", "func DetermineRepoDir(template, abbreviations, cloneToDir string, checkout,\n\tnoInput bool, password, directory string) {\n\tcleanUp := false\n\tif IsZipFile(template) {\n\t\t_ = UnZip(\"dir\", false, \"password\")\n\t\tcleanUp = true\n\t} else if IsRepoURL(template) {\n\t\tcleanUp = false\n\t} else {\n\t\tcleanUp = false\n\t}\n\tfmt.Println(cleanUp)\n\n}", "func Repo(path string) (*git.Repository, error) {\n\trepo, repoErr := git.PlainOpen(path)\n\n\tif repoErr != nil {\n\t\treturn nil, repoErr\n\t}\n\n\treturn repo, repoErr\n}", "func (v Repository) RepoDir() string {\n\tif path.IsDir(v.DotGit) {\n\t\treturn v.DotGit\n\t}\n\treturn v.GitDir\n}", "func DoFind(clnt Client, ctx *cli.Context) {\n\tpathnameParts := strings.SplitAfter(ctx.Args().Get(0), \"/\")\n\talias := strings.TrimSuffix(pathnameParts[0], \"/\")\n\t_, err := getHostConfig(alias)\n\n\t// iterate over all content which is within the given directory\n\tfor content := range clnt.List(true, false, DirNone) {\n\t\tfileContent := parseContent(content)\n\t\tfilePath := fileContent.Key\n\n\t\t// traversing in a object store not a file path\n\t\tif err == nil {\n\t\t\tfilePath = path.Join(alias, filePath)\n\t\t}\n\n\t\tif ctx.String(\"maxdepth\") != \"\" {\n\t\t\ti, e := strconv.Atoi(ctx.String(\"maxdepth\"))\n\t\t\ts := \"\"\n\n\t\t\tfatalIf(probe.NewError(e), \"Error parsing string passed to flag maxdepth\")\n\n\t\t\t// we are going to be parsing the path by x amounts\n\t\t\tpathParts := strings.SplitAfter(filePath, \"/\")\n\n\t\t\t// handle invalid params\n\t\t\t// ex. 
user specifies:\n\t\t\t// maxdepth 2, but the given object only has a maxdepth of 1\n\t\t\tif (len(pathParts)-1) < i || i < 0 {\n\n\t\t\t\t// -1 is meant to handle each the array being 0 indexed, but the size not being 0 indexed\n\t\t\t\ti = len(pathParts) - 1\n\t\t\t}\n\n\t\t\t// append portions of path into a string\n\t\t\tfor j := 0; j <= i; j++ {\n\t\t\t\ts += pathParts[j]\n\t\t\t}\n\n\t\t\tfilePath = s\n\t\t\tfileContent.Key = s\n\t\t}\n\n\t\t// maxdepth can modify the filepath to end in a directory\n\t\t// to be consistent with find we do not want to be listing directories\n\t\t// so any parms which end in / will be ignored\n\t\tif !strings.HasSuffix(filePath, \"/\") {\n\n\t\t\torBool := ctx.Bool(\"or\")\n\n\t\t\tmatch := fileContentMatch(fileContent, orBool, ctx)\n\n\t\t\tif match && ctx.String(\"print\") != \"\" {\n\t\t\t\tdoFindPrint(filePath, ctx, fileContent)\n\t\t\t} else if match {\n\t\t\t\tprintMsg(findMSG{\n\t\t\t\t\tPath: filePath,\n\t\t\t\t})\n\t\t\t}\n\n\t\t\tif !ctx.Bool(\"watch\") && match && ctx.String(\"exec\") != \"\" {\n\t\t\t\tdoFindExec(ctx, filePath, fileContent)\n\t\t\t}\n\t\t}\n\n\t\tif ctx.Bool(\"watch\") {\n\t\t\tdoFindWatch(ctx.Args().Get(0), ctx)\n\t\t}\n\t}\n\n}", "func (l *StandardLogger) RepoNotFound(id string) {\n\tl.Errorf(repoNotFound.message, id)\n}", "func (c *TestContext) FindBinDir() string {\n\treturn c.findRepoFile(\"bin\")\n}", "func (reg *registry) Repositories(ctx context.Context, repos []string, last string) (n int, err error) {\n\tvar finishedWalk bool\n\tvar foundRepos []string\n\n\tif len(repos) == 0 {\n\t\treturn 0, errors.New(\"no space in slice\")\n\t}\n\n\troot, err := pathFor(repositoriesRootPathSpec{})\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\terr = reg.blobStore.driver.Walk(ctx, root, func(fileInfo driver.FileInfo) error {\n\t\terr := handleRepository(fileInfo, root, last, func(repoPath string) error {\n\t\t\tfoundRepos = append(foundRepos, repoPath)\n\t\t\treturn nil\n\t\t})\n\t\tif err != nil 
{\n\t\t\treturn err\n\t\t}\n\n\t\t// if we've filled our array, no need to walk any further\n\t\tif len(foundRepos) == len(repos) {\n\t\t\tfinishedWalk = true\n\t\t\treturn driver.ErrSkipDir\n\t\t}\n\n\t\treturn nil\n\t})\n\n\tn = copy(repos, foundRepos)\n\n\tif err != nil {\n\t\treturn n, err\n\t} else if !finishedWalk {\n\t\t// We didn't fill buffer. No more records are available.\n\t\treturn n, io.EOF\n\t}\n\n\treturn n, err\n}", "func (r *LocalRegistry) findPackageByRepoAndId(name *core.PackageName, id string) *Package {\n\tfor _, repository := range r.Repositories {\n\t\trep := fmt.Sprintf(\"%s/%s/%s\", name.Domain, name.Group, name.Name)\n\t\tif rep == repository.Repository {\n\t\t\tfor _, pack := range repository.Packages {\n\t\t\t\tif pack.Id == id {\n\t\t\t\t\treturn pack\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func FindDir(dir string) (string, bool) {\n\tcommonBaseSearchPaths := []string{\n\t\t\".\",\n\t\t\"..\",\n\t\t\"../..\",\n\t\t\"../../..\",\n\t\t\"../../../..\",\n\t}\n\tfound := fileutils.FindPath(dir, commonBaseSearchPaths, func(fileInfo os.FileInfo) bool {\n\t\treturn fileInfo.IsDir()\n\t})\n\tif found == \"\" {\n\t\treturn \"./\", false\n\t}\n\n\treturn found, true\n}", "func (connection *Connection) searchRepository(adabasID *ID, repository *Repository,\n\tmapName string) (err error) {\n\tif repository == nil {\n\t\tadatypes.Central.Log.Debugf(\"Search in global repositories\")\n\t\tconnection.adabasMap, _, err = SearchMapRepository(adabasID, mapName)\n\t\tif err != nil {\n\t\t\tadatypes.Central.Log.Debugf(\"Search in global repositories fail: %v\", err)\n\t\t\treturn err\n\t\t}\n\t\tif connection.adabasMap == nil {\n\t\t\treturn adatypes.NewGenericError(85, mapName)\n\t\t}\n\t} else {\n\t\tadatypes.Central.Log.Debugf(\"Search in given repository %v: %s\", repository, repository.DatabaseURL.URL.String())\n\t\tconnection.adabasToMap = adabasID.getAdabas(&repository.DatabaseURL.URL)\n\t\t// 
NewAdabas(repository.DatabaseURL.URL.String(), adabasID)\n\t\tif err != nil {\n\t\t\tadatypes.Central.Log.Debugf(\"New Adabas to map ID error: %v\", err)\n\t\t\treturn err\n\t\t}\n\t\tconnection.adabasMap, err = repository.SearchMap(connection.adabasToMap, mapName)\n\t\tif err != nil {\n\t\t\tadatypes.Central.Log.Debugf(\"Search map error: %v\", err)\n\t\t\treturn err\n\t\t}\n\t}\n\tadatypes.Central.Log.Debugf(\"Found map %s\", connection.adabasMap.Name)\n\tif connection.adabasMap.URL().String() != connection.adabasToMap.URL.String() {\n\t\tconnection.adabasToMap = connection.ID.getAdabas(connection.adabasMap.URL())\n\t}\n\tif connection.adabasMap.URL().String() == connection.adabasMap.Data.URL.String() {\n\t\tadatypes.Central.Log.Debugf(\"Same URL %v -> %v\", connection.adabasMap.URL().String(), connection.adabasToMap.URL)\n\t\tconnection.adabasToData = connection.adabasToMap\n\t} else {\n\t\tadatypes.Central.Log.Debugf(\"Create new Adabas URL %v!=%v\", connection.adabasMap.URL().String(), connection.adabasMap.Data.URL.String())\n\t\tconnection.adabasToData = adabasID.getAdabas(&connection.adabasMap.Data.URL)\n\t\t// NewAdabas(&connection.adabasMap.Data.URL, adabasID)\n\t\t// if err != nil {\n\t\t// \tadatypes.Central.Log.Debugf(\"Error new ADabas URL %v\", err)\n\t\t// \treturn err\n\t\t// }\n\t}\n\tadatypes.Central.Log.Debugf(\"Final error: %v\", err)\n\treturn\n}", "func (repo Repository) GetByDir(dir string) (data [][]byte, err error) {\n\tfullPath := path.Join(repo.StorageDir, dir)\n\tfiles, err := ioutil.ReadDir(fullPath)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor _, file := range files {\n\t\tnewFile := []byte{}\n\t\tfilePath := path.Join(fullPath, file.Name())\n\t\tf, err := os.Open(filePath)\n\t\tif err != nil {\n\t\t\treturn data, err\n\t\t}\n\t\t_, err = f.Read(newFile)\n\t\tif err != nil {\n\t\t\treturn data, err\n\t\t}\n\n\t\tdata = append(data, newFile)\n\t}\n\n\treturn\n}", "func findPublicRepo(importPath string) (RemoteRepo, error) {\n\tfor _, 
host := range knownHosts {\n\t\tif hm := host.pattern.FindStringSubmatch(importPath); hm != nil {\n\t\t\treturn host.repo(hm[1])\n\t\t}\n\t}\n\treturn nil, nil\n}", "func (r *repo) Search(query string) ([]dbmodels.Repository, error) {\n\trc := r.db.Database().Collection(viper.GetString(\"db.repository_collection\"))\n\tctx, cancel := context.WithTimeout(\n\t\tcontext.Background(),\n\t\ttime.Duration(viper.GetInt(\"db.query_timeout_in_sec\"))*time.Second,\n\t)\n\tdefer cancel()\n\n\tfilter := bson.D{\n\t\t{\"$or\", []interface{}{\n\t\t\tbson.D{{\"name\", primitive.Regex{Pattern: query, Options: \"i\"}}},\n\t\t\tbson.D{{\"full_name\", primitive.Regex{Pattern: query, Options: \"i\"}}},\n\t\t}},\n\t}\n\n\tfilterCursor, err := rc.Find(ctx, filter)\n\tif err != nil {\n\t\treturn []dbmodels.Repository{}, fmt.Errorf(\"Failed to search repos with error %v\", err)\n\t}\n\n\tvar repos []dbmodels.Repository\n\tif err = filterCursor.All(ctx, &repos); err != nil {\n\t\treturn []dbmodels.Repository{}, fmt.Errorf(\"Failed to search repos with error %v\", err)\n\t}\n\n\treturn repos, nil\n}", "func FindGitConfig(dir string) (string, error) {\n\tvar err error\n\n\tif dir, err = findGitDir(dir); err != nil {\n\t\treturn \"\", err\n\t}\n\tif dir, err = getGitCommonDir(dir); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn filepath.Join(dir, \"config\"), nil\n}", "func GetRepository(name string) *Repository {\n\tfor i := range repos {\n\t\tif repos[i].Name == name {\n\t\t\treturn &repos[i]\n\t\t}\n\t}\n\tlog.Fatalln(\"Can't not find repository.\")\n\treturn nil\n}", "func (r *Resolver) Repository() generated.RepositoryResolver { return &repositoryResolver{r} }", "func find(dir, name string, results []*string) []*string {\n\tif !strings.HasSuffix(dir, conf.PathSeparator) {\n\t\tdir += conf.PathSeparator\n\t}\n\n\tf, _ := os.Open(dir)\n\tfileInfos, err := f.Readdir(-1)\n\tf.Close()\n\n\tif nil != err {\n\t\tlogger.Errorf(\"Read dir [%s] failed: [%s]\", dir, err.Error())\n\n\t\treturn 
results\n\t}\n\n\tfor _, fileInfo := range fileInfos {\n\t\tfname := fileInfo.Name()\n\t\tpath := dir + fname\n\n\t\tif fileInfo.IsDir() {\n\t\t\tif gulu.Str.Contains(fname, defaultExcludesFind) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// enter the directory recursively\n\t\t\tresults = find(path, name, results)\n\t\t} else {\n\t\t\t// match filename\n\t\t\tpattern := filepath.Dir(path) + conf.PathSeparator + name\n\n\t\t\tmatch, err := filepath.Match(strings.ToLower(pattern), strings.ToLower(path))\n\n\t\t\tif nil != err {\n\t\t\t\tlogger.Errorf(\"Find match filename failed: [%s]\", err.Error())\n\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif match {\n\t\t\t\tresults = append(results, &path)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn results\n}", "func (sry *Sryun) Repo(user *model.User, owner, name string) (*model.Repo, error) {\n\trepo := &model.Repo{}\n\trepo.Owner = owner\n\trepo.Name = name\n\trepo.FullName = fmt.Sprintf(\"%s/%s\", owner, name)\n\trepo.Link = repoLink\n\trepo.IsPrivate = true\n\trepo.Clone = clone\n\trepo.Branch = branch\n\trepo.Avatar = sry.User.Avatar\n\trepo.Kind = model.RepoGit\n\n\treturn repo, nil\n}", "func FindInDir(path string, filename string) (string, error) {\n\tcurrent, _ := filepath.Abs(path)\n\n\tfor {\n\t\tmatches, err := filepath.Glob(filepath.Join(current, filename))\n\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tif len(matches) > 0 {\n\t\t\treturn matches[0], nil\n\t\t}\n\n\t\t// Give up if reached file system root\n\t\tif current == \"/\" {\n\t\t\tbreak\n\t\t}\n\n\t\tcurrent = filepath.Join(current, \"..\")\n\t}\n\n\treturn \"\", errors.New(\"no matching files found\")\n}", "func FindFoldedDir(dir string, query string, preferExact bool) (name string, err error) {\n\tif preferExact {\n\t\treturn findFoldedDirPreferExact(dir, query)\n\t} else {\n\t\treturn findFoldedDirNoExact(dir, query)\n\t}\n}", "func (s *Submodule) Repository() (*Repository, error) {\n\tstorer, err := s.w.r.Storer.Module(s.c.Name)\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\n\t_, err = storer.Reference(plumbing.HEAD)\n\tif err != nil && err != plumbing.ErrReferenceNotFound {\n\t\treturn nil, err\n\t}\n\n\tworktree := s.w.fs.Dir(s.c.Path)\n\tif err == nil {\n\t\treturn Open(storer, worktree)\n\t}\n\n\tr, err := Init(storer, worktree)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t_, err = r.CreateRemote(&config.RemoteConfig{\n\t\tName: DefaultRemoteName,\n\t\tURL: s.c.URL,\n\t})\n\n\treturn r, err\n}", "func (s searcher) Search(ctx context.Context, query SearchQuery) (\n\tresp *SearchResult, err error) {\n\n\tsw := stopwatch.New()\n\tsw.Start(\"total\")\n\n\tvar (\n\t\trepo *Repo\n\t\tshards []string\n\t\tirepo interface{}\n\t\twaitingFor int\n\t\tch chan *SearchResult\n\t)\n\tresp = NewSearchResult()\n\n\t// If the repo is not found or is not available to search,\n\t// exit with a RepoUnavailable error. Ignore repo being\n\t// indexed currently.\n\trepokey := query.firstKey()\n\tlog.Info(\"search [%s] [path %s] local repo=%v\", query.Re, query.PathRe, repokey)\n\n\tif repokey == \"\" {\n\t\tresp.Error = kRepoKeyEmptyError.Error()\n\t\tlog.Debug(\"search backend error %v\", resp.Error)\n\t\tgoto done\n\t}\n\tirepo = s.repos.Get(repokey)\n\tif irepo == nil {\n\t\tresp.Errors[repokey] = kRepoUnavailableError\n\t\tgoto done\n\t}\n\trepo = irepo.(*Repo)\n\tresp.Repos[repokey] = repo\n\n\tif repo.State == INDEXING {\n\t\t// The repo is (con)currently indexing\n\t\tgoto done\n\t} else if repo.State != OK {\n\t\t// The repo is currently in error, exit early\n\t\t// and potentially delete the offending repo.\n\t\tresp.Errors[repokey] = kRepoUnavailableError\n\t\tif s.cfg.DeleteRepoOnError {\n\t\t\t_ = s.repos.Delete(repokey)\n\t\t}\n\t\tgoto done\n\t}\n\n\tshards = repo.Shards()\n\twaitingFor = len(shards)\n\tch = make(chan *SearchResult, waitingFor+1)\n\tdefer close(ch)\n\n\tfor _, shard := range shards {\n\t\tgo func(r *Repo, fname string) {\n\t\t\tsr, e := searchLocal(ctx, query, r, 
fname)\n\t\t\tsr.Repos[r.Key] = r\n\t\t\tif e != nil {\n\t\t\t\t// Report the error, possibly marking the repo as unavailable\n\t\t\t\t// and if so, potentially deleting it if configured to do so.\n\t\t\t\tif os.IsNotExist(e) || os.IsPermission(e) {\n\t\t\t\t\tlog.Warning(\"repo [%s] not available error: %v\", r.Key, e)\n\t\t\t\t\tr.State = ERROR\n\t\t\t\t\tsr.Errors[r.Key] = errs.NewStructError(\n\t\t\t\t\t\terrs.NewRepoUnavailableError())\n\n\t\t\t\t\tif s.cfg.DeleteRepoOnError {\n\t\t\t\t\t\t_ = s.repos.Delete(r.Key)\n\t\t\t\t\t} else {\n\t\t\t\t\t\t_ = s.repos.Set(r.Key, r)\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tsr.Errors[r.Key] = errs.NewStructError(e)\n\t\t\t\t}\n\t\t\t}\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\t// Report the timeout\n\t\t\t\tresp.Errors[repokey] = kSearchTimeoutError\n\t\t\tdefault:\n\t\t\t\tch <- sr\n\t\t\t}\n\n\t\t}(repo, shard)\n\t}\n\n\t// Await goroutine completion either in error or otherwise\n\tfor waitingFor > 0 {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tbreak\n\t\tcase in := <-ch:\n\t\t\tlog.Debug(\"got one %q\", in)\n\t\t\tresp.Update(in)\n\t\t\twaitingFor--\n\t\t}\n\t}\ndone:\n\tresp.Durations.Search = sw.Stop(\"total\")\n\tlog.Info(\"search [%s] [path %s] local done %d matches errors=%v (%v)\",\n\t\tquery.Re, query.PathRe, resp.NumMatches, resp.Errors, resp.Durations.Search)\n\treturn\n}", "func FindInGoSearchPath(pkg string) string {\n\treturn FindInSearchPath(FullGoSearchPath(), pkg)\n}", "func (ur *FooRepository) Find() {\n\t// do something\n\tur.sqlHandler.Execute()\n}", "func search() {\n\tr := *repo\n\ti := *issue\n\trepos := setRepositories()\n\n\tfor key, value := range repos {\n\t\tif repos[key].name == r {\n\t\t\tclient := &http.Client{}\n\t\t\trepoName := value\n\t\t\treq, err := http.NewRequest(\"GET\", \"https://api.github.com/search/issues\", nil)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"[ERROR] Some issue with request.\")\n\t\t\t}\n\t\t\tq := req.URL.Query()\n\t\t\tq.Add(\"q\", 
fmt.Sprintf(\"%s+repo:%s\", i, repoName.repo))\n\t\t\treq.URL.RawQuery = q.Encode()\n\t\t\tresp, _ := client.Do(req)\n\t\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\t\tdefer resp.Body.Close()\n\t\t\tprintResults(body)\n\t\t}\n\t}\n}", "func RepoFindTodo(id int) Todo {\n\tfor _, todo := range todos {\n\t\tif todo.Id == id {\n\t\t\treturn todo\n\t\t}\n\t}\n\treturn Todo{}\n}", "func (s *Subrepo) Dir(dir string) string {\n\treturn filepath.Join(s.Root, dir)\n}", "func (r *Registry) Repository(ctx context.Context, name string) (registry.Repository, error) {\n\tref := registry.Reference{\n\t\tRegistry: r.Reference.Registry,\n\t\tRepository: name,\n\t}\n\treturn newRepositoryWithOptions(ref, &r.RepositoryOptions)\n}", "func FindInSearchPath(searchPath, pkg string) string {\n\tpathsList := filepath.SplitList(searchPath)\n\tfor _, path := range pathsList {\n\t\tif evaluatedPath, err := filepath.EvalSymlinks(filepath.Join(path, \"src\", pkg)); err == nil {\n\t\t\tif _, err := os.Stat(evaluatedPath); err == nil {\n\t\t\t\treturn evaluatedPath\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\"\n}", "func (f *finder) Find(dir string) ([]string, error) {\n\tdockerfiles := []string{}\n\terr := f.fsWalk(dir, func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Skip hidden directories.\n\t\tif info.IsDir() && strings.HasPrefix(info.Name(), \".\") {\n\t\t\treturn filepath.SkipDir\n\t\t}\n\t\t// Add relative path to Dockerfile.\n\t\tif isDockerfile(info) {\n\t\t\trelpath, err := filepath.Rel(dir, path)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdockerfiles = append(dockerfiles, relpath)\n\t\t}\n\t\treturn nil\n\t})\n\treturn dockerfiles, err\n}", "func (repository *Repository) SearchMapInRepository(adabas *Adabas, mapName string) (adabasMap *Map, err error) {\n\tadatypes.Central.Log.Debugf(\"Map repository: %#v\", repository)\n\tvar dbid Dbid\n\tdbid, err = repository.DatabaseURL.dbid()\n\tif err != nil 
{\n\t\tfmt.Printf(\"Error dbid %v\\n\", err)\n\t\treturn\n\t}\n\tif adabas == nil {\n\t\terr = adatypes.NewGenericError(48)\n\t\treturn\n\t}\n\tadatypes.Central.Log.Debugf(\"Database id %d got map repository dbid %d\", adabas.Acbx.Acbxdbid, dbid)\n\tif adabas.Acbx.Acbxdbid == 0 && adabas.Acbx.Acbxdbid != dbid {\n\t\tadabas.Close()\n\t\tadabas.Acbx.Acbxdbid = dbid\n\t\tadatypes.Central.Log.Debugf(\"Set new dbid after map %s to %d\", mapName, dbid)\n\t\tadatypes.Central.Log.Debugf(\"Call search in repository using Adabas %s/%03d\", adabas.URL.String(), adabas.Acbx.Acbxfnr)\n\t}\n\tadatypes.Central.Log.Debugf(\"Load repository of %s/%d\", repository.URL.String(), repository.Fnr)\n\terr = repository.LoadMapRepository(adabas)\n\tif err != nil {\n\t\tadatypes.Central.Log.Debugf(\"Error loading repository %v\", err)\n\t\treturn\n\t}\n\tadatypes.Central.Log.Debugf(\"Read map in repository of %s/%d\", repository.URL.String(), repository.Fnr)\n\tadabasMap, err = repository.readAdabasMap(adabas, mapName)\n\tif err != nil {\n\t\treturn\n\t}\n\tadatypes.Central.Log.Debugf(\"Found map <%s> in repository of %s/%d\", adabasMap.Name, repository.URL.String(), repository.Fnr)\n\trepository.AddMapToCache(mapName, adabasMap)\n\treturn\n}", "func FindRootRepoPath() (string, error) {\n\tpwd, err := os.Getwd()\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"Error getting pwd: \", err)\n\t\tos.Exit(1)\n\t}\n\n\tparts := strings.SplitAfter(pwd, string(os.PathSeparator))\n\tfor i, _ := range parts {\n\t\ttestPath := path.Join(parts[:i+1]...)\n\t\tif IsRepo(testPath) {\n\t\t\treturn testPath, nil\n\t\t}\n\t}\n\n\t// Return pwd in case we're cloning into pwd.\n\treturn pwd, fmt.Errorf(\"No .git found in %s or any parent dir.\", pwd)\n}", "func Find(path string, fn func(s string, i os.FileInfo)) (err error) {\n\tdirh, err := os.Open(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer dirh.Close()\n\n\tinfoSlice := make([]os.FileInfo, 1)\n\tinfoSlice, err = dirh.Readdir(1)\n\n\tif err != 
nil {\n\t\treturn err\n\t}\n\n\tfor err != io.EOF {\n\t\tinfo := infoSlice[0]\n\t\tif first {\n\t\t\tfn(path, info)\n\t\t} else {\n\t\t\tfn(path +\"/\"+ info.Name(), info)\n\t\t}\n\t\tfirst = false\n\n\t\tif info.Mode().IsDir() {\n\t\t\tFind(path +\"/\"+ info.Name(), fn)\n\t\t}\n\t\tinfoSlice, err = dirh.Readdir(1)\n\t}\n\treturn nil\n}", "func FindHandler(w http.ResponseWriter, r *http.Request) {\n\thttpSession, _ := session.HTTPSession.Get(r, session.CookieName)\n\tif httpSession.IsNew {\n\t\thttp.Error(w, \"Forbidden\", http.StatusForbidden)\n\n\t\treturn\n\t}\n\tuid := httpSession.Values[\"uid\"].(string)\n\n\tresult := gulu.Ret.NewResult()\n\tdefer gulu.Ret.RetResult(w, r, result)\n\n\tvar args map[string]interface{}\n\tif err := json.NewDecoder(r.Body).Decode(&args); err != nil {\n\t\tlogger.Error(err)\n\t\tresult.Code = -1\n\n\t\treturn\n\t}\n\n\tpath := args[\"path\"].(string) // path of selected file in file tree\n\tif !gulu.Go.IsAPI(path) && !session.CanAccess(uid, path) {\n\t\thttp.Error(w, \"Forbidden\", http.StatusForbidden)\n\n\t\treturn\n\t}\n\n\tname := args[\"name\"].(string)\n\n\tuserWorkspace := conf.GetUserWorkspace(uid)\n\tworkspaces := filepath.SplitList(userWorkspace)\n\n\tif \"\" != path && !gulu.File.IsDir(path) {\n\t\tpath = filepath.Dir(path)\n\t}\n\n\tfounds := foundPaths{}\n\n\tfor _, workspace := range workspaces {\n\t\trs := find(workspace+conf.PathSeparator+\"src\", name, []*string{})\n\n\t\tfor _, r := range rs {\n\t\t\tsubstr := gulu.Str.LCS(path, *r)\n\n\t\t\tfounds = append(founds, &foundPath{Path: filepath.ToSlash(*r), score: len(substr)})\n\t\t}\n\t}\n\n\tsort.Sort(founds)\n\n\tresult.Data = founds\n}", "func (this *SearchDir) SearchDirs(root, pattern string) {\n this.search(root, pattern, false, true)\n}", "func GetDpsRepository() (obj DpsRepository, err error) {\n\tif repoObj == nil {\n\t\t//ERROR\n\t}\n\tobj = *repoObj\n\treturn\n}", "func Repository_FromRepositoryAttributes(scope constructs.Construct, id *string, attrs 
*RepositoryAttributes) IRepository {\n\t_init_.Initialize()\n\n\tvar returns IRepository\n\n\t_jsii_.StaticInvoke(\n\t\t\"monocdk.aws_ecr.Repository\",\n\t\t\"fromRepositoryAttributes\",\n\t\t[]interface{}{scope, id, attrs},\n\t\t&returns,\n\t)\n\n\treturn returns\n}", "func FindCurrency(exec boil.Executor, iD int, selectCols ...string) (*Currency, error) {\n\tcurrencyObj := &Currency{}\n\n\tsel := \"*\"\n\tif len(selectCols) > 0 {\n\t\tsel = strings.Join(strmangle.IdentQuoteSlice(dialect.LQ, dialect.RQ, selectCols), \",\")\n\t}\n\tquery := fmt.Sprintf(\n\t\t\"select %s from \\\"currency\\\" where \\\"id\\\"=$1\", sel,\n\t)\n\n\tq := queries.Raw(query, iD)\n\n\terr := q.Bind(nil, exec, currencyObj)\n\tif err != nil {\n\t\tif errors.Cause(err) == sql.ErrNoRows {\n\t\t\treturn nil, sql.ErrNoRows\n\t\t}\n\t\treturn nil, errors.Wrap(err, \"models: unable to select from currency\")\n\t}\n\n\treturn currencyObj, nil\n}", "func load(repo *Repo) (err error) {\n \n err = loadRoot(repo)\n if err != nil {\n return\n }\n\n // Watch root directory for changes.\n // No need to remember watcher id as root doesn't change\n // _, err = fwatch.WatchDir(dir, reloadRoot)\n return\n}", "func (d *BaseDevice) Repo() *Repository {\n\treturn d.deviceImpl.Repo()\n}", "func FastInitializeRepo(dir string) (e *RepoEntity, err error) {\n\tf, err := os.Open(dir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// get status of the file\n\tfstat, _ := f.Stat()\n\tr, err := git.PlainOpen(dir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// initialize RepoEntity with minimum viable fields\n\te = &RepoEntity{RepoID: helpers.RandomString(8),\n\t\tName: fstat.Name(),\n\t\tAbsPath: dir,\n\t\tModTime: fstat.ModTime(),\n\t\tRepository: *r,\n\t\tstate: Available,\n\t\tmutex: &sync.RWMutex{},\n\t\tlisteners: make(map[string][]RepositoryListener),\n\t}\n\treturn e, nil\n}", "func (s *Service) ResolveRepository(name string) (*RepositoryInfo, error) {\n\treturn s.Config.NewRepositoryInfo(name)\n}", "func (e 
*cniExec) FindInPath(plugin string, paths []string) (string, error) {\n\treturn invoke.FindInPath(plugin, paths)\n}", "func (g git) OpenRepository(repoDir string) (Repository, error) {\n\treturn g.openRepository(repoDir)\n}", "func (g *GitLab) Repo(ctx context.Context, user *model.User, remoteID model.ForgeRemoteID, owner, name string) (*model.Repo, error) {\n\tclient, err := newClient(g.url, user.Token, g.SkipVerify)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif remoteID.IsValid() {\n\t\tintID, err := strconv.ParseInt(string(remoteID), 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t_repo, _, err := client.Projects.GetProject(int(intID), nil, gitlab.WithContext(ctx))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn g.convertGitLabRepo(_repo)\n\t}\n\n\t_repo, err := g.getProject(ctx, client, owner, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn g.convertGitLabRepo(_repo)\n}", "func HandleFind(\n\trepos core.RepositoryStore,\n\tsecrets core.SecretStore,\n) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tvar (\n\t\t\tnamespace = chi.URLParam(r, \"owner\")\n\t\t\tname = chi.URLParam(r, \"name\")\n\t\t\tsecret = chi.URLParam(r, \"secret\")\n\t\t)\n\t\trepo, err := repos.FindName(r.Context(), namespace, name)\n\t\tif err != nil {\n\t\t\trender.NotFound(w, err)\n\t\t\treturn\n\t\t}\n\t\tresult, err := secrets.FindName(r.Context(), repo.ID, secret)\n\t\tif err != nil {\n\t\t\trender.NotFound(w, err)\n\t\t\treturn\n\t\t}\n\t\tsafe := result.Copy()\n\t\trender.JSON(w, safe, 200)\n\t}\n}", "func (this *SearchDir) Search(root, pattern string) {\n this.search(root, pattern, true, true)\n}", "func OrganizeRepoFolder(c *cli.Context) error {\n\trepoPath, err := filepath.Abs(c.String(\"path\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdryRun := c.Bool(\"dry-run\")\n\n\tif dryRun {\n\t\tfmt.Println(\"#### Running in dry-run mode ####\")\n\t}\n\n\tpaths := make(map[string]bool)\n\two := 
make(map[string]string)\n\n\terr = godirwalk.Walk(repoPath, &godirwalk.Options{\n\t\tCallback: func(osPathname string, de *godirwalk.Dirent) error {\n\t\t\tif de.IsDir() && de.Name() == \".git\" {\n\n\t\t\t\tr, err := git.PlainOpen(osPathname)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Printf(\"[error] Was not able to read git on %s\\n\", osPathname)\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tremote, err := r.Remote(\"origin\")\n\n\t\t\t\t// If repo doesn't have remote, we'll skip.\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Printf(\"[error] Missing remote(s) on %s\\n\", osPathname)\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\n\t\t\t\tre, err := getRepoName(remote)\n\n\t\t\t\t// If repo failed to get repo name, skip it.\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Printf(\"[error] Not able to get repository name on %s\\n\", osPathname)\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\n\t\t\t\tcorrectPath := repoPath + \"/\" + re.Org + \"/\" + re.Name\n\n\t\t\t\tif correctPath+\"/.git\" == osPathname {\n\t\t\t\t\tpaths[correctPath] = true\n\t\t\t\t\two[strings.ReplaceAll(osPathname, \"/.git\", \"\")] = correctPath\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\n\t\t\t\tisAvail := false\n\t\t\t\ti := 0\n\t\t\t\tfor isAvail == false {\n\t\t\t\t\tif !paths[correctPath] {\n\t\t\t\t\t\tpaths[correctPath] = true\n\t\t\t\t\t\tisAvail = true\n\t\t\t\t\t} else {\n\t\t\t\t\t\ti = i + 1\n\t\t\t\t\t\tcorrectPath = correctPath + \"-\" + strconv.Itoa(i)\n\t\t\t\t\t\tfmt.Printf(\"[duplicate] %s will be named %s\\n\", osPathname, correctPath)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\two[strings.ReplaceAll(osPathname, \"/.git\", \"\")] = correctPath\n\t\t\t}\n\t\t\t// fmt.Printf(\"%s %s\\n\", de.ModeType(), osPathname)\n\t\t\treturn nil\n\t\t},\n\t\tUnsorted: true,\n\t})\n\n\tfmt.Printf(\"mv %s %s\\nmkdir %s\\n\", repoPath, repoPath+\"-bak\", repoPath)\n\tif !dryRun {\n\t\tos.Rename(repoPath, repoPath+\"-bak\")\n\t\t_ = os.Mkdir(repoPath, 0700)\n\t}\n\n\t// Loop through the work order\n\tfor s, d := range wo {\n\t\tfmt.Printf(\"mkdir 
%s\\n\", path.Dir(d))\n\t\tif !dryRun {\n\t\t\t_ = os.Mkdir(path.Dir(d), 0700)\n\t\t}\n\t\tns := strings.ReplaceAll(s, repoPath, repoPath+\"-bak\")\n\t\tfmt.Printf(\"mv %s %s\\n\", ns, d)\n\t\tif !dryRun {\n\t\t\tos.Rename(ns, d)\n\t\t}\n\t}\n\n\tfmt.Printf(\n\t\t\"\\n\\n### DONE ###\\nRepositories had issues, are kept in %s.\\n\"+\n\t\t\t\"You should clean that up (at least rename the folder before you run this command again).\", repoPath+\"-bak\")\n\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n\n}", "func (cp Pack) Find(args []string) (Account, error) {\n\treturn cp.FindWithPrompt(args, list.Default())\n}", "func FindCodl(c cookoo.Context, p *cookoo.Params) (interface{}, cookoo.Interrupt) {\n\tdir := cookoo.GetString(\"dir\", \".\", p)\n\n\twhere := filepath.Join(dir, \"*.codl\")\n\tfiles, err := filepath.Glob(where)\n\n\treturn files, err\n}", "func matchGoogleRepo(id string) (RemoteRepo, error) {\n\troot := \"code.google.com/p/\" + id\n\treturn &googleRepo{baseRepo{\"https://\" + root, root, nil}}, nil\n}", "func getRepoName(dir string) (string, error) {\n\tr, err := git.PlainOpen(dir)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error opening git dir %s: %w\", dir, err)\n\t}\n\trm, err := r.Remote(defaultRemote)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error finding remote %s in git dir %s: %w\", defaultRemote, dir, err)\n\t}\n\n\t// validate remote URL\n\tremoteURL, err := url.Parse(rm.Config().URLs[0])\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error parsing remote URL: %w\", err)\n\t}\n\ttrimmedRemotePath := strings.TrimSuffix(remoteURL.Path, \"/\")\n\tsplitRemotePath := strings.Split(trimmedRemotePath, \"/\")\n\t// expect path to be /owner/repo\n\tif len(splitRemotePath) != 3 {\n\t\treturn \"\", fmt.Errorf(\"expected owner/repo, got %s\", trimmedRemotePath)\n\t}\n\treturn splitRemotePath[len(splitRemotePath)-1], nil\n}", "func matchGithubRepo(root string) (RemoteRepo, error) {\n\tif strings.HasSuffix(root, \".git\") 
{\n\t\treturn nil, errors.New(\"path must not include .git suffix\")\n\t}\n\treturn &baseRepo{\"http://\" + root + \".git\", root, vcsMap[\"git\"]}, nil\n}", "func (r *CatalogRepository) Find(searchTerm string, categories []uint64) (products []*catalog.Product, err error) {\n\treturn\n}", "func NewRepo(dbInfo DBInfo) (*Repo, error) {\n\tctx := context.Background()\n\tclient, err := mongo.NewClient(options.Client().ApplyURI(dbInfo.URL))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclient.Connect(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlog.Println(\"Connected to DB\")\n\n\treturn &Repo{\n\t\tclient: client,\n\t\tctx: ctx,\n\t\tdbInfo: dbInfo,\n\t}, err\n\n}" ]
[ "0.67655885", "0.6583548", "0.5961574", "0.5914727", "0.58771306", "0.582271", "0.5820217", "0.5819151", "0.55795795", "0.5564507", "0.5556771", "0.55446625", "0.5502477", "0.5489713", "0.54813373", "0.5453073", "0.5391034", "0.536849", "0.5359692", "0.534294", "0.5341294", "0.53339183", "0.5317599", "0.5293535", "0.529348", "0.52905273", "0.52887005", "0.52735656", "0.5272044", "0.5266766", "0.5265079", "0.52628684", "0.52512896", "0.52153474", "0.5199613", "0.5195508", "0.5191864", "0.5175921", "0.51732206", "0.5163215", "0.5161412", "0.5135421", "0.513228", "0.51273143", "0.5123661", "0.5119647", "0.5112536", "0.51113296", "0.5104611", "0.5088804", "0.5067666", "0.50632435", "0.5052093", "0.50366604", "0.50283504", "0.50112504", "0.5009009", "0.49830633", "0.49650767", "0.49612433", "0.49539417", "0.4920848", "0.49171308", "0.48991007", "0.48965496", "0.488463", "0.4880983", "0.48564348", "0.48345023", "0.48067552", "0.48028335", "0.47958693", "0.4792543", "0.47918385", "0.47874388", "0.47684097", "0.47675678", "0.47647786", "0.4762033", "0.47606888", "0.47580186", "0.47541794", "0.47481114", "0.47480047", "0.47348765", "0.4734773", "0.47058976", "0.46955076", "0.46920007", "0.46849966", "0.46838745", "0.4679419", "0.4668333", "0.46683168", "0.46664765", "0.46622407", "0.4661839", "0.46608093", "0.46574548", "0.46486503" ]
0.7402806
0
CheckSemanticTitle checks if the given PR contains semantic title
func CheckSemanticTitle(pr *gogh.PullRequest, config PluginConfiguration, logger log.Logger) string { change := ghservice.NewRepositoryChangeForPR(pr) prefixes := GetValidTitlePrefixes(config) isTitleWithValidType := HasTitleWithValidType(prefixes, *pr.Title) if !isTitleWithValidType { if prefix, ok := wip.GetWorkInProgressPrefix(*pr.Title, wip.LoadConfiguration(logger, change)); ok { trimmedTitle := strings.TrimPrefix(*pr.Title, prefix) isTitleWithValidType = HasTitleWithValidType(prefixes, trimmedTitle) } } if !isTitleWithValidType { allPrefixes := "`" + strings.Join(prefixes, "`, `") + "`" return fmt.Sprintf(TitleFailureMessage, pr.GetTitle(), allPrefixes) } return "" }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func testFrontMatterTitle(mdBytes []byte) error {\n\tfm, _, err := frontparser.ParseFrontmatterAndContent(mdBytes)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif _, exists := fm[\"title\"]; exists == false {\n\t\treturn errors.New(\"can't find title in frontmatter\")\n\t}\n\treturn nil\n}", "func IsTitle(r rune) bool", "func (me TSearchHITsSortProperty) IsTitle() bool { return me.String() == \"Title\" }", "func (me TGetReviewableHITsSortProperty) IsTitle() bool { return me.String() == \"Title\" }", "func IsTitle(rune int) bool {\n\tif rune < 0x80 {\t// quick ASCII check\n\t\treturn false\n\t}\n\treturn Is(Title, rune);\n}", "func testFrontmatterTitle(path string) error {\n\tif strings.HasSuffix(path, \".md\") {\n\t\tfileBytes, err := ioutil.ReadFile(path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// if file has frontmatter\n\t\tif frontparser.HasFrontmatterHeader(fileBytes) {\n\t\t\tfm, _, err := frontparser.ParseFrontmatterAndContent(fileBytes)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// skip markdowns that are not published\n\t\t\tif published, exists := fm[\"published\"]; exists {\n\t\t\t\tif publishedBool, ok := published.(bool); ok {\n\t\t\t\t\tif publishedBool == false {\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif _, exists := fm[\"title\"]; exists == false {\n\t\t\t\treturn errors.New(\"can't find title in frontmatter\")\n\t\t\t}\n\t\t} else {\n\t\t\t// no frontmatter is not an error\n\t\t\t// markdown files without frontmatter won't be considered\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn nil\n}", "func (o *SecurityProblem) HasTitle() bool {\n\tif o != nil && o.Title != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *Snippet) GetTitleOk() (*string, bool) {\n\tif o == nil || o.Title == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Title, true\n}", "func (o *InlineResponse20034Milestone) HasTitle() bool {\n\tif o != nil && o.Title != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (r 
*readability) getArticleTitle(doc *goquery.Document) string {\n\t// Get title tag\n\ttitle := doc.Find(\"title\").First().Text()\n\ttitle = normalizeText(title)\n\toriginalTitle := title\n\n\t// Create list of separator\n\tseparators := []string{`|`, `-`, `\\`, `/`, `>`, `»`}\n\thierarchialSeparators := []string{`\\`, `/`, `>`, `»`}\n\n\t// If there's a separator in the title, first remove the final part\n\ttitleHadHierarchicalSeparators := false\n\tif idx, sep := findSeparator(title, separators...); idx != -1 {\n\t\ttitleHadHierarchicalSeparators = hasSeparator(title, hierarchialSeparators...)\n\n\t\tindex := strings.LastIndex(originalTitle, sep)\n\t\ttitle = originalTitle[:index]\n\n\t\t// If the resulting title is too short (3 words or fewer), remove\n\t\t// the first part instead:\n\t\tif len(strings.Fields(title)) < 3 {\n\t\t\tindex = strings.Index(originalTitle, sep)\n\t\t\ttitle = originalTitle[index+1:]\n\t\t}\n\t} else if strings.Contains(title, \": \") {\n\t\t// Check if we have an heading containing this exact string, so we\n\t\t// could assume it's the full title.\n\t\texistInHeading := false\n\t\tdoc.Find(\"h1,h2\").EachWithBreak(func(_ int, heading *goquery.Selection) bool {\n\t\t\theadingText := strings.TrimSpace(heading.Text())\n\t\t\tif headingText == title {\n\t\t\t\texistInHeading = true\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\treturn true\n\t\t})\n\n\t\t// If we don't, let's extract the title out of the original title string.\n\t\tif !existInHeading {\n\t\t\tindex := strings.LastIndex(originalTitle, \":\")\n\t\t\ttitle = originalTitle[index+1:]\n\n\t\t\t// If the title is now too short, try the first colon instead:\n\t\t\tif len(strings.Fields(title)) < 3 {\n\t\t\t\tindex = strings.Index(originalTitle, \":\")\n\t\t\t\ttitle = originalTitle[index+1:]\n\t\t\t\t// But if we have too many words before the colon there's something weird\n\t\t\t\t// with the titles and the H tags so let's just use the original title instead\n\t\t\t} else 
{\n\t\t\t\tindex = strings.Index(originalTitle, \":\")\n\t\t\t\ttitle = originalTitle[:index]\n\t\t\t\tif len(strings.Fields(title)) > 5 {\n\t\t\t\t\ttitle = originalTitle\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else if strLen(title) > 150 || strLen(title) < 15 {\n\t\thOne := doc.Find(\"h1\").First()\n\t\tif hOne != nil {\n\t\t\ttitle = normalizeText(hOne.Text())\n\t\t}\n\t}\n\n\t// If we now have 4 words or fewer as our title, and either no\n\t// 'hierarchical' separators (\\, /, > or ») were found in the original\n\t// title or we decreased the number of words by more than 1 word, use\n\t// the original title.\n\tcurTitleWordCount := len(strings.Fields(title))\n\tnoSeparatorWordCount := len(strings.Fields(removeSeparator(originalTitle, separators...)))\n\tif curTitleWordCount <= 4 && (!titleHadHierarchicalSeparators || curTitleWordCount != noSeparatorWordCount-1) {\n\t\ttitle = originalTitle\n\t}\n\n\treturn title\n}", "func (o *Content) GetTitleOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Title.Get(), o.Title.IsSet()\n}", "func TestFrontMatterTitle(t *testing.T) {\n\tfilepath.Walk(\"/docs\", func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\tt.Error(err.Error(), \"-\", path)\n\t\t}\n\t\tpublished, mdBytes, err := isPublishedMarkdown(path)\n\t\tif err != nil {\n\t\t\tt.Error(err.Error(), \"-\", path)\n\t\t}\n\t\tif published == false {\n\t\t\treturn nil\n\t\t}\n\t\terr = testFrontMatterTitle(mdBytes)\n\t\tif err != nil {\n\t\t\tt.Error(err.Error(), \"-\", path)\n\t\t}\n\t\treturn nil\n\t})\n}", "func getArticleTitle(doc *goquery.Document) string {\n\t// Get title tag\n\ttitle := doc.Find(\"title\").First().Text()\n\ttitle = normalizeText(title)\n\toriginalTitle := title\n\n\t// Create list of separator\n\tseparators := []string{`|`, `-`, `\\`, `/`, `>`, `»`}\n\thierarchialSeparators := []string{`\\`, `/`, `>`, `»`}\n\n\t// If there's a separator in the title, first remove the final 
part\n\ttitleHadHierarchicalSeparators := false\n\tif idx, sep := findSeparator(title, separators...); idx != -1 {\n\t\ttitleHadHierarchicalSeparators = hasSeparator(title, hierarchialSeparators...)\n\n\t\tindex := strings.LastIndex(originalTitle, sep)\n\t\ttitle = originalTitle[:index]\n\n\t\t// If the resulting title is too short (3 words or fewer), remove\n\t\t// the first part instead:\n\t\tif len(strings.Fields(title)) < 3 {\n\t\t\tindex = strings.Index(originalTitle, sep)\n\t\t\ttitle = originalTitle[index+1:]\n\t\t}\n\t} else if strings.Contains(title, \": \") {\n\t\t// Check if we have an heading containing this exact string, so we\n\t\t// could assume it's the full title.\n\t\texistInHeading := false\n\t\tdoc.Find(\"h1,h2\").EachWithBreak(func(_ int, heading *goquery.Selection) bool {\n\t\t\theadingText := strings.TrimSpace(heading.Text())\n\t\t\tif headingText == title {\n\t\t\t\texistInHeading = true\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\treturn true\n\t\t})\n\n\t\t// If we don't, let's extract the title out of the original title string.\n\t\tif !existInHeading {\n\t\t\tindex := strings.LastIndex(originalTitle, \":\")\n\t\t\ttitle = originalTitle[index+1:]\n\n\t\t\t// If the title is now too short, try the first colon instead:\n\t\t\tif len(strings.Fields(title)) < 3 {\n\t\t\t\tindex = strings.Index(originalTitle, \":\")\n\t\t\t\ttitle = originalTitle[:index]\n\t\t\t\t// But if we have too many words before the colon there's something weird\n\t\t\t\t// with the titles and the H tags so let's just use the original title instead\n\t\t\t} else {\n\t\t\t\tindex = strings.Index(originalTitle, \":\")\n\t\t\t\tbeforeColon := originalTitle[:index]\n\t\t\t\tif len(strings.Fields(beforeColon)) > 5 {\n\t\t\t\t\ttitle = originalTitle\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else if strLen(title) > 150 || strLen(title) < 15 {\n\t\thOne := doc.Find(\"h1\").First()\n\t\tif hOne != nil {\n\t\t\ttitle = hOne.Text()\n\t\t}\n\t}\n\n\t// If we now have 4 words or fewer as our 
title, and either no\n\t// 'hierarchical' separators (\\, /, > or ») were found in the original\n\t// title or we decreased the number of words by more than 1 word, use\n\t// the original title.\n\tcurTitleWordCount := len(strings.Fields(title))\n\tnoSeparatorWordCount := len(strings.Fields(removeSeparator(originalTitle, separators...)))\n\tif curTitleWordCount <= 4 && (!titleHadHierarchicalSeparators || curTitleWordCount != noSeparatorWordCount-1) {\n\t\ttitle = originalTitle\n\t}\n\n\treturn normalizeText(title)\n}", "func parseTitle(fs http.FileSystem, path string) (string, error) {\n\tf, err := fs.Open(path)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer f.Close()\n\tdoc, err := titlesContext.Parse(f, path, present.TitlesOnly)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn doc.Title, nil\n}", "func (m *ReviewMutation) Title() (r string, exists bool) {\n\tv := m.title\n\tif v == nil {\n\t\treturn\n\t}\n\treturn *v, true\n}", "func (o *SecurityProblem) GetTitleOk() (*string, bool) {\n\tif o == nil || o.Title == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Title, true\n}", "func (o *Snippet) HasTitle() bool {\n\tif o != nil && o.Title != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *PostWebhook) GetTitleOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Title, true\n}", "func (o *InlineResponse20033Milestones) HasTitle() bool {\n\tif o != nil && o.Title != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func HasTitleWithValidType(prefixes []string, title string) bool {\n\tpureTitle := strings.TrimSpace(title)\n\tfor _, prefix := range prefixes {\n\t\tprefixRegexp := regexp.MustCompile(`(?i)^` + prefix + `(:| |\\()+`)\n\t\tif prefixRegexp.MatchString(pureTitle) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func getTitle(w http.ResponseWriter, r *http.Request) (string, error) {\n\tm := validPath.FindStringSubmatch(r.URL.Path)\n\tif m == nil {\n\t\thttp.NotFound(w, r)\t// 404 not found http 
error\n\t\treturn \"\", errors.New(\"Invalid Page Title\")\n\t}\n\treturn m[2], nil // The title is the second subexpression\n}", "func getTitle(w http.ResponseWriter, r *http.Request) (string, error) {\n\tm := validPath.FindStringSubmatch(r.URL.Path)\n\tif m == nil {\n\t\thttp.NotFound(w, r)\n\t\treturn \"\", errors.New(\"Invalid Page Title\")\n\t}\n\treturn m[2], nil // The title is the second subexpression.\n}", "func (m *NametitleMutation) Title() (r string, exists bool) {\n\tv := m._Title\n\tif v == nil {\n\t\treturn\n\t}\n\treturn *v, true\n}", "func isTitleElement(n *html.Node) bool {\n\treturn n.Type == html.ElementNode && n.Data == \"title\"\n}", "func (o *HealthIncident) GetTitleOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Title, true\n}", "func (o *InlineResponse20034Milestone) GetTitleOk() (*string, bool) {\n\tif o == nil || o.Title == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Title, true\n}", "func soleTitle(doc *html.Node) (title string, err error) {\n\ttype bailout struct{}\n\n\tdefer func() {\n\t\tswitch p := recover(); p {\n\t\tcase nil:\n\t\t\t// no panic\n\t\tcase bailout{}:\n\t\t\t// \"expected\" panic\n\t\t\terr = fmt.Errorf(\"multiple title elements\")\n\t\tdefault:\n\t\t\tpanic(p) // unexpected panic; carry on panicking\n\t\t}\n\t}()\n\t// Bail out of recursion if we find more than one non-empty title.\n\tforEachNode(doc, func(n *html.Node) {\n\t\tif n.Type == html.ElementNode && n.Data == \"title\" &&\n\t\t\tn.FirstChild != nil {\n\t\t\tif title != \"\" {\n\t\t\t\tpanic(bailout{}) // multiple title elements\n\t\t\t}\n\t\t\ttitle = n.FirstChild.Data\n\t\t}\n\t}, nil)\n\tif title == \"\" {\n\t\treturn \"\", fmt.Errorf(\"no title element\")\n\t}\n\treturn title, nil\n}", "func (o *InlineResponse20049Post) GetTitleOk() (*string, bool) {\n\tif o == nil || o.Title == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Title, true\n}", "func (o *WorkbookChart) HasTitle() bool {\n\tif o != nil && o.Title != nil 
{\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *InlineResponse200115) HasTitle() bool {\n\tif o != nil && o.Title != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *NiaapiNewReleaseDetailAllOf) GetTitleOk() (*string, bool) {\n\tif o == nil || o.Title == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Title, true\n}", "func (o *GroupWidgetDefinition) GetTitleOk() (*string, bool) {\n\tif o == nil || o.Title == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Title, true\n}", "func (o *InlineResponse20049Post) HasTitle() bool {\n\tif o != nil && o.Title != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func title(markdown string) string {\n\ttitleLine := defaultTitle\n\tlines := strings.SplitN(markdown, \"\\n\", 2)\n\tif len(lines) > 0 {\n\t\ttitleLine = lines[0]\n\t\ttitleLine = strings.Trim(titleLine, \" #\")\n\t}\n\treturn titleLine\n}", "func (o *GroupWidgetDefinition) HasTitle() bool {\n\tif o != nil && o.Title != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *Content) HasTitle() bool {\n\tif o != nil && o.Title.IsSet() {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *InlineResponse20027Person) HasTitle() bool {\n\tif o != nil && o.Title != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func SoleTitle(doc *html.Node) (title string, err error) {\n\ttype bailout struct{}\n\n\tdefer func() {\n\t\tswitch p := recover(); p {\n\t\tcase nil:\n\t\t\t// no panic\n\t\tcase bailout{}:\n\t\t\t// \"expected\" panic\n\t\t\terr = fmt.Errorf(\"multiple title elements\")\n\t\tdefault:\n\t\t\t// unexpected panic; carry on panicking\n\t\t\tpanic(p)\n\t\t}\n\t}()\n\n\t// Bail out of recursion if we find more than one non-empty title.\n\tforEachNode(doc, func(n *html.Node) {\n\t\tif html.ElementNode == n.Type && \"title\" == n.Data && nil != n.FirstChild {\n\t\t\tif \"\" != title {\n\t\t\t\t// multiple title elements\n\t\t\t\tpanic(bailout{})\n\t\t\t}\n\n\t\t\ttitle = n.FirstChild.Data\n\t\t}\n\t}, nil)\n\n\tif \"\" == title 
{\n\t\treturn \"\", fmt.Errorf(\"no title element\")\n\t}\n\n\treturn title, nil\n}", "func (o *NiaapiNewReleaseDetailAllOf) HasTitle() bool {\n\tif o != nil && o.Title != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (m *RoleMutation) Title() (r string, exists bool) {\n\tv := m.title\n\tif v == nil {\n\t\treturn\n\t}\n\treturn *v, true\n}", "func TestFrontmatterTitle(t *testing.T) {\n\tfilepath.Walk(\"/docs\", func(path string, info os.FileInfo, err error) error {\n\t\terr = testFrontmatterTitle(path)\n\t\tif err != nil {\n\t\t\tfmt.Println(err.Error(), \"-\", path)\n\t\t\tt.Fail()\n\t\t}\n\t\treturn nil\n\t})\n}", "func (p *PageRelatedArticle) GetTitle() (value string, ok bool) {\n\tif !p.Flags.Has(0) {\n\t\treturn value, false\n\t}\n\treturn p.Title, true\n}", "func (o *InlineResponse20033Milestones) GetTitleOk() (*string, bool) {\n\tif o == nil || o.Title == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Title, true\n}", "func (o *InlineResponse2004People) HasTitle() bool {\n\tif o != nil && o.Title != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *LaunchpadClicks) HasTitle() bool {\n\tif o != nil && o.Title != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (m *TodoItemMutation) Title() (r string, exists bool) {\n\tv := m.title\n\tif v == nil {\n\t\treturn\n\t}\n\treturn *v, true\n}", "func (p Params) Title() (title string, found bool) {\n\ttitle, found = p[titleKey]\n\treturn\n}", "func Title() (string, error) {\n\treturn \"\", nil\n}", "func (o *CatalogEntry) HasTitle() bool {\n\tif o != nil && o.Title != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func Title(v string) predicate.Task {\n\treturn predicate.Task(sql.FieldEQ(FieldTitle, v))\n}", "func TitleMatch(word []string, pageID int64) (ok bool, queryScore float64) {\n\tok = false\n\tqueryScore = 0.0\n\n\tfor _, w := range word {\n\t\twordScore := 0.0\n\t\ttitle := database.GetTitle(pageID)\n\t\t// splitTitle := strings.Split(title, \" \")\n\t\t// titleSlice := 
make([]string, 0)\n\t\t// for _, q := range splitTitle {\n\t\t// \ttitleSlice = append(titleSlice, q)\n\t\t// }\n\t\t// titleStem := stopstem.StemString(titleSlice)\n\t\tfor _, t := range title {\n\t\t\tif w == t {\n\t\t\t\tok = true\n\t\t\t\twordScore++\n\t\t\t\t// fmt.Println(w, \"match in \", pageID)\n\t\t\t}\n\t\t}\n\t\tqueryScore += wordScore\n\t}\n\treturn ok, queryScore\n}", "func textContainsTitle(text, title string) bool {\n\tre := regexp.MustCompile(\"(?i)\" + strings.Join(strings.Split(title, \" \"), `\\s+`))\n\treturn re.MatchString(text)\n}", "func filterTitle(ctx stick.Context, val stick.Value, args ...stick.Value) stick.Value {\n\treturn strings.Title(stick.CoerceString(val))\n}", "func (o *InlineResponse200115) GetTitleOk() (*string, bool) {\n\tif o == nil || o.Title == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Title, true\n}", "func (o *CatalogEntry) GetTitleOk() (*string, bool) {\n\tif o == nil || o.Title == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Title, true\n}", "func ExtractTitle(r *Response) (title string) {\n\t// Try to parse the DOM\n\ttitleDom, err := getTitleWithDom(r)\n\t// In case of error fallback to regex\n\tif err != nil {\n\t\tfor _, match := range reTitle.FindAllString(r.Raw, -1) {\n\t\t\ttitle = match\n\t\t\tbreak\n\t\t}\n\t} else {\n\t\ttitle = renderNode(titleDom)\n\t}\n\n\ttitle = html.UnescapeString(trimTitleTags(title))\n\n\t// remove unwanted chars\n\ttitle = strings.TrimSpace(strings.Trim(title, cutset))\n\ttitle = stringsutil.ReplaceAll(title, \"\\n\", \"\\r\")\n\n\treturn title\n}", "func (o *TeamPermissionSettingAttributes) HasTitle() bool {\n\treturn o != nil && o.Title != nil\n}", "func TitleHasSuffix(v string) predicate.Task {\n\treturn predicate.Task(sql.FieldHasSuffix(FieldTitle, v))\n}", "func (o *GetMenuItemInformation200Response) GetTitleOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Title, true\n}", "func (o *LaunchpadClicks) GetTitleOk() (*string, bool) {\n\tif o == 
nil || o.Title == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Title, true\n}", "func (pubManager PublicationManager) CheckByTitle(title string) (int64, error) {\n\n\trow := pubManager.dbCheckByTitle.QueryRow(title)\n\tvar res int64\n\terr := row.Scan(&res)\n\tif err != nil {\n\t\treturn -1, ErrNotFound\n\t}\n\t// returns 1 or 0\n\treturn res, err\n}", "func titleOrDefault(specDoc *loads.Document, name, defaultName string) string {\n\tif strings.TrimSpace(name) == \"\" {\n\t\tif specDoc.Spec().Info != nil && strings.TrimSpace(specDoc.Spec().Info.Title) != \"\" {\n\t\t\tname = specDoc.Spec().Info.Title\n\t\t} else {\n\t\t\tname = defaultName\n\t\t}\n\t}\n\treturn swag.ToGoName(name)\n}", "func Verify(a, b *Release, minTitleLength int) MatchResult {\n\tif a.ExtIDs.DOI != \"\" && a.ExtIDs.DOI == b.ExtIDs.DOI {\n\t\treturn MatchResult{StatusExact, ReasonDOI}\n\t}\n\tif a.WorkID != \"\" && a.WorkID == b.WorkID {\n\t\treturn MatchResult{StatusExact, ReasonWorkID}\n\t}\n\taTitleLower := strings.ToLower(a.Title)\n\tbTitleLower := strings.ToLower(b.Title)\n\tif utf8.RuneCountInString(a.Title) < minTitleLength {\n\t\treturn MatchResult{StatusAmbiguous, ReasonShortTitle}\n\t}\n\tif BlacklistTitle.Contains(aTitleLower) {\n\t\treturn MatchResult{StatusAmbiguous, ReasonBlacklisted}\n\t}\n\tif BlacklistTitle.Contains(bTitleLower) {\n\t\treturn MatchResult{StatusAmbiguous, ReasonBlacklisted}\n\t}\n\tfor _, fragment := range BlacklistTitleFragments.Slice() {\n\t\tif strings.Contains(aTitleLower, fragment) {\n\t\t\treturn MatchResult{StatusAmbiguous, ReasonBlacklistedFragment}\n\t\t}\n\t}\n\tif strings.Contains(aTitleLower, \"subject index\") && strings.Contains(bTitleLower, \"subject index\") {\n\t\tif a.ContainerID != \"\" && a.ContainerID != b.ContainerID {\n\t\t\treturn MatchResult{StatusDifferent, ReasonContainer}\n\t\t}\n\t}\n\tif a.Title != \"\" && a.Title == b.Title &&\n\t\ta.Extra.DataCite.MetadataVersion > 0 && b.Extra.DataCite.MetadataVersion > 0 
&&\n\t\ta.Extra.DataCite.MetadataVersion != b.Extra.DataCite.MetadataVersion {\n\t\treturn MatchResult{StatusExact, ReasonDataciteVersion}\n\t}\n\tif strings.HasPrefix(a.ExtIDs.DOI, \"10.14288/\") && strings.HasPrefix(b.ExtIDs.DOI, \"10.14288/\") &&\n\t\ta.ExtIDs.DOI != b.ExtIDs.DOI {\n\t\treturn MatchResult{StatusDifferent, ReasonCustomPrefix1014288}\n\t}\n\tif strings.HasPrefix(a.ExtIDs.DOI, \"10.3403\") && strings.HasPrefix(b.ExtIDs.DOI, \"10.3403\") {\n\t\tif a.ExtIDs.DOI+\"u\" == b.ExtIDs.DOI || b.ExtIDs.DOI+\"u\" == a.ExtIDs.DOI {\n\t\t\treturn MatchResult{StatusStrong, ReasonCustomBSIUndated}\n\t\t}\n\t\taSubtitle := a.Subtitle()\n\t\tbSubtitle := b.Subtitle()\n\t\tif a.Title != \"\" && a.Title == b.Title &&\n\t\t\t((len(aSubtitle) > 0 && aSubtitle[0] != \"\" && len(bSubtitle) == 0) ||\n\t\t\t\t(len(aSubtitle) == 0 && len(bSubtitle) > 0 && bSubtitle[0] != \"\")) {\n\t\t\treturn MatchResult{StatusStrong, ReasonCustomBSISubdoc}\n\t\t}\n\t}\n\tif strings.HasPrefix(a.ExtIDs.DOI, \"10.1149\") && strings.HasPrefix(b.ExtIDs.DOI, \"10.1149\") {\n\t\tv := \"10.1149/ma\"\n\t\tif (strings.HasPrefix(a.ExtIDs.DOI, v) && !strings.HasPrefix(b.ExtIDs.DOI, v)) ||\n\t\t\t(!strings.HasPrefix(a.ExtIDs.DOI, v) && strings.HasPrefix(b.ExtIDs.DOI, v)) {\n\t\t\treturn MatchResult{StatusDifferent, ReasonCustomIOPMAPattern}\n\t\t}\n\t}\n\tif strings.Contains(a.Title, \"Zweckverband Volkshochschule\") && a.Title != b.Title {\n\t\treturn MatchResult{StatusDifferent, ReasonCustomVHS}\n\t}\n\tif PatAppendix.MatchString(a.Title) {\n\t\treturn MatchResult{StatusAmbiguous, ReasonAppendix}\n\t}\n\tif strings.HasPrefix(a.ExtIDs.DOI, \"10.6084/\") && strings.HasPrefix(b.ExtIDs.DOI, \"10.6084/\") {\n\t\tav := PatFigshareVersion.ReplaceAllString(a.ExtIDs.DOI, \"\")\n\t\tbv := PatFigshareVersion.ReplaceAllString(b.ExtIDs.DOI, \"\")\n\t\tif av == bv {\n\t\t\treturn MatchResult{StatusStrong, ReasonFigshareVersion}\n\t\t}\n\t}\n\tif PatVersionedDOI.MatchString(a.ExtIDs.DOI) && 
PatVersionedDOI.MatchString(b.ExtIDs.DOI) {\n\t\treturn MatchResult{StatusStrong, ReasonVersionedDOI}\n\t}\n\tif looksLikeComponent(a.ExtIDs.DOI, b.ExtIDs.DOI) {\n\t\treturn MatchResult{StatusStrong, ReasonVersionedDOI}\n\t}\n\tif len(a.Extra.DataCite.Relations) > 0 || len(b.Extra.DataCite.Relations) > 0 {\n\t\tgetRelatedDOI := func(rel *Release) *set.Set {\n\t\t\tss := set.New()\n\t\t\tfor _, rel := range rel.Extra.DataCite.Relations {\n\t\t\t\tif strings.ToLower(rel.RelatedIdentifierType) != \"doi\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tss.Add(rel.RelatedIdentifier())\n\t\t\t}\n\t\t\treturn ss\n\t\t}\n\t\taRelated := getRelatedDOI(a)\n\t\tbRelated := getRelatedDOI(b)\n\t\tif aRelated.Contains(b.ExtIDs.DOI) || bRelated.Contains(a.ExtIDs.DOI) {\n\t\t\treturn MatchResult{StatusStrong, ReasonDataciteRelatedID}\n\t\t}\n\t}\n\tif a.ExtIDs.Arxiv != \"\" && b.ExtIDs.Arxiv != \"\" {\n\t\taSub := PatArxivVersion.FindStringSubmatch(a.ExtIDs.Arxiv)\n\t\tbSub := PatArxivVersion.FindStringSubmatch(b.ExtIDs.Arxiv)\n\t\tif len(aSub) == 2 && len(bSub) == 2 && aSub[1] == bSub[1] {\n\t\t\treturn MatchResult{StatusStrong, ReasonArxivVersion}\n\t\t}\n\t}\n\tif a.ReleaseType != b.ReleaseType {\n\t\ttypes := set.FromSlice([]string{a.ReleaseType, b.ReleaseType})\n\t\tignoreTypes := set.FromSlice([]string{\"article\", \"article-journal\", \"report\", \"paper-conference\"})\n\t\tif types.Intersection(ignoreTypes).IsEmpty() {\n\t\t\treturn MatchResult{StatusDifferent, ReasonReleaseType}\n\t\t}\n\t\tif types.Contains(\"dataset\") && (types.Contains(\"article\") || types.Contains(\"article-journal\")) {\n\t\t\treturn MatchResult{StatusDifferent, ReasonReleaseType}\n\t\t}\n\t\tif types.Contains(\"book\") && (types.Contains(\"article\") || types.Contains(\"article-journal\")) {\n\t\t\treturn MatchResult{StatusDifferent, ReasonReleaseType}\n\t\t}\n\t}\n\tif a.ReleaseType == \"dataset\" && b.ReleaseType == \"dataset\" && a.ExtIDs.DOI != b.ExtIDs.DOI {\n\t\treturn MatchResult{StatusDifferent, 
ReasonDatasetDOI}\n\t}\n\tif a.ReleaseType == \"chapter\" && b.ReleaseType == \"chapter\" &&\n\t\ta.Extra.ContainerName != \"\" && a.Extra.ContainerName != b.Extra.ContainerName {\n\t\treturn MatchResult{StatusDifferent, ReasonBookChapter}\n\t}\n\tif a.Extra.Crossref.Type == \"component\" && a.Title != b.Title {\n\t\treturn MatchResult{StatusDifferent, ReasonComponent}\n\t}\n\tif a.ReleaseType == \"component\" && b.ReleaseType == \"component\" {\n\t\tif a.ExtIDs.DOI != \"\" && a.ExtIDs.DOI != b.ExtIDs.DOI {\n\t\t\treturn MatchResult{StatusDifferent, ReasonComponent}\n\t\t}\n\t}\n\taSlugTitle := strings.TrimSpace(strings.Replace(slugifyString(a.Title), \"\\n\", \" \", -1))\n\tbSlugTitle := strings.TrimSpace(strings.Replace(slugifyString(b.Title), \"\\n\", \" \", -1))\n\n\tif aSlugTitle == bSlugTitle {\n\t\tif a.ReleaseYear() != 0 && b.ReleaseYear() != 0 && absInt(a.ReleaseYear()-b.ReleaseYear()) > 40 {\n\t\t\treturn MatchResult{StatusDifferent, ReasonYear}\n\t\t}\n\t}\n\tif aSlugTitle == bSlugTitle {\n\t\tieeeArxivCheck := func(a, b *Release) (ok bool) {\n\t\t\treturn doiPrefix(a.ExtIDs.DOI) == \"10.1109\" && b.ExtIDs.Arxiv != \"\"\n\t\t}\n\t\tif ieeeArxivCheck(a, b) || ieeeArxivCheck(b, a) {\n\t\t\treturn MatchResult{StatusStrong, ReasonCustomIEEEArxiv}\n\t\t}\n\t}\n\tif aSlugTitle == bSlugTitle {\n\t\tif strings.HasPrefix(a.ExtIDs.DOI, \"10.7916/\") && strings.HasPrefix(b.ExtIDs.DOI, \"10.7916/\") {\n\t\t\treturn MatchResult{StatusAmbiguous, ReasonCustomPrefix107916}\n\t\t}\n\t}\n\tif aSlugTitle == bSlugTitle {\n\t\taSubtitle := a.Subtitle()\n\t\tbSubtitle := b.Subtitle()\n\t\tfor _, aSub := range aSubtitle {\n\t\t\tfor _, bSub := range bSubtitle {\n\t\t\t\tif slugifyString(aSub) != slugifyString(bSub) {\n\t\t\t\t\treturn MatchResult{StatusDifferent, ReasonSubtitle}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\trawAuthors := func(rel *Release) (names []string) {\n\t\tfor _, c := range rel.Contribs {\n\t\t\tname := strings.TrimSpace(c.RawName)\n\t\t\tif name == \"\" 
{\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tnames = append(names, name)\n\t\t}\n\t\treturn names\n\t}\n\taAuthors := set.FromSlice(rawAuthors(a))\n\tbAuthors := set.FromSlice(rawAuthors(b))\n\taSlugAuthors := set.FromSlice(mapString(slugifyString, aAuthors.Slice()))\n\tbSlugAuthors := set.FromSlice(mapString(slugifyString, bAuthors.Slice()))\n\tif aTitleLower == bTitleLower {\n\t\tif aAuthors.Len() > 0 && aAuthors.Equals(bAuthors) {\n\t\t\tif a.ReleaseYear() > 0 && b.ReleaseYear() > 0 && absInt(a.ReleaseYear()-b.ReleaseYear()) > 4 {\n\t\t\t\treturn MatchResult{StatusDifferent, ReasonYear}\n\t\t\t}\n\t\t\treturn MatchResult{StatusExact, ReasonTitleAuthorMatch}\n\t\t}\n\t}\n\tif looksLikeFilename(a.Title) || looksLikeFilename(b.Title) {\n\t\tif a.Title != b.Title {\n\t\t\treturn MatchResult{StatusDifferent, ReasonTitleFilename}\n\t\t}\n\t}\n\tif a.Title != \"\" && a.Title == b.Title {\n\t\tif a.ReleaseYear() > 0 && b.ReleaseYear() > 0 && absInt(a.ReleaseYear()-b.ReleaseYear()) > 2 {\n\t\t\treturn MatchResult{StatusDifferent, ReasonYear}\n\t\t}\n\t}\n\t// XXX: skipping chemical formula detection (to few cases; https://git.io/Jtdax)\n\tif len(aSlugTitle) < 10 && aSlugTitle != bSlugTitle {\n\t\treturn MatchResult{StatusAmbiguous, ReasonShortTitle}\n\t}\n\tif PatDigits.MatchString(aSlugTitle) &&\n\t\taSlugTitle != bSlugTitle &&\n\t\tunifyDigits(aSlugTitle) == unifyDigits(bSlugTitle) {\n\t\treturn MatchResult{StatusDifferent, ReasonNumDiff}\n\t}\n\tif aSlugTitle != \"\" && bSlugTitle != \"\" &&\n\t\tstrings.ReplaceAll(aSlugTitle, \" \", \"\") == strings.ReplaceAll(bSlugTitle, \" \", \"\") {\n\t\tif aSlugAuthors.Intersection(bSlugAuthors).Len() > 0 {\n\t\t\tif a.ReleaseYear() > 0 && b.ReleaseYear() > 0 && absInt(a.ReleaseYear()-b.ReleaseYear()) > 4 {\n\t\t\t\treturn MatchResult{StatusDifferent, ReasonYear}\n\t\t\t}\n\t\t\treturn MatchResult{StatusStrong, ReasonSlugTitleAuthorMatch}\n\t\t}\n\t}\n\tif a.ReleaseYear() > 0 && a.ReleaseYear() == b.ReleaseYear() && aTitleLower == 
bTitleLower {\n\t\tif (a.ExtIDs.PMID != \"\" && b.ExtIDs.DOI != \"\") || (b.ExtIDs.PMID != \"\" && a.ExtIDs.DOI != \"\") {\n\t\t\treturn MatchResult{StatusStrong, ReasonPMIDDOIPair}\n\t\t}\n\t}\n\tif a.ExtIDs.Jstor != \"\" && b.ExtIDs.Jstor != \"\" && a.ExtIDs.Jstor != b.ExtIDs.Jstor {\n\t\treturn MatchResult{StatusDifferent, ReasonJstorID}\n\t}\n\tif a.ContainerID != \"\" && a.ContainerID == b.ContainerID && a.ExtIDs.DOI != b.ExtIDs.DOI &&\n\t\tdoiPrefix(a.ExtIDs.DOI) != \"10.1126\" &&\n\t\tdoiPrefix(a.ExtIDs.DOI) == doiPrefix(b.ExtIDs.DOI) {\n\t\treturn MatchResult{StatusDifferent, ReasonSharedDOIPrefix}\n\t}\n\tif aAuthors.Len() > 0 && aSlugAuthors.Intersection(bSlugAuthors).IsEmpty() {\n\t\tnumAuthors := set.Min(aSlugAuthors, bSlugAuthors)\n\t\tscore := averageScore(aSlugAuthors, bSlugAuthors)\n\t\tif (numAuthors < 3 && score > 0.9) || (numAuthors >= 3 && score > 0.5) {\n\t\t\treturn MatchResult{StatusStrong, ReasonTokenizedAuthors}\n\t\t}\n\t\taTok := set.FromSlice(strings.Fields(aSlugAuthors.Join(\" \")))\n\t\tbTok := set.FromSlice(strings.Fields(bSlugAuthors.Join(\" \")))\n\t\taTok = set.Filter(aTok, func(s string) bool {\n\t\t\treturn len(s) > 2\n\t\t})\n\t\tbTok = set.Filter(bTok, func(s string) bool {\n\t\t\treturn len(s) > 2\n\t\t})\n\t\tif aTok.Len() > 0 && bTok.Len() > 0 {\n\t\t\tif aTok.Jaccard(bTok) > 0.35 {\n\t\t\t\treturn MatchResult{StatusStrong, ReasonJaccardAuthors}\n\t\t\t}\n\t\t}\n\t\treturn MatchResult{StatusDifferent, ReasonContribIntersectionEmpty}\n\t}\n\tif doiPrefix(a.ExtIDs.DOI) == \"10.5860\" || doiPrefix(b.ExtIDs.DOI) == \"10.5860\" {\n\t\treturn MatchResult{StatusAmbiguous, ReasonCustomPrefix105860ChoiceReview}\n\t}\n\t// XXX: parse pages\n\taParsedPages := parsePageString(a.Pages)\n\tbParsedPages := parsePageString(b.Pages)\n\tif aParsedPages.Err != nil && bParsedPages.Err != nil {\n\t\tif absInt(aParsedPages.Count()-bParsedPages.Count()) > 5 {\n\t\t\treturn MatchResult{StatusDifferent, ReasonPageCount}\n\t\t}\n\t}\n\tif 
aAuthors.Equals(bAuthors) &&\n\t\ta.ContainerID == b.ContainerID &&\n\t\ta.ReleaseYear() == b.ReleaseYear() &&\n\t\ta.Title != b.Title &&\n\t\t(strings.Contains(a.Title, b.Title) || strings.Contains(b.Title, a.Title)) {\n\t\treturn MatchResult{StatusStrong, ReasonTitleArtifact}\n\t}\n\treturn MatchResult{\n\t\tStatusAmbiguous,\n\t\tReasonUnknown,\n\t}\n}", "func (o *WorkbookChart) GetTitleOk() (AnyOfmicrosoftGraphWorkbookChartTitle, bool) {\n\tif o == nil || o.Title == nil {\n\t\tvar ret AnyOfmicrosoftGraphWorkbookChartTitle\n\t\treturn ret, false\n\t}\n\treturn *o.Title, true\n}", "func (cep *CoreEvalProposal) GetTitle() string { return cep.Title }", "func (o *EventAttributes) GetTitleOk() (*string, bool) {\n\tif o == nil || o.Title == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Title, true\n}", "func cmpTitle(x, y interface{}) int {\n\txx, yy := toTrack(x, y)\n\tswitch {\n\tcase xx.Title < yy.Title:\n\t\treturn -1\n\tcase xx.Title == yy.Title:\n\t\treturn 0\n\tdefault:\n\t\treturn 1\n\t}\n}", "func (o *InlineResponse20027Person) GetTitleOk() (*string, bool) {\n\tif o == nil || o.Title == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Title, true\n}", "func (o *InlineResponse2004People) GetTitleOk() (*string, bool) {\n\tif o == nil || o.Title == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Title, true\n}", "func pageTitle(m meta) string {\n\ttitle, ok := m[\"title\"].(string)\n\tif !ok || title == \"\" {\n\t\treturn defaultTitle\n\t}\n\treturn title + \" - \" + defaultTitle\n}", "func TitleEQ(v string) predicate.Task {\n\treturn predicate.Task(sql.FieldEQ(FieldTitle, v))\n}", "func (c *CheckRunAnnotation) GetTitle() string {\n\tif c == nil || c.Title == nil {\n\t\treturn \"\"\n\t}\n\treturn *c.Title\n}", "func (t *Title) SchemaTitle() *schema.Title {\n\t// Check for self being nil so we can safely chain this function\n\tif t == nil {\n\t\treturn nil\n\t}\n\n\tvar name, loc = t.MARCTitle, t.MARCLocation\n\n\t// Not great, but this does the trick well enough when 
we haven't gotten a\n\t// valid MARC record\n\tif !t.ValidLCCN {\n\t\tname = t.Name\n\t}\n\n\treturn &schema.Title{\n\t\tLCCN: t.LCCN,\n\t\tName: name,\n\t\tPlaceOfPublication: loc,\n\t}\n}", "func (r *Role) Title() string {\n\t// Uppercase all words, and also ensure \"MARC\" is fully capitalized\n\tvar c = cases.Title(language.AmericanEnglish)\n\treturn c.String(strings.Replace(r.Name, \"marc\", \"MARC\", -1))\n}", "func (l *LessonTut) Title() string {\n\tif l.mdContent.HasTitle() {\n\t\treturn l.mdContent.GetTitle()\n\t}\n\treturn l.Name()\n}", "func Title(in string) string {\n\n\trunes := []rune(in)\n\tlength := len(runes)\n\n\tvar out []rune\n\tfor i := 0; i < length; i++ {\n\t\tif i > 0 && unicode.IsUpper(runes[i]) && ((i+1 < length && unicode.IsLower(runes[i+1])) || unicode.IsLower(runes[i-1])) {\n\t\t\tout = append(out, ' ')\n\t\t}\n\t\tout = append(out, runes[i])\n\t}\n\n\treturn string(out)\n}", "func ScrubTrackTitle(original string) string {\n\tvar result string\n\tresult = normalizeParens(original)\n\tfor _, re := range TrackTitleIgnoredPhrases {\n\t\tresult = re.ReplaceAllString(result, \"\")\n\t}\n\tresult = Scrub(result)\n\tresult = strings.TrimSpace(result)\n\treturn result\n}", "func (o *GroupWidgetDefinition) GetShowTitleOk() (*bool, bool) {\n\tif o == nil || o.ShowTitle == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ShowTitle, true\n}", "func (t *Changeset) Title() (string, error) {\n\tswitch m := t.Metadata.(type) {\n\tcase *github.PullRequest:\n\t\treturn m.Title, nil\n\tcase *bitbucketserver.PullRequest:\n\t\treturn m.Title, nil\n\tdefault:\n\t\treturn \"\", errors.New(\"unknown changeset type\")\n\t}\n}", "func Title(operand string) string { return strings.Title(operand) }", "func Title(props *TitleProps, children ...Element) *TitleElem {\n\trProps := &_TitleProps{\n\t\tBasicHTMLElement: newBasicHTMLElement(),\n\t}\n\n\tif props != nil {\n\t\tprops.assign(rProps)\n\t}\n\n\treturn &TitleElem{\n\t\tElement: createElement(\"title\", rProps, 
children...),\n\t}\n}", "func (o *InlineResponse20034Milestone) GetTitle() string {\n\tif o == nil || o.Title == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Title\n}", "func (self *CommitMessagePanelDriver) Title(expected *TextMatcher) *CommitMessagePanelDriver {\n\tself.getViewDriver().Title(expected)\n\n\treturn self\n}", "func (swagger *MgwSwagger) GetTitle() string {\n\treturn swagger.title\n}", "func TitleContains(v string) predicate.Task {\n\treturn predicate.Task(sql.FieldContains(FieldTitle, v))\n}", "func (result *Result) GetTitle() string {\n\tif len(result.OpenGraph.Title) > 0 {\n\t\treturn result.OpenGraph.Title\n\t} else {\n\t\treturn result.Title\n\t}\n}", "func (o *ViewUserDashboard) GetTitleOk() (*string, bool) {\n\tif o == nil || o.Title == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Title, true\n}", "func TitleHasSuffix(v string) predicate.Announcement {\n\treturn predicate.Announcement(func(s *sql.Selector) {\n\t\ts.Where(sql.HasSuffix(s.C(FieldTitle), v))\n\t})\n}", "func (p *PullRequest) GetTitle() string {\n\tif p == nil || p.Title == nil {\n\t\treturn \"\"\n\t}\n\treturn *p.Title\n}", "func TitleHasSuffix(v string) predicate.Ethnicity {\n\treturn predicate.Ethnicity(func(s *sql.Selector) {\n\t\ts.Where(sql.HasSuffix(s.C(FieldTitle), v))\n\t})\n}", "func Title(embed *discordgo.MessageEmbed) *discordgo.MessageEmbed {\n\tembed.Author.Name = \"Command: title\"\n\tembed.Description = \"`title <text>` will create the text into title form.\"\n\tembed.Fields = []*discordgo.MessageEmbedField{\n\t\t{\n\t\t\tName: \"<text>\",\n\t\t\tValue: \"The text to change into title form.\",\n\t\t\tInline: true,\n\t\t},\n\t\t{\n\t\t\tName: \"Related commands:\",\n\t\t\tValue: \"`caps`, `lower`, `randomcaps`, `swap`\",\n\t\t},\n\t}\n\treturn embed\n}", "func FundTitle(v string) predicate.CoveredPerson {\n\treturn predicate.CoveredPerson(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldFundTitle), v))\n\t})\n}", "func (o *EventAttributes) 
HasTitle() bool {\n\treturn o != nil && o.Title != nil\n}", "func (o *ExpenseApplicationsIndexResponseExpenseApplications) GetTitleOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Title, true\n}", "func (me *XsdGoPkgHasElem_TitlesequenceCreateHITRequestschema_Title_XsdtString_) Walk() (err error) {\n\tif fn := WalkHandlers.XsdGoPkgHasElem_TitlesequenceCreateHITRequestschema_Title_XsdtString_; me != nil {\n\t\tif fn != nil {\n\t\t\tif err = fn(me, true); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif fn != nil {\n\t\t\tif err = fn(me, false); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}", "func getHeaderTitle(resBody io.Reader) string {\n\tb, err := ioutil.ReadAll(resBody)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\tstringB := string(b)\n\tstart := strings.Index(stringB, `<title>`)\n\tend := strings.Index(stringB, `</title>`)\n\tif start < 0 || end < 0 {\n\t\tstart = strings.Index(stringB, `<TITLE>`)\n\t\tend = strings.Index(stringB, `</TITLE>`)\n\t}\n\tif start < 0 || end < 0 {\n\t\treturn \"\"\n\t}\n\treturn stringB[start+7 : end]\n}", "func Title(v string) predicate.Ethnicity {\n\treturn predicate.Ethnicity(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldTitle), v))\n\t})\n}", "func TitleContainsFold(v string) predicate.Task {\n\treturn predicate.Task(sql.FieldContainsFold(FieldTitle, v))\n}", "func (o *SecurityProblem) GetTitle() string {\n\tif o == nil || o.Title == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Title\n}", "func (d UserData) HasTitle() bool {\n\treturn d.ModelData.Has(models.NewFieldName(\"Title\", \"title_id\"))\n}" ]
[ "0.63894886", "0.61719227", "0.6123725", "0.60611886", "0.60287726", "0.6002876", "0.5939993", "0.5821647", "0.5766424", "0.57502794", "0.57399386", "0.5739377", "0.57328135", "0.572084", "0.5645825", "0.56444085", "0.56426454", "0.55887145", "0.55743915", "0.55185646", "0.5507338", "0.54818815", "0.5481743", "0.5458701", "0.5449685", "0.5443272", "0.5427844", "0.53748447", "0.53592473", "0.53587687", "0.53278726", "0.5312468", "0.53122586", "0.5304976", "0.5304939", "0.5289268", "0.52744925", "0.5264944", "0.5262867", "0.52509344", "0.52506113", "0.52425826", "0.52332264", "0.5226052", "0.5214639", "0.52131873", "0.5204973", "0.5204302", "0.51966166", "0.51755536", "0.51652074", "0.5159333", "0.51506674", "0.5150148", "0.5131016", "0.509646", "0.5085669", "0.50834596", "0.5061851", "0.5058743", "0.50566447", "0.5055871", "0.5053916", "0.5046258", "0.50426215", "0.50380236", "0.5025967", "0.5024415", "0.5023387", "0.5011376", "0.4994427", "0.49912173", "0.49782237", "0.49763766", "0.49509934", "0.49327296", "0.49323153", "0.49292898", "0.4920379", "0.4917737", "0.4915908", "0.4907839", "0.4891799", "0.48808843", "0.48770264", "0.4873828", "0.4869412", "0.48664147", "0.48660403", "0.48648092", "0.48632225", "0.48596418", "0.48595098", "0.48573822", "0.48559967", "0.48531768", "0.48511252", "0.48504964", "0.48406354", "0.48382095" ]
0.85127145
0
CheckDescriptionLength checks if the given PR's description contains enough number of arguments
func CheckDescriptionLength(pr *gogh.PullRequest, config PluginConfiguration, logger log.Logger) string { actualLength := len(strings.TrimSpace(issueLinkRegexp.ReplaceAllString(pr.GetBody(), ""))) if actualLength < config.DescriptionContentLength { return fmt.Sprintf(DescriptionLengthShortMessage, config.DescriptionContentLength, actualLength) } return "" }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func CheckArgsLength(args []string, expectedLength int) error {\r\n\tif len(args) != expectedLength {\r\n\t\treturn fmt.Errorf(\"invalid number of arguments. Expected %v, got %v\", expectedLength, len(args))\r\n\t}\r\n\treturn nil\r\n}", "func IsValidArgsLength(args []string, n int) bool {\n\tif args == nil && n == 0 {\n\t\treturn true\n\t}\n\tif args == nil {\n\t\treturn false\n\t}\n\n\tif n < 0 {\n\t\treturn false\n\t}\n\n\targsNr := len(args)\n\tif argsNr < n || argsNr > n {\n\t\treturn false\n\t}\n\treturn true\n}", "func (d Description) EnsureLength() (Description, error) {\n\tif len(d.Moniker) > MaxMonikerLength {\n\t\treturn d, ErrDescriptionLength(\"moniker\", len(d.Moniker), MaxMonikerLength)\n\t}\n\tif len(d.Identity) > MaxIdentityLength {\n\t\treturn d, ErrDescriptionLength(\"identity\", len(d.Identity), MaxIdentityLength)\n\t}\n\tif len(d.Website) > MaxWebsiteLength {\n\t\treturn d, ErrDescriptionLength(\"website\", len(d.Website), MaxWebsiteLength)\n\t}\n\tif len(d.Details) > MaxDetailsLength {\n\t\treturn d, ErrDescriptionLength(\"details\", len(d.Details), MaxDetailsLength)\n\t}\n\n\treturn d, nil\n}", "func CheckLongDesc(cmd *cobra.Command) []error {\n\tfmt.Fprint(os.Stdout, \" ↳ checking long description\\n\")\n\tcmdPath := cmd.CommandPath()\n\tlong := cmd.Long\n\tif len(long) > 0 {\n\t\tif strings.Trim(long, \" \\t\\n\") != long {\n\t\t\treturn []error{fmt.Errorf(`command %q: long description is not normalized, make sure you are calling templates.LongDesc (from pkg/cmd/templates) before assigning cmd.Long`, cmdPath)}\n\t\t}\n\t}\n\treturn nil\n}", "func verifyLen(eventName string, parts []string, n int) (string, error) {\n\tif len(parts) < n {\n\t\treturn \"\", fmt.Errorf(\"%s: got %d parts, want %d\", eventName, len(parts), n)\n\t}\n\tif len(parts) > n {\n\t\treturn fmt.Sprintf(\"%s: got %d parts, expected %d\", eventName, len(parts), n), nil\n\t}\n\treturn \"\", nil\n}", "func TestLength(t *testing.T) {\n\tis := is.New(t)\n\n\titem := 
Item{\n\t\tText: strings.Repeat(\"x\", 20),\n\t\tParams: ItemParams{\n\t\t\tLength: 10,\n\t\t},\n\t}\n\tdisplayText := item.DisplayText()\n\tis.Equal(displayText, \"xxxxxxxxx…\")\n\n\titem = Item{\n\t\tText: strings.Repeat(\"x\", 20),\n\t\tParams: ItemParams{\n\t\t\tLength: 30,\n\t\t},\n\t}\n\tdisplayText = item.DisplayText()\n\tis.Equal(displayText, strings.Repeat(\"x\", 20))\n}", "func Len(t *testing.T, expected int, actual interface{}, message ...string) {\n\tif !compareLength(actual, expected) {\n\t\tt.Errorf(\"%v\\n Expected length \\n\\t[%#v]\\nto be\\n\\t[%#v]\\n%v \", message, actual, expected, callerInfo(2 +callStackAdjust))\n\t}\n}", "func lengthCheck(name string, array []byte, expected int) error {\n\tif len(array) != expected {\n\t\treturn fmt.Errorf(\"length of %s should be %d\", name, expected)\n\t}\n\treturn nil\n}", "func ShouldHaveLength(actual interface{}, expected ...interface{}) error {\n\tif err := need(1, expected); err != nil {\n\t\treturn err\n\t}\n\n\tlength, err := cast.ToInt64E(expected[0])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar actualLength int\n\n\tvalue := reflect.ValueOf(actual)\n\tswitch value.Kind() {\n\tcase reflect.Slice, reflect.Chan, reflect.Map, reflect.String:\n\t\tactualLength = value.Len()\n\t\tif value.Len() == int(length) {\n\t\t\treturn nil\n\t\t}\n\tcase reflect.Ptr:\n\t\telem := value.Elem()\n\t\tkind := elem.Kind()\n\t\tactualLength = elem.Len()\n\t\tif (kind == reflect.Slice || kind == reflect.Array) && actualLength == int(length) {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn fmt.Errorf(\"expected '%v' have length of %d but it wasn't (%d)\", actual, length, actualLength)\n}", "func Length(param string, min int, max int) error {\n\tlength := len(param)\n\tif min != -1 && length < min {\n\t\treturn fmt.Errorf(\"length of string %s %d, expected > %d\", param, length, min)\n\t}\n\tif max != -1 && length > max {\n\t\treturn fmt.Errorf(\"length of string %s %d, expected < %d\", param, length, max)\n\t}\n\treturn 
nil\n}", "func Len(t TestingT, v interface{}, length int, extras ...interface{}) bool {\n\tn, ok := tryLen(v)\n\tif !ok {\n\t\t_, acts := toString(nil, v)\n\n\t\treturn Errorf(t, fmt.Sprintf(\"Expect to apply buildin len() on %s\", acts), []labeledOutput{\n\t\t\t{\n\t\t\t\tlabel: labelMessages,\n\t\t\t\tcontent: formatExtras(extras...),\n\t\t\t},\n\t\t})\n\t}\n\n\tif n != length {\n\t\t_, acts := toString(nil, v)\n\n\t\treturn Errorf(t, fmt.Sprintf(\"Expect %s to have %d item(s)\", acts, length), []labeledOutput{\n\t\t\t{\n\t\t\t\tlabel: labelMessages,\n\t\t\t\tcontent: formatExtras(extras...),\n\t\t\t},\n\t\t\t{\n\t\t\t\tlabel: \"-expected\",\n\t\t\t\tcontent: strconv.Itoa(length),\n\t\t\t},\n\t\t\t{\n\t\t\t\tlabel: \"+received\",\n\t\t\t\tcontent: strconv.Itoa(n),\n\t\t\t},\n\t\t})\n\t}\n\n\treturn true\n}", "func Len(obj string, length int, a ...any) {\n\tl := len(obj)\n\n\tif l != length {\n\t\tdefMsg := fmt.Sprintf(assertionMsg+\": got '%d', want '%d'\", l, length)\n\t\tDefault().reportAssertionFault(defMsg, a...)\n\t}\n}", "func Len(t Testing, v interface{}, length int, formatAndArgs ...interface{}) bool {\n\tn, ok := getLen(v)\n\tif !ok {\n\t\treturn Fail(t,\n\t\t\tpretty.Sprintf(\"Could not apply len() with %# v\", v),\n\t\t\tformatAndArgs...)\n\t}\n\n\tif n != length {\n\t\treturn Fail(t,\n\t\t\tpretty.Sprintf(\"Expected %# v should have %d item(s), but got: %d item(s)\", v, length, n),\n\t\t\tformatAndArgs...)\n\t}\n\n\treturn true\n}", "func (t *Check) Length(length string, array []interface{}) (bool, error) {\n\tl, err := strconv.Atoi(length)\n\treturn l == len(array), err\n}", "func CheckSize(buf []byte, expected int, descrip string) {\n\tif len(buf) != expected {\n\t\tpanic(fmt.Sprintf(\"Incorrect %s buffer size, expected (%d), got (%d).\", descrip, expected, len(buf)))\n\t}\n}", "func ValidateAppDescription(ctx context.Context, description string) (string, error) {\n\tvar err error = nil\n\n\tfor description == \"\" {\n\t\tdescription, err = 
PromptForInput(ctx, \"Please specify a short description of the application:\")\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\treturn description, err\n}", "func (s *TestSuite) Len(object interface{}, length int, msgAndArgs ...interface{}) {\n\ts.Require().Len(object, length, msgAndArgs...)\n}", "func (r Describe) validation(cmd *cobra.Command, args []string) error {\n\tif err := require.MaxArgs(args, 3); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (o Args) Len() int { return len(o) }", "func check_args(parsed_query []string, num_expected int) bool {\n\treturn (len(parsed_query) >= num_expected)\n}", "func (v *verifier) Length(length int) *verifier {\n\treturn v.addVerification(\"Length\", len(v.Query) == length)\n}", "func ValidateLength(value string, maxLength int) error {\n\tif len(value) > maxLength {\n\t\treturn fmt.Errorf(\"Provided param value is too long.\")\n\t}\n\treturn nil\n}", "func (a *Assertions) Len(collection interface{}, length int, userMessageComponents ...interface{}) bool {\n\ta.assertion()\n\tif didFail, message := shouldHaveLength(collection, length); didFail {\n\t\treturn a.fail(message, userMessageComponents...)\n\t}\n\treturn true\n}", "func checkArgs(ctxt context.Context, s string, args ...interface{}) {\n\tc := 0\n\tfor _, f := range []string{\"%s\", \"%d\", \"%v\", \"%#v\", \"%t\", \"%p\"} {\n\t\tc += strings.Count(s, f)\n\t}\n\tl := len(args)\n\tif c != l {\n\t\tWarningf(ctxt, \"Wrong number of args for format string, [%d != %d]\", l, c)\n\t}\n}", "func IsLengthAtLeast(length int, s string) bool {\n\treturn len(s) >= length\n}", "func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool {\n\treturn Len(a.t, object, length, msgAndArgs...)\n}", "func StringLength(val any, minLen int, maxLen ...int) bool {\n\treturn RuneLength(val, minLen, maxLen...)\n}", "func (cr CommandReply) Len() int {\r\n\tpanic(\"not implemented\")\r\n}", "func RuneLength(str string, params ...string) bool 
{\n\treturn StringLength(str, params...)\n}", "func Len(scope common.Scope, args ...interface{}) interface{} {\n\tif s, ok := args[0].(string); ok {\n\t\treturn int64(len(s))\n\t}\n\treturn 0\n}", "func IsLengthAtMost(length int, s string) bool {\n\treturn len(s) <= length\n}", "func (publishBuilderEntry *PublishBuilderEntry) Length() int {\n\tpublishBuilderEntry.ensureEncoded()\n\treturn len(publishBuilderEntry.encoded)\n}", "func Length(val any, wantLen int) bool {\n\tln := CalcLength(val)\n\treturn ln != -1 && ln == wantLen\n}", "func (v *parameter) HasMaxLength() bool {\n\treturn v.maxLength != nil\n}", "func checkNumberOfArgs(name string, nargs, nresults, min, max int) error {\n\tif min == max {\n\t\tif nargs != max {\n\t\t\treturn ExceptionNewf(TypeError, \"%s() takes exactly %d arguments (%d given)\", name, max, nargs)\n\t\t}\n\t} else {\n\t\tif nargs > max {\n\t\t\treturn ExceptionNewf(TypeError, \"%s() takes at most %d arguments (%d given)\", name, max, nargs)\n\t\t}\n\t\tif nargs < min {\n\t\t\treturn ExceptionNewf(TypeError, \"%s() takes at least %d arguments (%d given)\", name, min, nargs)\n\t\t}\n\t}\n\n\tif nargs > nresults {\n\t\treturn ExceptionNewf(TypeError, \"Internal error: not enough arguments supplied to Unpack*/Parse*\")\n\t}\n\treturn nil\n}", "func ValidateInputLength(cepRaw interface{}) observable.Observable {\n\treturn observable.Create(func(emitter *observer.Observer, disposed bool) {\n\t\tcep, _ := cepRaw.(string)\n\t\tcepLength := len(cep)\n\t\tif cepLength <= cepSize {\n\t\t\temitter.OnNext(cep)\n\t\t\temitter.OnDone()\n\t\t} else {\n\t\t\temitter.OnError(errors.New(\"Cep length is less than 8 characters\"))\n\t\t}\n\t})\n}", "func (e *metaMultiArgElement) length() int {\n\tif e.size >= 0 {\n\t\treturn e.size\n\t}\n\treturn 1\n}", "func Length(i interface{}) (l int, ok bool) {\n\tv, k := preprocess(i)\n\tswitch k {\n\tcase reflect.Map, reflect.Array, reflect.Slice, reflect.String:\n\t\treturn v.Len(), true\n\t}\n\treturn 0, false\n}", 
"func Length(val interface{}, wantLen int) bool {\n\tln := CalcLength(val)\n\tif ln == -1 {\n\t\treturn false\n\t}\n\n\treturn ln == wantLen\n}", "func AssertSize(list []string, wantedLength int, t *testing.T) {\n\tlength := len(list)\n\tif length != wantedLength {\n\t\tt.Fatalf(\"Expected size %d but found %d\", length, wantedLength)\n\t}\n}", "func (p *Parameters) Len() int {\n\treturn len(p.names)\n}", "func CheckArgs(argsLength, argIndex int) error {\n\tif argsLength == (argIndex + 1) {\n\t\treturn errors.New(\"Not specified key value.\")\n\t}\n\treturn nil\n}", "func CheckArguments(arguments []Argument, min int, max int, fname string, usage string) (int, ErrorValue) {\n\targLen := len(arguments)\n\tif argLen < min || argLen > max {\n\t\treturn argLen, NewErrorValue(fmt.Sprintf(\"Invalid call to %s. Usage: %s %s\", fname, fname, usage))\n\t}\n\treturn argLen, nil\n}", "func ByteLength(str string, params ...string) bool {\n\tif len(params) == 2 {\n\t\tmin, _ := ToInt(params[0])\n\t\tmax, _ := ToInt(params[1])\n\t\treturn len(str) >= int(min) && len(str) <= int(max)\n\t}\n\n\treturn false\n}", "func ValidateArgCount(expectedArgNo, argNo int) error {\n\tswitch {\n\tcase expectedArgNo < argNo:\n\t\treturn ErrUnexpectedArgs\n\tcase expectedArgNo > argNo:\n\t\treturn ErrNotEnoughArgs\n\tcase expectedArgNo == argNo:\n\t}\n\n\treturn nil\n}", "func IsCorrectLength(reads []string, k int) bool {\n for _, read := range reads {\n if (len(read) != k) {\n return false\n }\n }\n return true\n}", "func (t byRatioDesc) Len() int {\n\treturn len(t)\n}", "func testNumberLength(number string, metadata *PhoneMetadata, numberType PhoneNumberType) ValidationResult {\n\tdesc := getNumberDescByType(metadata, numberType)\n\n\t// There should always be \"possibleLengths\" set for every element. This is declared in the XML\n\t// schema which is verified by PhoneNumberMetadataSchemaTest.\n\t// For size efficiency, where a sub-description (e.g. 
fixed-line) has the same possibleLengths\n\t// as the parent, this is missing, so we fall back to the general desc (where no numbers of the\n\t// type exist at all, there is one possible length (-1) which is guaranteed not to match the\n\t// length of any real phone number).\n\tpossibleLengths := desc.PossibleLength\n\tif len(possibleLengths) == 0 {\n\t\tpossibleLengths = metadata.GeneralDesc.PossibleLength\n\t}\n\tlocalLengths := desc.PossibleLengthLocalOnly\n\n\tif numberType == FIXED_LINE_OR_MOBILE {\n\t\tif !descHasPossibleNumberData(getNumberDescByType(metadata, FIXED_LINE)) {\n\t\t\t// The rare case has been encountered where no fixedLine data is available (true for some\n\t\t\t// non-geographical entities), so we just check mobile.\n\t\t\treturn testNumberLength(number, metadata, MOBILE)\n\t\t} else {\n\t\t\tmobileDesc := getNumberDescByType(metadata, MOBILE)\n\t\t\tif descHasPossibleNumberData(mobileDesc) {\n\t\t\t\t// Note that when adding the possible lengths from mobile, we have to again check they\n\t\t\t\t// aren't empty since if they are this indicates they are the same as the general desc and\n\t\t\t\t// should be obtained from there.\n\t\t\t\tmobileLengths := mobileDesc.PossibleLength\n\t\t\t\tif len(mobileLengths) == 0 {\n\t\t\t\t\tmobileLengths = metadata.GeneralDesc.PossibleLength\n\t\t\t\t}\n\t\t\t\tpossibleLengths = mergeLengths(possibleLengths, mobileLengths)\n\n\t\t\t\tif len(localLengths) == 0 {\n\t\t\t\t\tlocalLengths = mobileDesc.PossibleLengthLocalOnly\n\t\t\t\t} else {\n\t\t\t\t\tlocalLengths = mergeLengths(localLengths, mobileDesc.PossibleLengthLocalOnly)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// If the type is not supported at all (indicated by the possible lengths containing -1 at this\n\t// point) we return invalid length.\n\tif possibleLengths[0] == -1 {\n\t\treturn INVALID_LENGTH\n\t}\n\n\tactualLength := int32(len(number))\n\n\t// This is safe because there is never an overlap beween the possible lengths and the local-only\n\t// 
lengths; this is checked at build time.\n\tfor _, l := range localLengths {\n\t\tif l == actualLength {\n\t\t\treturn IS_POSSIBLE_LOCAL_ONLY\n\t\t}\n\t}\n\tminimumLength := possibleLengths[0]\n\tif minimumLength == actualLength {\n\t\treturn IS_POSSIBLE\n\t} else if minimumLength > actualLength {\n\t\treturn TOO_SHORT\n\t} else if possibleLengths[len(possibleLengths)-1] < actualLength {\n\t\treturn TOO_LONG\n\t}\n\n\t// We skip the first element; we've already checked it.\n\tfor _, l := range possibleLengths[1:] {\n\t\tif l == actualLength {\n\t\t\treturn IS_POSSIBLE\n\t\t}\n\t}\n\treturn INVALID_LENGTH\n}", "func fnLen(ctx Context, doc *JDoc, params []string) interface{} {\n\tstats := ctx.Value(EelTotalStats).(*ServiceStats)\n\tif params == nil || len(params) != 1 {\n\t\tctx.Log().Error(\"error_type\", \"func_len\", \"op\", \"len\", \"cause\", \"wrong_number_of_parameters\", \"params\", params)\n\t\tstats.IncErrors()\n\t\tAddError(ctx, SyntaxError{fmt.Sprintf(\"wrong number of parameters in call to len function\"), \"len\", params})\n\t\treturn nil\n\t}\n\tvar obj interface{}\n\terr := json.Unmarshal([]byte(extractStringParam(params[0])), &obj)\n\tif err != nil {\n\t\treturn len(extractStringParam(params[0]))\n\t}\n\tswitch obj.(type) {\n\tcase []interface{}:\n\t\treturn len(obj.([]interface{}))\n\tcase map[string]interface{}:\n\t\treturn len(obj.(map[string]interface{}))\n\t}\n\treturn 0\n}", "func TestDeckLength(t *testing.T) {\n\tdeck := New()\n\tlength := len(deck)\n\tif length != 52 {\n\t\tt.Errorf(\"Length of deck is incorrect, got: %d, want: %d\", length, 52)\n\t}\n}", "func lenCheck(x string, y string) bool {\n\tif len(x) == len(y) {\n\t\treturn true\n\t}\n\treturn false\n}", "func TestCommandsHaveHelp(t *testing.T) {\n\tfor i, c := range coreCommands() {\n\t\tt.Run(fmt.Sprintf(\"test long description of command %d\", i), func(t *testing.T) {\n\t\t\tassert.NotEmpty(t, c.Long)\n\t\t})\n\t}\n}", "func TestLengths(t *testing.T) {\n // strings\n for k, v := 
range testStr {\n val := LenString(k)\n if val != v {\n t.Fatalf(\"%v returned %v (expected %v)\", k, val, v)\n }\n }\n\n // bytes\n bVal := LenByte()\n if bVal != BYTE_SIZE {\n t.Fatalf(\"Byte returned %v (expected %v)\", bVal, 4)\n }\n\n // uints\n uval32 := LenUint32()\n if uval32 != UINT32_SIZE {\n t.Fatalf(\"Uint32 returned %v (expected %v)\", uval32, 4)\n }\n uval64 := LenUint64()\n if uval64 != UINT64_SIZE {\n t.Fatalf(\"Uint64 returned %v (expected %v)\", uval64, 8)\n }\n\n log.Println(\"TestLengths: passed\")\n}", "func invalidLength(offset, length, sliceLength int) bool {\n\treturn offset+length < offset || offset+length > sliceLength\n}", "func (fn *formulaFuncs) LEN(argsList *list.List) formulaArg {\n\tif argsList.Len() != 1 {\n\t\treturn newErrorFormulaArg(formulaErrorVALUE, \"LEN requires 1 string argument\")\n\t}\n\treturn newStringFormulaArg(strconv.Itoa(utf8.RuneCountInString(argsList.Front().Value.(formulaArg).String)))\n}", "func checkArguments(hash string, privateKey string) bool {\n\t// easy check\n\t// if len(hash) != 46 || len(privateKey) != 64 {\n\t// \treturn false\n\t// }\n\n\treturn true\n}", "func RuneLength2(val interface{}, minLen int, maxLen ...int) bool {\n\tstr, isString := val.(string)\n\tif !isString {\n\t\treturn false\n\t}\n\n\tstrLen := utf8.RuneCountInString(str)\n\n\t// only min length check.\n\tif len(maxLen) == 0 {\n\t\treturn strLen >= minLen\n\t}\n\n\t// min and max length check\n\treturn strLen >= minLen && strLen <= maxLen[0]\n}", "func StringLength(str string, params ...string) bool {\n\tif len(params) == 2 {\n\t\tstrLength := utf8.RuneCountInString(str)\n\t\tmin, _ := ToInt(params[0])\n\t\tmax, _ := ToInt(params[1])\n\t\treturn strLength >= int(min) && strLength <= int(max)\n\t}\n\n\treturn false\n}", "func Argsize(t *Type) int", "func StringLength2(val interface{}, minLen int, maxLen ...int) bool {\n\treturn RuneLength2(val, minLen, maxLen...)\n}", "func OnlyValidArgs(cmd *Command, args []string) error {\n\tif 
len(cmd.ValidArgs) > 0 {\n\t\t// Remove any description that may be included in ValidArgs.\n\t\t// A description is following a tab character.\n\t\tvar validArgs []string\n\t\tfor _, v := range cmd.ValidArgs {\n\t\t\tvalidArgs = append(validArgs, strings.Split(v, \"\\t\")[0])\n\t\t}\n\t\tfor _, v := range args {\n\t\t\tif !stringInSlice(v, validArgs) {\n\t\t\t\treturn fmt.Errorf(\"invalid argument %q for %q%s\", v, cmd.CommandPath(), cmd.findSuggestions(args[0]))\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (args *Args) len() int {\n\treturn len(args.items)\n}", "func TruncateDescription(schema *apiext.JSONSchemaProps, maxLen int) {\n\tEditSchema(schema, descVisitor{maxLen: maxLen})\n}", "func (c *ConfigurationProtocolOption) Len() int {\r\n\treturn 3 + len(c.Contents)\r\n}", "func ValidateLength(value string, min, max int) bool {\n\tl := len(value)\n\tif min > 0 && l < min {\n\t\treturn false\n\t}\n\tif max > 0 && l > max {\n\t\treturn false\n\t}\n\treturn true\n}", "func TestCmdBuildWordChainArgsNotSameLength(t *testing.T) {\n\terr := ErrBoundsNotSameLength()\n\n\ttestArgs := [][]string{\n\t\t{\n\t\t\t\"Test Input 1\",\n\t\t\tstartFl,\n\t\t\t\"test-start\",\n\t\t\tendFl,\n\t\t\t\"test-end\",\n\t\t},\n\t\t{\n\t\t\t\"Test input 2\",\n\t\t\tstartFl,\n\t\t\t\"a\",\n\t\t\tendFl,\n\t\t\t\"abc\",\n\t\t},\n\t\t{\n\t\t\t\"Test Input 3\",\n\t\t\tstartFl,\n\t\t\t\"random-test-start-string\",\n\t\t\tendFl,\n\t\t\t\"random-test-end-string\",\n\t\t},\n\t\t{\n\t\t\t\"Test Input 4\",\n\t\t\tstartFl,\n\t\t\tfmt.Sprintf(\"a!@#$^&*()_$&346%d\", math.MaxInt64),\n\t\t\tendFl,\n\t\t\t\"some-very-long-string-input58934^&*%$75389457\",\n\t\t},\n\t}\n\n\tfor _, test := range testArgs {\n\t\tres := cmdBuildWordChain(test[1:])\n\t\tassert.Equal(t, err, res, fmt.Sprintf(\"Failed at test %s\", test[0])) \n\t}\n}", "func (p *Bare) payloadLenOk() bool {\n\tx := p.expectedPayloadLen()\n\treturn len(p.payload) == x || -1 == x\n}", "func (p Pipeline) Len() int { return len(p.Operations) }", 
"func truncateDesc(m string) *string {\n\tif m == \"\" {\n\t\treturn nil\n\t}\n\tif len(m) > 140 {\n\t\tm = (m)[:137] + \"...\"\n\t}\n\treturn &m\n}", "func ByteLength(str string, minLen int, maxLen ...int) bool {\n\tstrLen := len(str)\n\n\t// only min length check.\n\tif len(maxLen) == 0 {\n\t\treturn strLen >= minLen\n\t}\n\n\t// min and max length check\n\treturn strLen >= minLen && strLen <= maxLen[0]\n}", "func compareLength(a interface{}, b int) bool {\n\tswitch reflect.TypeOf(a).Kind() {\n\tcase reflect.Slice:\n\t\ts := reflect.ValueOf(a)\n\t\treturn s.Len() == b\n\tcase reflect.Map:\n\t\tm := reflect.ValueOf(a)\n\t\treturn m.Len() == b\n\tcase reflect.String:\n\t\tm := reflect.ValueOf(a).String()\n\t\treturn utf8.RuneCountInString(m) == b\n\t}\n\tpanic(\"parameter 'a' does not have a Len\")\n}", "func LongDesc(s string) string {\n\tif len(s) == 0 {\n\t\treturn s\n\t}\n\treturn normalizer{s}.Heredoc().Markdown().Trim().string\n}", "func checkAtLeastArgs(nargs int, errMsg string) func(cmd *cobra.Command, args []string) error {\n\treturn func(cmd *cobra.Command, args []string) error {\n\t\tif len(args) < nargs {\n\t\t\treturn errors.Errorf(errMsg)\n\t\t}\n\t\treturn nil\n\t}\n}", "func ErrLengthRequiredf(format string, arguments ...interface{}) *Status {\n\treturn &Status{Code: http.StatusLengthRequired, Text: fmt.Sprintf(format, arguments...)}\n}", "func (t *StringDataType) Length(n int) *StringDataType {\n\treturn t.Validate(func(s string) error {\n\t\tif len(s) != n {\n\t\t\treturn fmt.Errorf(\"length of string was not equal to the required length %d\", n)\n\t\t}\n\t\treturn nil\n\t})\n}", "func (t byDiffDesc) Len() int {\n\treturn len(t)\n}", "func RuneLength(val any, minLen int, maxLen ...int) bool {\n\tstr, isString := val.(string)\n\tif !isString {\n\t\treturn false\n\t}\n\n\t// strLen := len([]rune(str))\n\tstrLen := utf8.RuneCountInString(str)\n\n\t// only min length check.\n\tif len(maxLen) == 0 {\n\t\treturn strLen >= minLen\n\t}\n\n\t// min and max 
length check\n\treturn strLen >= minLen && strLen <= maxLen[0]\n}", "func Length(i interface{}) (l int, ok bool) {\n\tswitch i := i.(type) {\n\tcase string:\n\t\treturn len(i), true\n\tcase []interface{}:\n\t\treturn len(i), true\n\tcase map[string]interface{}:\n\t\treturn len(i), true\n\tcase []int64:\n\t\treturn len(i), true\n\tcase []float64:\n\t\treturn len(i), true\n\tcase []bool:\n\t\treturn len(i), true\n\tcase map[string]float64:\n\t\treturn len(i), true\n\tcase map[string]string:\n\t\treturn len(i), true\n\tcase map[string]bool:\n\t\treturn len(i), true\n\tdefault:\n\t\treturn 0, false\n\t}\n}", "func ByteLength(str string, min, max int) bool {\n\treturn len(str) >= min && len(str) <= max\n}", "func checkArgs(nargs int, errMsg string) func(cmd *cobra.Command, args []string) error {\n\treturn func(cmd *cobra.Command, args []string) error {\n\t\tif len(args) != nargs {\n\t\t\treturn errors.Errorf(errMsg)\n\t\t}\n\t\treturn nil\n\t}\n}", "func Length(s string) (n int) {\n\tfor range Strip(s) {\n\t\tn++\n\t}\n\treturn\n}", "func Length(s string) (n int) {\n\tfor range Strip(s) {\n\t\tn++\n\t}\n\treturn\n}", "func (Interface *LineInterface) ValidateArguments() {\n\tif len(os.Args) < 2 {\n\t\tInterface.PrintUsage()\n\t\truntime.Goexit()\n\t}\n}", "func TestPacket_DataLength(t *testing.T) {\n\ttearDown := setUp(t)\n\tdefer tearDown(t)\n\n\tassert.Equal(t, len(payload), packet.DataLength())\n}", "func Length(seq Seq) int {\n\treturn len(ToSlice(seq))\n}", "func (v *verifier) MaxLength(length int) *verifier {\n\treturn v.addVerification(\"MaxLength\", len(v.Query) <= length)\n}", "func hasLengthOf(fl FieldLevel) bool {\n\tfield := fl.Field()\n\tparam := fl.Param()\n\n\tswitch field.Kind() {\n\n\tcase reflect.String:\n\t\tp := asInt(param)\n\n\t\treturn int64(utf8.RuneCountInString(field.String())) == p\n\n\tcase reflect.Slice, reflect.Map, reflect.Array:\n\t\tp := asInt(param)\n\n\t\treturn int64(field.Len()) == p\n\n\tcase reflect.Int, reflect.Int8, reflect.Int16, 
reflect.Int32, reflect.Int64:\n\t\tp := asIntFromType(field.Type(), param)\n\n\t\treturn field.Int() == p\n\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:\n\t\tp := asUint(param)\n\n\t\treturn field.Uint() == p\n\n\tcase reflect.Float32, reflect.Float64:\n\t\tp := asFloat(param)\n\n\t\treturn field.Float() == p\n\t}\n\n\tpanic(fmt.Sprintf(\"Bad field type %T\", field.Interface()))\n}", "func TestShouldGetListLength(t *testing.T) {\n\tlst := []int{1, 2, 3, 4, 5}\n\n\tl := Length(lst)\n\n\tassert.Equal(t, l, 5, \"List size should be 5\")\n}", "func filterLength(ctx stick.Context, val stick.Value, args ...stick.Value) stick.Value {\n\tif v, ok := val.(string); ok {\n\t\treturn utf8.RuneCountInString(v)\n\t}\n\tl, _ := stick.Len(val)\n\t// TODO: Report error\n\treturn l\n}", "func (p *PCOPayload) Len() int {\r\n\tl := 1\r\n\tfor _, opt := range p.ConfigurationProtocolOptions {\r\n\t\tl += opt.Len()\r\n\t}\r\n\r\n\treturn l\r\n}", "func isOperandOfLen(i interface{}, length int) bool {\n\toperand, ok := i.(Operand)\n\treturn ok && len(operand) == length\n}", "func (b *MessagesGetLongPollHistoryBuilder) PreviewLength(v int) *MessagesGetLongPollHistoryBuilder {\n\tb.Params[\"preview_length\"] = v\n\treturn b\n}", "func (c *Console) Len() uint16 {\n\tif len(c.input) > 0 {\n\t\treturn 1\n\t}\n\treturn 0\n}", "func Length(s string) int {\n\treturn len([]rune(s))\n}", "func (cmd *Command) checkArgs(args []string) {\n\tif len(args) < cmd.MinArgs {\n\t\tsyntaxError()\n\t\tfmt.Fprintf(os.Stderr, \"Command %s needs %d arguments mininum\\n\", cmd.Name, cmd.MinArgs)\n\t\tos.Exit(1)\n\t} else if len(args) > cmd.MaxArgs {\n\t\tsyntaxError()\n\t\tfmt.Fprintf(os.Stderr, \"Command %s needs %d arguments maximum\\n\", cmd.Name, cmd.MaxArgs)\n\t\tos.Exit(1)\n\t}\n}", "func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) bool {\n\tif h, ok := t.(tHelper); ok {\n\t\th.Helper()\n\t}\n\treturn Len(t, 
object, length, append([]interface{}{msg}, args...)...)\n}", "func CheckLimitNameLen(name string) error {\n\tif utf8.RuneCountInString(name) > 1000 {\n\t\treturn fmt.Errorf(\"limit up to 1000 sign for name of key\")\n\t}\n\treturn nil\n}", "func (p priorities) Len() int { return len(p) }", "func (String) Length(c *compiler.Compiler, this compiler.Expression) (expression compiler.Expression) {\n\texpression = c.NewExpression()\n\texpression.Type = Integer{}\n\texpression.Go.WriteString(`ctx.CountString(`)\n\texpression.Go.WriteB(this.Go)\n\texpression.Go.WriteString(`)`)\n\treturn expression\n}", "func (s APIParams) Len() int {\n\treturn len(s)\n}" ]
[ "0.66327566", "0.6183819", "0.61530507", "0.5897223", "0.58695334", "0.5821858", "0.5798817", "0.5707416", "0.5667413", "0.5666727", "0.5630328", "0.5620006", "0.55985975", "0.55719393", "0.553691", "0.55122423", "0.5494682", "0.5478022", "0.54566044", "0.5453439", "0.54406995", "0.5424768", "0.5368932", "0.5347746", "0.52992606", "0.52893853", "0.5286482", "0.5282415", "0.5280305", "0.52674204", "0.5247873", "0.5211842", "0.52097994", "0.5204421", "0.52013165", "0.5188788", "0.5168648", "0.5136", "0.5115868", "0.51004404", "0.50830275", "0.5051384", "0.502861", "0.50154513", "0.5009973", "0.50058675", "0.49966255", "0.49742103", "0.4972143", "0.4969529", "0.49655056", "0.49633026", "0.4950132", "0.49472925", "0.49342483", "0.4931723", "0.49116778", "0.4909526", "0.49071264", "0.49068937", "0.49048793", "0.4902046", "0.49014485", "0.48957404", "0.48940766", "0.48911825", "0.48815957", "0.48805285", "0.48678726", "0.4859899", "0.48598245", "0.48547184", "0.4848734", "0.4840415", "0.4838068", "0.4822621", "0.48168826", "0.48161423", "0.48031718", "0.4798168", "0.479064", "0.479064", "0.47903052", "0.478977", "0.47881517", "0.47801778", "0.47754937", "0.47739485", "0.4772608", "0.47599405", "0.47454748", "0.473841", "0.47356707", "0.47338152", "0.4733428", "0.47276568", "0.4721996", "0.47131062", "0.47119325", "0.47062385" ]
0.7995943
0
CheckIssueLinkPresence checks if the given PR's description contains an issue link
func CheckIssueLinkPresence(pr *gogh.PullRequest, config PluginConfiguration, logger log.Logger) string { if !issueLinkRegexp.MatchString(pr.GetBody()) { return IssueLinkMissingMessage } return "" }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func CheckDescriptionLength(pr *gogh.PullRequest, config PluginConfiguration, logger log.Logger) string {\n\tactualLength := len(strings.TrimSpace(issueLinkRegexp.ReplaceAllString(pr.GetBody(), \"\")))\n\tif actualLength < config.DescriptionContentLength {\n\t\treturn fmt.Sprintf(DescriptionLengthShortMessage, config.DescriptionContentLength, actualLength)\n\t}\n\treturn \"\"\n}", "func (tu *TwitterURL) IsLinkable() {}", "func (r *Repo) CheckIssue(comments []Comment, sender string) bool {\n\tconfig := configure.GlobalConfig.Repos\n\tapprovalsNeeded := 0\n\tapprovers := []string{}\n\tused := list.New()\n\tused.PushBack(sender) // block issue creator from approving\n\n\tfor _, relevantRepo := range config {\n\t\tif relevantRepo.Name == r.FullName {\n\t\t\tapprovalsNeeded = relevantRepo.ApprovalsNeeded\n\t\t\tapprovers = relevantRepo.Approvers\n\t\t}\n\t}\n\tif approvalsNeeded == 0 { // repo not in config\n\t\treturn false\n\t}\n\n\tfor _, comment := range comments {\n\t\tif comment.RequestApproved(approvers, used) {\n\t\t\tapprovalsNeeded -= 1\n\t\t}\n\t}\n\n\t// if not enough approvals have been made yet\n\tif approvalsNeeded > 0 {\n\t\treturn false\n\t}\n\treturn true\n}", "func (o *NiaapiNewReleaseDetailAllOf) HasReleaseNoteLinkTitle() bool {\n\tif o != nil && o.ReleaseNoteLinkTitle != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func IsIssueReference(name string) bool {\n\treturn regexp.MustCompile(fmt.Sprintf(\"^refs/heads/%s/[1-9]+([0-9]+)?$\", IssueBranchPrefix)).MatchString(name)\n}", "func (o *AddonStatus) Link() bool {\n\treturn o != nil && o.bitmap_&1 != 0\n}", "func IsRepoURL(value string) bool {\n\treturn Regex.Match([]byte(value))\n}", "func (o *NiaapiNewReleaseDetailAllOf) HasReleaseNoteLink() bool {\n\tif o != nil && o.ReleaseNoteLink != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (t *Tweet) IsLinkable() {}", "func (s *htmlState) checkURL(raw string) {\n\tif s.ignore&issueURL != 0 {\n\t\treturn\n\t}\n\n\tif 
strings.HasPrefix(raw, \"mailto:\") {\n\t\tif strings.Index(raw, \"@\") == -1 {\n\t\t\ts.err(fmt.Errorf(\"not an email address\"))\n\t\t}\n\t\treturn\n\t}\n\n\tu, err := url.Parse(raw)\n\tif err != nil {\n\t\ts.err(fmt.Errorf(\"bad URL '%s': %s\", raw, err.Error()))\n\t\treturn\n\t}\n\tif u.Opaque != \"\" {\n\t\ts.err(fmt.Errorf(\"bad URL part '%s'\", u.Opaque))\n\t\treturn\n\t}\n\n\tif strings.Index(raw, \" \") != -1 {\n\t\ts.err(fmt.Errorf(\"unencoded space in URL\"))\n\t}\n}", "func IsIssueReferencePath(name string) bool {\n\treturn regexp.MustCompile(fmt.Sprintf(\"^refs/heads/%s(/|$)?\", IssueBranchPrefix)).MatchString(name)\n}", "func (o *LaunchpadQRCode) HasLink() bool {\n\tif o != nil && o.Link != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *NiaapiNewReleaseDetailAllOf) GetReleaseNoteLinkTitleOk() (*string, bool) {\n\tif o == nil || o.ReleaseNoteLinkTitle == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ReleaseNoteLinkTitle, true\n}", "func expectedMsg(msg *prowapi.ReportMessage) bool {\n\trepos, err := getReportRepos()\n\tif err != nil {\n\t\tlog.Printf(\"Failed getting reporter's repos: %v\", err)\n\t\treturn false\n\t}\n\texpRepo := false\n\tif len(msg.Refs) > 0 {\n\t\tfor _, repo := range repos {\n\t\t\tif msg.Refs[0].Repo == repo {\n\t\t\t\texpRepo = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn expRepo && msg.Status == prowapi.FailureState && msg.JobType == prowapi.PresubmitJob\n}", "func AssureIsLink(path string) error {\n\terrPrefix := func() string {\n\t\treturn fmt.Sprintf(\"failed asserting that the path %s is a symbolic link\", path)\n\t}\n\tif err := AssureExists(path); err != nil {\n\t\treturn errors.Wrap(err, errPrefix())\n\t}\n\tfileInfoStat, err := os.Lstat(path)\n\tif err != nil {\n\t\treturn errors.Wrap(err, errPrefix())\n\t}\n\tif fileInfoStat.Mode()&os.ModeSymlink != os.ModeSymlink {\n\t\treturn errors.Wrapf(err, \"%s: unexpected file mode: got %v\", errPrefix(), fileInfoStat.Mode())\n\t}\n\treturn nil\n}", "func 
IsIssueTypeEstimable(issueTypeName string) bool {\n\tswitch issueTypeName {\n\tcase \"User story\":\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}", "func (m Model) IsPublicLinkAvailable() bool {\n\treturn len(m.Public) > 0 && m.IsPublished\n}", "func OpenPrIfNecessary(config *config.Config, todos map[int]string, prints map[int]string) (string, error) {\n\tif (config.CheckForTODOs && len(todos) > 0) || (config.CheckForPrints && len(prints) > 0) {\n\t\treader := bufio.NewReader(os.Stdin)\n\t\tfmt.Print(\"Would you still like to open the PR? (Y/n) \")\n\t\ttext, _ := reader.ReadString('\\n')\n\t\ttext = strings.TrimRight(text, \"\\n\")\n\t\tif text == \"Y\" {\n\t\t\treturn openPR(config)\n\t\t}\n\t\treturn \"Okay fix those problems\", nil\n\t}\n\treturn openPR(config)\n}", "func HasLabel(i *github.Issue, label string) bool {\n\tfor _, l := range i.Labels {\n\t\tif *l.Name == label {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func prHasSuccessfulCheck(pr *clients.PullRequest, c *checker.CheckRequest) (bool, error) {\n\tcrs, err := c.RepoClient.ListCheckRunsForRef(pr.HeadSHA)\n\tif err != nil {\n\t\treturn false, sce.WithMessage(sce.ErrScorecardInternal, fmt.Sprintf(\"Client.Checks.ListCheckRunsForRef: %v\", err))\n\t}\n\tif crs == nil {\n\t\treturn false, sce.WithMessage(sce.ErrScorecardInternal, \"cannot list check runs by ref\")\n\t}\n\n\tfor _, cr := range crs {\n\t\tif cr.Status != \"completed\" {\n\t\t\tcontinue\n\t\t}\n\t\tif cr.Conclusion != success {\n\t\t\tcontinue\n\t\t}\n\t\tif isTest(cr.App.Slug) {\n\t\t\tc.Dlogger.Debug3(&checker.LogMessage{\n\t\t\t\tPath: cr.URL,\n\t\t\t\tType: checker.FileTypeURL,\n\t\t\t\tText: fmt.Sprintf(\"CI test found: pr: %d, context: %s\", pr.Number,\n\t\t\t\t\tcr.App.Slug),\n\t\t\t})\n\t\t\treturn true, nil\n\t\t}\n\t}\n\treturn false, nil\n}", "func (r *Repo) IsGitHubRepo() bool { return strings.HasPrefix(r.URI, \"github.com/\") }", "func (o *Project) HasLink() bool {\n\tif o != nil && o.Link != nil 
{\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (t *Link) IsAttributedToLink(index int) (ok bool) {\n\treturn t.attributedTo[index].Link != nil\n\n}", "func (d *Doc) checkLink(address string, link Link, checkOtherDoc bool) error {\n\tif address == \"\" {\n\t\treturn errors.New(\"link address not set\")\n\t}\n\n\tswitch link.Type {\n\tcase externalFile:\n\t\tfallthrough\n\tcase externalLink:\n\t\t// Check to ensure that referenced file actually exists\n\n\t\tvar file string\n\n\t\tif link.ResolvedPath != \"\" {\n\t\t\tfile = link.ResolvedPath\n\t\t} else {\n\t\t\tfile, _, err := splitLink(address)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tfile, err = d.linkAddrToPath(file)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif !fileExists(file) {\n\t\t\t\treturn d.Errorf(\"link type %v invalid: %q does not exist\",\n\t\t\t\t\tlink.Type,\n\t\t\t\t\tfile)\n\t\t\t}\n\t\t}\n\n\t\tif link.Type == externalFile {\n\t\t\tbreak\n\t\t}\n\n\t\t// Check the other document\n\t\tother, err := getDoc(file, d.Logger)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif !checkOtherDoc {\n\t\t\tbreak\n\t\t}\n\n\t\t_, section, err := splitLink(address)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif section == \"\" {\n\t\t\tbreak\n\t\t}\n\n\t\tif !other.hasHeading(section) {\n\t\t\treturn other.Errorf(\"invalid link %v\", address)\n\t\t}\n\n\tcase internalLink:\n\t\t// must be a link to an existing heading\n\n\t\t// search for a heading whose LinkName == name\n\t\tfound := d.headingByLinkName(address)\n\t\tif found == nil {\n\t\t\tmsg := fmt.Sprintf(\"failed to find heading for link %q (%+v)\", address, link)\n\n\t\t\t// There is a chance the link description matches the\n\t\t\t// correct heading the link address refers to. 
In\n\t\t\t// which case, we can derive the correct link address!\n\t\t\tsuggestion, err2 := createHeadingID(link.Description)\n\n\t\t\tif err2 == nil && suggestion != link.Address {\n\t\t\t\tfound = d.headingByLinkName(suggestion)\n\t\t\t\tif found != nil {\n\t\t\t\t\tmsg = fmt.Sprintf(\"%s - correct link name is %q\", msg, suggestion)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn d.Errorf(\"%s\", msg)\n\t\t}\n\tcase urlLink:\n\t\t// NOP - handled by xurls\n\t}\n\n\treturn nil\n}", "func (r Recipe) IsHostedIn(title string) bool {\n\tmatched, _ := regexp.Match(fmt.Sprintf(\".*%s\\\\..*\", title), []byte(r.Repository.URL))\n\treturn matched\n}", "func CheckSemanticTitle(pr *gogh.PullRequest, config PluginConfiguration, logger log.Logger) string {\n\tchange := ghservice.NewRepositoryChangeForPR(pr)\n\tprefixes := GetValidTitlePrefixes(config)\n\tisTitleWithValidType := HasTitleWithValidType(prefixes, *pr.Title)\n\n\tif !isTitleWithValidType {\n\t\tif prefix, ok := wip.GetWorkInProgressPrefix(*pr.Title, wip.LoadConfiguration(logger, change)); ok {\n\t\t\ttrimmedTitle := strings.TrimPrefix(*pr.Title, prefix)\n\t\t\tisTitleWithValidType = HasTitleWithValidType(prefixes, trimmedTitle)\n\t\t}\n\t}\n\tif !isTitleWithValidType {\n\t\tallPrefixes := \"`\" + strings.Join(prefixes, \"`, `\") + \"`\"\n\t\treturn fmt.Sprintf(TitleFailureMessage, pr.GetTitle(), allPrefixes)\n\t}\n\treturn \"\"\n}", "func (t *Link) IsPreviewLink(index int) (ok bool) {\n\treturn t.preview[index].Link != nil\n\n}", "func (o *NiaapiNewReleaseDetailAllOf) HasLink() bool {\n\tif o != nil && o.Link != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *NiaapiNewReleaseDetailAllOf) GetReleaseNoteLinkOk() (*string, bool) {\n\tif o == nil || o.ReleaseNoteLink == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ReleaseNoteLink, true\n}", "func (p Page) validateLink(link string) error {\n\t// valid if link is email-protected\n\tif strings.Contains(link, \"email-protection\") {\n\t\treturn 
errEmailProtected\n\t}\n\n\t// check if given link belong to current site\n\tif !strings.HasPrefix(link, \"/\") && !strings.Contains(link, p.Url.Host) {\n\t\treturn errExternalLink\n\t}\n\treturn nil\n}", "func (o *NiaapiNewReleaseDetailAllOf) HasSoftwareDownloadLinkTitle() bool {\n\tif o != nil && o.SoftwareDownloadLinkTitle != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func isInternalLink(url string) (yes bool) {\n\tlink := []byte(url)\n\tyes = false\n\t// a tag begin with '#'\n\tif len(link) > 0 && link[0] == '#' {\n\t\tyes = true\n\t}\n\t// link begin with '/' but not '//', the second maybe a protocol relative link\n\tif len(link) >= 2 && link[0] == '/' && link[1] != '/' {\n\t\tyes = true\n\t}\n\t// only the root '/'\n\tif len(link) == 1 && link[0] == '/' {\n\t\tyes = true\n\t}\n\treturn\n}", "func prHasSuccessStatus(pr *clients.PullRequest, c *checker.CheckRequest) (bool, error) {\n\tstatuses, err := c.RepoClient.ListStatuses(pr.HeadSHA)\n\tif err != nil {\n\t\treturn false, sce.WithMessage(sce.ErrScorecardInternal, fmt.Sprintf(\"Client.Repositories.ListStatuses: %v\", err))\n\t}\n\n\tfor _, status := range statuses {\n\t\tif status.State != success {\n\t\t\tcontinue\n\t\t}\n\t\tif isTest(status.Context) {\n\t\t\tc.Dlogger.Debug3(&checker.LogMessage{\n\t\t\t\tPath: status.URL,\n\t\t\t\tType: checker.FileTypeURL,\n\t\t\t\tText: fmt.Sprintf(\"CI test found: pr: %d, context: %s\", pr.Number,\n\t\t\t\t\tstatus.Context),\n\t\t\t})\n\t\t\treturn true, nil\n\t\t}\n\t}\n\treturn false, nil\n}", "func AssureIsLinkTo(path, pointee string) error {\n\terrPrefix := func() string {\n\t\treturn fmt.Sprintf(\"failed asserting that %s is a symbolic link to %s\", path, pointee)\n\t}\n\tif err := AssureIsLink(path); err != nil {\n\t\treturn errors.Wrap(err, errPrefix())\n\t}\n\trel, err := os.Readlink(path)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"%s: failed to readlink %v\", errPrefix(), path)\n\t}\n\tif rel != pointee {\n\t\treturn errors.Errorf(\"%s: found 
unexpected profile path: got %v, want %v\", errPrefix(), rel, pointee)\n\t}\n\treturn nil\n}", "func IsPrerelease(x string) bool {\n\treturn parse(x).kind != \"\"\n}", "func (mt *ComJossemargtSaoDraftLink) Validate() (err error) {\n\n\tif mt.Href == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"href\"))\n\t}\n\treturn\n}", "func (mt *ComJossemargtSaoDraftLink) Validate() (err error) {\n\n\tif mt.Href == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"href\"))\n\t}\n\treturn\n}", "func (a *Application) checkRepoIsReal(name ...string) bool {\n\tvar fullname string\n\tswitch len(name) {\n\tcase 1:\n\t\tfullname = strings.TrimSpace(name[0])\n\t\tif fullname == \"\" || fullname == \"/\" {\n\t\t\treturn false\n\t\t}\n\tcase 2:\n\t\torg := strings.TrimSpace(name[0])\n\t\trepo := strings.TrimSpace(name[1])\n\t\tif org == \"\" || repo == \"\" {\n\t\t\treturn false\n\t\t}\n\t\tfullname = u.Format(\"%s/%s\", name[0], name[1])\n\tdefault:\n\t\tpanic(\"Youre doing this wrong\")\n\t}\n\turl := u.Format(\"https://github.com/%s\", fullname)\n\tif code, _, _, e := nt.HTTP(nt.HEAD, url, nt.NewHeaderBuilder().GetHeader(), nil); e != nil || code != 200 {\n\t\treturn false\n\t} else {\n\t\treturn true\n\t}\n}", "func IsLink(path string) bool {\n\tst, err := os.Stat(path)\n\treturn err == nil && (st.Mode()&os.ModeSymlink) != 0\n}", "func (l *AddonStatusList) Link() bool {\n\treturn l != nil && l.link\n}", "func (me TpubStatusInt) IsPmcRelease() bool { return me.String() == \"pmc-release\" }", "func (l *Log) IsLinkable() {}", "func isShubPullRef(shubRef string) bool {\n\t// define regex for each URI component\n\tregistryRegexp := `([-.a-zA-Z0-9/]{1,64}\\/)?` // target is very open, outside registry\n\tnameRegexp := `([-a-zA-Z0-9]{1,39}\\/)` // target valid github usernames\n\tcontainerRegexp := `([-_.a-zA-Z0-9]{1,64})` // target valid github repo names\n\ttagRegexp := `(:[-_.a-zA-Z0-9]{1,64})?` // target is very open, file 
extensions or branch names\n\tdigestRegexp := `((\\@[a-f0-9]{32})|(\\@[a-f0-9]{40}))?$` // target file md5 has, git commit hash, git branch\n\n\t// expression is anchored\n\tshubRegex, err := regexp.Compile(`^(shub://)` + registryRegexp + nameRegexp + containerRegexp + tagRegexp + digestRegexp + `$`)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tfound := shubRegex.FindString(shubRef)\n\n\t// sanity check\n\t// if found string is not equal to the input, input isn't a valid URI\n\treturn shubRef == found\n}", "func (o *PostWebhook) HasPrReopened() bool {\n\tif o != nil && o.PrReopened != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (rmpalr RoleManagementPolicyAssignmentListResult) hasNextLink() bool {\n\treturn rmpalr.NextLink != nil && len(*rmpalr.NextLink) != 0\n}", "func (o *SecurityProblem) HasTitle() bool {\n\tif o != nil && o.Title != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (t *Link) IsPreviewIRI(index int) (ok bool) {\n\treturn t.preview[index].IRI != nil\n\n}", "func (o *SecurityProblem) HasUrl() bool {\n\tif o != nil && o.Url != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *NotificationConfig) HasIssueType() bool {\n\tif o != nil && o.IssueType != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func resourceIssueLinkCreate(d *schema.ResourceData, m interface{}) error {\n\tconfig := m.(*Config)\n\n\tissueLink := new(jira.IssueLink)\n\n\tissueLink.InwardIssue = &jira.Issue{Key: d.Get(\"inward_key\").(string)}\n\tissueLink.OutwardIssue = &jira.Issue{Key: d.Get(\"outward_key\").(string)}\n\tissueLink.Type = jira.IssueLinkType{ID: d.Get(\"link_type\").(string)}\n\n\tresp, err := config.jiraClient.Issue.AddLink(issueLink)\n\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Creating Issue Link failed\")\n\t}\n\n\tlocation, err := resp.Location()\n\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Creating Issue Link failed\")\n\t}\n\n\tcomponents := strings.Split(location.Path, \"/\")\n\tID := 
components[len(components)-1]\n\n\td.SetId(ID)\n\n\treturn resourceIssueLinkRead(d, m)\n}", "func ValidateRepoURL(repoURL string) error {\n\t_, _, err := ParseRepoURL(repoURL)\n\treturn err\n}", "func (r *Repository) IsURLBusy(ctx context.Context, url string) (bool, error) {\r\n\tcount, err := r.officeCollection.CountDocuments(\r\n\t\tctx,\r\n\t\tbson.M{\r\n\t\t\t\"url\": url,\r\n\t\t})\r\n\tif err != nil {\r\n\t\treturn false, err\r\n\t}\r\n\tif count > 0 {\r\n\t\treturn false, err\r\n\t}\r\n\r\n\treturn true, nil\r\n}", "func (o *NiaapiNewReleaseDetailAllOf) GetLinkOk() (*string, bool) {\n\tif o == nil || o.Link == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Link, true\n}", "func (t *Link) IsSummaryIRI(index int) (ok bool) {\n\treturn t.summary[index].IRI != nil\n\n}", "func (wl *Workload) pullRequestVisible(pr *PullRequest) bool {\n\tif pr.Author != wl.Assignee {\n\t\t// Mismatched authors\n\t\treturn false\n\t}\n\n\tif wl.TrackingIssue.Milestone != \"\" {\n\t\tif pr.Milestone != \"\" && wl.TrackingIssue.Milestone != pr.Milestone {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func (pp packagePath) IsRel() bool {\n\treturn !pp.IsAbs()\n}", "func (o *Cause) HasDocumentationUrl() bool {\n\tif o != nil && o.DocumentationUrl != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (f WithoutLabelsFilter) ApplyIssue(context operations.Context, issue *github.Issue) bool {\n\treturn !gh.HasAnyLabels(f.labels, issue.Labels)\n}", "func hasPresubmit(r []rt.Repo) bool {\n\treturn hasProwjobType(r, \"type_presubmit\")\n}", "func (o *AddOn) Link() bool {\n\treturn o != nil && o.bitmap_&1 != 0\n}", "func isPRChanged(pe github.PullRequestEvent) bool {\n\tswitch pe.Action {\n\tcase github.PullRequestActionOpened:\n\t\treturn true\n\tcase github.PullRequestActionReopened:\n\t\treturn true\n\tcase github.PullRequestActionSynchronize:\n\t\treturn true\n\tcase github.PullRequestActionEdited:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}", "func (ihdlr 
IotHubDescriptionListResult) hasNextLink() bool {\n\treturn ihdlr.NextLink != nil && len(*ihdlr.NextLink) != 0\n}", "func checkLinkExists(ctx context.Context, view arangodb.ArangoSearchView, colName string, t *testing.T) bool {\n\tprops, err := view.Properties(ctx)\n\trequire.NoError(t, err, \"Failed to get view properties\")\n\tlinks := props.Links\n\tif _, exists := links[colName]; !exists {\n\t\treturn false\n\t}\n\treturn true\n}", "func (o *Project) GetLinkOk() (*ProjectLink, bool) {\n\tif o == nil || o.Link == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Link, true\n}", "func (r *RepoRef) IsRemote() bool {\n\treturn r.URL != \"\"\n}", "func issueNeedsTriage(issue *Issue) bool {\n\t// Only sig-network has opted in.\n\tif !issue.hasLabel(\"sig/network\") {\n\t\treturn false\n\t}\n\n\t// Don't double-comment.\n\tif issue.hasLabel(\"triage/unresolved\") {\n\t\treturn false\n\t}\n\n\t// Don't relabel resolved issues.\n\tif issue.hasCommentWithCommand(\"/remove-triage\", \"unresolved\") {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func CheckPublic(url string) (bool, error) {\n\tcode, _, err := fetchPage(url)\n\tif err != nil {\n\t\treturn true, err\n\t}\n\tif code/100 == 4 {\n\t\treturn false, nil\n\t}\n\n\treturn true, nil\n}", "func (p *Person) HasURL() bool {\n\tif len(p.URLs) > 0 {\n\t\tfor _, u := range p.URLs {\n\t\t\tif u.URL != \"\" {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func IsValidRef(ctx context.Context, cSpecStr string, ddb *doltdb.DoltDB, rsr env.RepoStateReader) (bool, error) {\n\t// The error return value is only for propagating unhandled errors from rsr.CWBHeadRef()\n\t// All other errors merely indicate an invalid ref spec.\n\t// TODO: It's much better to enumerate the expected errors, to make sure we don't suppress any unexpected ones.\n\tcs, err := doltdb.NewCommitSpec(cSpecStr)\n\tif err != nil {\n\t\treturn false, nil\n\t}\n\n\theadRef, err := rsr.CWBHeadRef()\n\tif err == 
doltdb.ErrOperationNotSupportedInDetachedHead {\n\t\t// This is safe because ddb.Resolve checks if headRef is nil, but only when the value is actually needed.\n\t\t// Basically, this guarentees that resolving \"HEAD\" or similar will return an error but other resolves will work.\n\t\theadRef = nil\n\t} else if err != nil {\n\t\treturn false, err\n\t}\n\n\t_, err = ddb.Resolve(ctx, cs, headRef)\n\tif err != nil {\n\t\treturn false, nil\n\t}\n\n\treturn true, nil\n}", "func (o *Commitstatus) HasUrl() bool {\n\tif o != nil && o.Url != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (t *Link) IsHreflangIRI() (ok bool) {\n\treturn t.hreflang != nil && t.hreflang.IRI != nil\n\n}", "func resourceIssueLinkRead(d *schema.ResourceData, m interface{}) error {\n\tconfig := m.(*Config)\n\n\turlStr := fmt.Sprintf(\"%s/%s\", issueLinkAPIEndpoint, d.Id())\n\tissueLink := new(jira.IssueLink)\n\n\terr := request(config.jiraClient, \"GET\", urlStr, nil, issueLink)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Request failed\")\n\t}\n\n\td.Set(\"inward_key\", issueLink.InwardIssue.Key)\n\td.Set(\"outward_key\", issueLink.OutwardIssue.Key)\n\td.Set(\"link_type\", issueLink.Type.ID)\n\n\treturn nil\n}", "func (t *Link) IsWidthIRI() (ok bool) {\n\treturn t.width != nil && t.width.IRI != nil\n\n}", "func (t *Link) HasUnknownHref() (ok bool) {\n\treturn t.unknown_ != nil && t.unknown_[\"href\"] != nil\n\n}", "func (c *helmWrapper) IsReleaseReady(waitTime time.Duration) (bool, error) {\n\n\t// Get the manifest to build resources\n\tmanifest, err := c.Manifest()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tclient := c.helmConf.KubeClient\n\tresources, _ := client.Build(bytes.NewBufferString(manifest), true)\n\n\terr = client.Wait(resources, waitTime)\n\n\tif err == nil {\n\t\treturn true, nil\n\t}\n\n\tif err == wait.ErrWaitTimeout {\n\t\treturn false, ErrorTimedOutToWaitResource\n\t}\n\n\treturn false, err\n}", "func (t *Link) IsRelIRI(index int) (ok bool) {\n\treturn 
t.rel[index].IRI != nil\n\n}", "func checkLink(link string, c chan string) {\n\t_, err := http.Get(link)\n\n\tif err != nil {\n\t\tfmt.Println(link, \"might be down!\")\n\t\t// Send message to channel\n\t\tc <- link\n\t\treturn\n\t}\n\n\tfmt.Println(link, \"is up\")\n\t// Send message to channel\n\tc <- link\n}", "func (i IntransitiveActivity) IsLink() bool {\n\treturn false\n}", "func (mwlr MaintenanceWindowListResult) hasNextLink() bool {\n return mwlr.NextLink != nil && len(*mwlr.NextLink) != 0\n }", "func (o *CommentLinks) HasHtml() bool {\n\tif o != nil && o.Html != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (m *Link) Validate() (bool, []ValidationError) {\n\terrors := []ValidationError{}\n\n\t// Validate: Name\n\tif len(m.Name) == 0 {\n\t\terrors = append(errors, ValidationError{\n\t\t\t\"name\", \"Name is missing\",\n\t\t})\n\t}\n\n\t// Validate: URL\n\tif len(m.URL) == 0 {\n\t\terrors = append(errors, ValidationError{\n\t\t\t\"url\", \"URL is missing\",\n\t\t})\n\t}\n\n\treturn (len(errors) == 0), errors\n}", "func RSSAnnouncementExists(ctx context.Context, exec boil.ContextExecutor, guildID int64) (bool, error) {\n\tvar exists bool\n\tsql := \"select exists(select 1 from \\\"rss_announcements\\\" where \\\"guild_id\\\"=$1 limit 1)\"\n\n\tif boil.IsDebug(ctx) {\n\t\twriter := boil.DebugWriterFrom(ctx)\n\t\tfmt.Fprintln(writer, sql)\n\t\tfmt.Fprintln(writer, guildID)\n\t}\n\trow := exec.QueryRowContext(ctx, sql, guildID)\n\n\terr := row.Scan(&exists)\n\tif err != nil {\n\t\treturn false, errors.Wrap(err, \"models: unable to check if rss_announcements exists\")\n\t}\n\n\treturn exists, nil\n}", "func IsLink(filename string) bool {\n\tfi, err := os.Lstat(filename)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn fi.Mode()&os.ModeSymlink == os.ModeSymlink\n}", "func Verify(a, b *Release, minTitleLength int) MatchResult {\n\tif a.ExtIDs.DOI != \"\" && a.ExtIDs.DOI == b.ExtIDs.DOI {\n\t\treturn MatchResult{StatusExact, ReasonDOI}\n\t}\n\tif 
a.WorkID != \"\" && a.WorkID == b.WorkID {\n\t\treturn MatchResult{StatusExact, ReasonWorkID}\n\t}\n\taTitleLower := strings.ToLower(a.Title)\n\tbTitleLower := strings.ToLower(b.Title)\n\tif utf8.RuneCountInString(a.Title) < minTitleLength {\n\t\treturn MatchResult{StatusAmbiguous, ReasonShortTitle}\n\t}\n\tif BlacklistTitle.Contains(aTitleLower) {\n\t\treturn MatchResult{StatusAmbiguous, ReasonBlacklisted}\n\t}\n\tif BlacklistTitle.Contains(bTitleLower) {\n\t\treturn MatchResult{StatusAmbiguous, ReasonBlacklisted}\n\t}\n\tfor _, fragment := range BlacklistTitleFragments.Slice() {\n\t\tif strings.Contains(aTitleLower, fragment) {\n\t\t\treturn MatchResult{StatusAmbiguous, ReasonBlacklistedFragment}\n\t\t}\n\t}\n\tif strings.Contains(aTitleLower, \"subject index\") && strings.Contains(bTitleLower, \"subject index\") {\n\t\tif a.ContainerID != \"\" && a.ContainerID != b.ContainerID {\n\t\t\treturn MatchResult{StatusDifferent, ReasonContainer}\n\t\t}\n\t}\n\tif a.Title != \"\" && a.Title == b.Title &&\n\t\ta.Extra.DataCite.MetadataVersion > 0 && b.Extra.DataCite.MetadataVersion > 0 &&\n\t\ta.Extra.DataCite.MetadataVersion != b.Extra.DataCite.MetadataVersion {\n\t\treturn MatchResult{StatusExact, ReasonDataciteVersion}\n\t}\n\tif strings.HasPrefix(a.ExtIDs.DOI, \"10.14288/\") && strings.HasPrefix(b.ExtIDs.DOI, \"10.14288/\") &&\n\t\ta.ExtIDs.DOI != b.ExtIDs.DOI {\n\t\treturn MatchResult{StatusDifferent, ReasonCustomPrefix1014288}\n\t}\n\tif strings.HasPrefix(a.ExtIDs.DOI, \"10.3403\") && strings.HasPrefix(b.ExtIDs.DOI, \"10.3403\") {\n\t\tif a.ExtIDs.DOI+\"u\" == b.ExtIDs.DOI || b.ExtIDs.DOI+\"u\" == a.ExtIDs.DOI {\n\t\t\treturn MatchResult{StatusStrong, ReasonCustomBSIUndated}\n\t\t}\n\t\taSubtitle := a.Subtitle()\n\t\tbSubtitle := b.Subtitle()\n\t\tif a.Title != \"\" && a.Title == b.Title &&\n\t\t\t((len(aSubtitle) > 0 && aSubtitle[0] != \"\" && len(bSubtitle) == 0) ||\n\t\t\t\t(len(aSubtitle) == 0 && len(bSubtitle) > 0 && bSubtitle[0] != \"\")) {\n\t\t\treturn 
MatchResult{StatusStrong, ReasonCustomBSISubdoc}\n\t\t}\n\t}\n\tif strings.HasPrefix(a.ExtIDs.DOI, \"10.1149\") && strings.HasPrefix(b.ExtIDs.DOI, \"10.1149\") {\n\t\tv := \"10.1149/ma\"\n\t\tif (strings.HasPrefix(a.ExtIDs.DOI, v) && !strings.HasPrefix(b.ExtIDs.DOI, v)) ||\n\t\t\t(!strings.HasPrefix(a.ExtIDs.DOI, v) && strings.HasPrefix(b.ExtIDs.DOI, v)) {\n\t\t\treturn MatchResult{StatusDifferent, ReasonCustomIOPMAPattern}\n\t\t}\n\t}\n\tif strings.Contains(a.Title, \"Zweckverband Volkshochschule\") && a.Title != b.Title {\n\t\treturn MatchResult{StatusDifferent, ReasonCustomVHS}\n\t}\n\tif PatAppendix.MatchString(a.Title) {\n\t\treturn MatchResult{StatusAmbiguous, ReasonAppendix}\n\t}\n\tif strings.HasPrefix(a.ExtIDs.DOI, \"10.6084/\") && strings.HasPrefix(b.ExtIDs.DOI, \"10.6084/\") {\n\t\tav := PatFigshareVersion.ReplaceAllString(a.ExtIDs.DOI, \"\")\n\t\tbv := PatFigshareVersion.ReplaceAllString(b.ExtIDs.DOI, \"\")\n\t\tif av == bv {\n\t\t\treturn MatchResult{StatusStrong, ReasonFigshareVersion}\n\t\t}\n\t}\n\tif PatVersionedDOI.MatchString(a.ExtIDs.DOI) && PatVersionedDOI.MatchString(b.ExtIDs.DOI) {\n\t\treturn MatchResult{StatusStrong, ReasonVersionedDOI}\n\t}\n\tif looksLikeComponent(a.ExtIDs.DOI, b.ExtIDs.DOI) {\n\t\treturn MatchResult{StatusStrong, ReasonVersionedDOI}\n\t}\n\tif len(a.Extra.DataCite.Relations) > 0 || len(b.Extra.DataCite.Relations) > 0 {\n\t\tgetRelatedDOI := func(rel *Release) *set.Set {\n\t\t\tss := set.New()\n\t\t\tfor _, rel := range rel.Extra.DataCite.Relations {\n\t\t\t\tif strings.ToLower(rel.RelatedIdentifierType) != \"doi\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tss.Add(rel.RelatedIdentifier())\n\t\t\t}\n\t\t\treturn ss\n\t\t}\n\t\taRelated := getRelatedDOI(a)\n\t\tbRelated := getRelatedDOI(b)\n\t\tif aRelated.Contains(b.ExtIDs.DOI) || bRelated.Contains(a.ExtIDs.DOI) {\n\t\t\treturn MatchResult{StatusStrong, ReasonDataciteRelatedID}\n\t\t}\n\t}\n\tif a.ExtIDs.Arxiv != \"\" && b.ExtIDs.Arxiv != \"\" {\n\t\taSub := 
PatArxivVersion.FindStringSubmatch(a.ExtIDs.Arxiv)\n\t\tbSub := PatArxivVersion.FindStringSubmatch(b.ExtIDs.Arxiv)\n\t\tif len(aSub) == 2 && len(bSub) == 2 && aSub[1] == bSub[1] {\n\t\t\treturn MatchResult{StatusStrong, ReasonArxivVersion}\n\t\t}\n\t}\n\tif a.ReleaseType != b.ReleaseType {\n\t\ttypes := set.FromSlice([]string{a.ReleaseType, b.ReleaseType})\n\t\tignoreTypes := set.FromSlice([]string{\"article\", \"article-journal\", \"report\", \"paper-conference\"})\n\t\tif types.Intersection(ignoreTypes).IsEmpty() {\n\t\t\treturn MatchResult{StatusDifferent, ReasonReleaseType}\n\t\t}\n\t\tif types.Contains(\"dataset\") && (types.Contains(\"article\") || types.Contains(\"article-journal\")) {\n\t\t\treturn MatchResult{StatusDifferent, ReasonReleaseType}\n\t\t}\n\t\tif types.Contains(\"book\") && (types.Contains(\"article\") || types.Contains(\"article-journal\")) {\n\t\t\treturn MatchResult{StatusDifferent, ReasonReleaseType}\n\t\t}\n\t}\n\tif a.ReleaseType == \"dataset\" && b.ReleaseType == \"dataset\" && a.ExtIDs.DOI != b.ExtIDs.DOI {\n\t\treturn MatchResult{StatusDifferent, ReasonDatasetDOI}\n\t}\n\tif a.ReleaseType == \"chapter\" && b.ReleaseType == \"chapter\" &&\n\t\ta.Extra.ContainerName != \"\" && a.Extra.ContainerName != b.Extra.ContainerName {\n\t\treturn MatchResult{StatusDifferent, ReasonBookChapter}\n\t}\n\tif a.Extra.Crossref.Type == \"component\" && a.Title != b.Title {\n\t\treturn MatchResult{StatusDifferent, ReasonComponent}\n\t}\n\tif a.ReleaseType == \"component\" && b.ReleaseType == \"component\" {\n\t\tif a.ExtIDs.DOI != \"\" && a.ExtIDs.DOI != b.ExtIDs.DOI {\n\t\t\treturn MatchResult{StatusDifferent, ReasonComponent}\n\t\t}\n\t}\n\taSlugTitle := strings.TrimSpace(strings.Replace(slugifyString(a.Title), \"\\n\", \" \", -1))\n\tbSlugTitle := strings.TrimSpace(strings.Replace(slugifyString(b.Title), \"\\n\", \" \", -1))\n\n\tif aSlugTitle == bSlugTitle {\n\t\tif a.ReleaseYear() != 0 && b.ReleaseYear() != 0 && 
absInt(a.ReleaseYear()-b.ReleaseYear()) > 40 {\n\t\t\treturn MatchResult{StatusDifferent, ReasonYear}\n\t\t}\n\t}\n\tif aSlugTitle == bSlugTitle {\n\t\tieeeArxivCheck := func(a, b *Release) (ok bool) {\n\t\t\treturn doiPrefix(a.ExtIDs.DOI) == \"10.1109\" && b.ExtIDs.Arxiv != \"\"\n\t\t}\n\t\tif ieeeArxivCheck(a, b) || ieeeArxivCheck(b, a) {\n\t\t\treturn MatchResult{StatusStrong, ReasonCustomIEEEArxiv}\n\t\t}\n\t}\n\tif aSlugTitle == bSlugTitle {\n\t\tif strings.HasPrefix(a.ExtIDs.DOI, \"10.7916/\") && strings.HasPrefix(b.ExtIDs.DOI, \"10.7916/\") {\n\t\t\treturn MatchResult{StatusAmbiguous, ReasonCustomPrefix107916}\n\t\t}\n\t}\n\tif aSlugTitle == bSlugTitle {\n\t\taSubtitle := a.Subtitle()\n\t\tbSubtitle := b.Subtitle()\n\t\tfor _, aSub := range aSubtitle {\n\t\t\tfor _, bSub := range bSubtitle {\n\t\t\t\tif slugifyString(aSub) != slugifyString(bSub) {\n\t\t\t\t\treturn MatchResult{StatusDifferent, ReasonSubtitle}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\trawAuthors := func(rel *Release) (names []string) {\n\t\tfor _, c := range rel.Contribs {\n\t\t\tname := strings.TrimSpace(c.RawName)\n\t\t\tif name == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tnames = append(names, name)\n\t\t}\n\t\treturn names\n\t}\n\taAuthors := set.FromSlice(rawAuthors(a))\n\tbAuthors := set.FromSlice(rawAuthors(b))\n\taSlugAuthors := set.FromSlice(mapString(slugifyString, aAuthors.Slice()))\n\tbSlugAuthors := set.FromSlice(mapString(slugifyString, bAuthors.Slice()))\n\tif aTitleLower == bTitleLower {\n\t\tif aAuthors.Len() > 0 && aAuthors.Equals(bAuthors) {\n\t\t\tif a.ReleaseYear() > 0 && b.ReleaseYear() > 0 && absInt(a.ReleaseYear()-b.ReleaseYear()) > 4 {\n\t\t\t\treturn MatchResult{StatusDifferent, ReasonYear}\n\t\t\t}\n\t\t\treturn MatchResult{StatusExact, ReasonTitleAuthorMatch}\n\t\t}\n\t}\n\tif looksLikeFilename(a.Title) || looksLikeFilename(b.Title) {\n\t\tif a.Title != b.Title {\n\t\t\treturn MatchResult{StatusDifferent, ReasonTitleFilename}\n\t\t}\n\t}\n\tif a.Title != \"\" && a.Title == 
b.Title {\n\t\tif a.ReleaseYear() > 0 && b.ReleaseYear() > 0 && absInt(a.ReleaseYear()-b.ReleaseYear()) > 2 {\n\t\t\treturn MatchResult{StatusDifferent, ReasonYear}\n\t\t}\n\t}\n\t// XXX: skipping chemical formula detection (to few cases; https://git.io/Jtdax)\n\tif len(aSlugTitle) < 10 && aSlugTitle != bSlugTitle {\n\t\treturn MatchResult{StatusAmbiguous, ReasonShortTitle}\n\t}\n\tif PatDigits.MatchString(aSlugTitle) &&\n\t\taSlugTitle != bSlugTitle &&\n\t\tunifyDigits(aSlugTitle) == unifyDigits(bSlugTitle) {\n\t\treturn MatchResult{StatusDifferent, ReasonNumDiff}\n\t}\n\tif aSlugTitle != \"\" && bSlugTitle != \"\" &&\n\t\tstrings.ReplaceAll(aSlugTitle, \" \", \"\") == strings.ReplaceAll(bSlugTitle, \" \", \"\") {\n\t\tif aSlugAuthors.Intersection(bSlugAuthors).Len() > 0 {\n\t\t\tif a.ReleaseYear() > 0 && b.ReleaseYear() > 0 && absInt(a.ReleaseYear()-b.ReleaseYear()) > 4 {\n\t\t\t\treturn MatchResult{StatusDifferent, ReasonYear}\n\t\t\t}\n\t\t\treturn MatchResult{StatusStrong, ReasonSlugTitleAuthorMatch}\n\t\t}\n\t}\n\tif a.ReleaseYear() > 0 && a.ReleaseYear() == b.ReleaseYear() && aTitleLower == bTitleLower {\n\t\tif (a.ExtIDs.PMID != \"\" && b.ExtIDs.DOI != \"\") || (b.ExtIDs.PMID != \"\" && a.ExtIDs.DOI != \"\") {\n\t\t\treturn MatchResult{StatusStrong, ReasonPMIDDOIPair}\n\t\t}\n\t}\n\tif a.ExtIDs.Jstor != \"\" && b.ExtIDs.Jstor != \"\" && a.ExtIDs.Jstor != b.ExtIDs.Jstor {\n\t\treturn MatchResult{StatusDifferent, ReasonJstorID}\n\t}\n\tif a.ContainerID != \"\" && a.ContainerID == b.ContainerID && a.ExtIDs.DOI != b.ExtIDs.DOI &&\n\t\tdoiPrefix(a.ExtIDs.DOI) != \"10.1126\" &&\n\t\tdoiPrefix(a.ExtIDs.DOI) == doiPrefix(b.ExtIDs.DOI) {\n\t\treturn MatchResult{StatusDifferent, ReasonSharedDOIPrefix}\n\t}\n\tif aAuthors.Len() > 0 && aSlugAuthors.Intersection(bSlugAuthors).IsEmpty() {\n\t\tnumAuthors := set.Min(aSlugAuthors, bSlugAuthors)\n\t\tscore := averageScore(aSlugAuthors, bSlugAuthors)\n\t\tif (numAuthors < 3 && score > 0.9) || (numAuthors >= 3 && score > 0.5) 
{\n\t\t\treturn MatchResult{StatusStrong, ReasonTokenizedAuthors}\n\t\t}\n\t\taTok := set.FromSlice(strings.Fields(aSlugAuthors.Join(\" \")))\n\t\tbTok := set.FromSlice(strings.Fields(bSlugAuthors.Join(\" \")))\n\t\taTok = set.Filter(aTok, func(s string) bool {\n\t\t\treturn len(s) > 2\n\t\t})\n\t\tbTok = set.Filter(bTok, func(s string) bool {\n\t\t\treturn len(s) > 2\n\t\t})\n\t\tif aTok.Len() > 0 && bTok.Len() > 0 {\n\t\t\tif aTok.Jaccard(bTok) > 0.35 {\n\t\t\t\treturn MatchResult{StatusStrong, ReasonJaccardAuthors}\n\t\t\t}\n\t\t}\n\t\treturn MatchResult{StatusDifferent, ReasonContribIntersectionEmpty}\n\t}\n\tif doiPrefix(a.ExtIDs.DOI) == \"10.5860\" || doiPrefix(b.ExtIDs.DOI) == \"10.5860\" {\n\t\treturn MatchResult{StatusAmbiguous, ReasonCustomPrefix105860ChoiceReview}\n\t}\n\t// XXX: parse pages\n\taParsedPages := parsePageString(a.Pages)\n\tbParsedPages := parsePageString(b.Pages)\n\tif aParsedPages.Err != nil && bParsedPages.Err != nil {\n\t\tif absInt(aParsedPages.Count()-bParsedPages.Count()) > 5 {\n\t\t\treturn MatchResult{StatusDifferent, ReasonPageCount}\n\t\t}\n\t}\n\tif aAuthors.Equals(bAuthors) &&\n\t\ta.ContainerID == b.ContainerID &&\n\t\ta.ReleaseYear() == b.ReleaseYear() &&\n\t\ta.Title != b.Title &&\n\t\t(strings.Contains(a.Title, b.Title) || strings.Contains(b.Title, a.Title)) {\n\t\treturn MatchResult{StatusStrong, ReasonTitleArtifact}\n\t}\n\treturn MatchResult{\n\t\tStatusAmbiguous,\n\t\tReasonUnknown,\n\t}\n}", "func (t *Link) IsAttributedToIRI(index int) (ok bool) {\n\treturn t.attributedTo[index].IRI != nil\n\n}", "func (me *XsdGoPkgHasElem_LinkDescription) Walk() (err error) {\r\n\tif fn := WalkHandlers.XsdGoPkgHasElem_LinkDescription; me != nil {\r\n\t\tif fn != nil {\r\n\t\t\tif err = fn(me, true); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\r\n\t\t\t\treturn\r\n\t\t\t}\r\n\t\t}\r\n\t\tif fn != nil {\r\n\t\t\tif err = fn(me, false); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) 
{\r\n\t\t\t\treturn\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\treturn\r\n}", "func (g *client) listOpenPRs(cfg v1.Github) ([]int, error) {\n\tvalidPrs := []int{}\n\tprs, _, err := g.Client.PullRequests.List(g.ctx, g.owner, g.repo, &github.PullRequestListOptions{})\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"listing pull requests\")\n\t}\n\tfor _, pr := range prs {\n\t\t// skip PRs that don't haave the correct label\n\t\tif !prContainsLabels(pr.Labels, cfg.Labels) {\n\t\t\tcontinue\n\t\t}\n\t\t// make sure we haven't already commented on the PR\n\t\tif newCommits, err := g.newCommitsExist(pr.GetNumber()); err != nil || !newCommits {\n\t\t\tcontinue\n\t\t}\n\t\tvalidPrs = append(validPrs, pr.GetNumber())\n\t}\n\n\treturn validPrs, nil\n}", "func check_if_open(url string) bool {\n\n client := http.Client{Timeout: time.Duration(5 * time.Second)} // Create a custom client\n resp, err := client.Get(\"https://\" + url + \"/about\"); // Request the instance with the custom client\n\n ans := false; // Always assume it's closed\n\n if err == nil {\n raw_body, err := ioutil.ReadAll(resp.Body); // Parse the body to bytes\n if err == nil {\n if !strings.Contains(string(raw_body), \"closed-registrations-message\") {\n ans = true;\n }\n }\n defer resp.Body.Close(); // Close the connection\n }\n return ans;\n}", "func HasLabel(label string, issueLabels []*github.Label) bool {\n\tfor _, l := range issueLabels {\n\t\tif strings.ToLower(l.GetName()) == strings.ToLower(label) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (f AssignedFilter) ApplyIssue(context operations.Context, issue *github.Issue) bool {\n\treturn f.isAssigned == (issue.Assignee != nil)\n}", "func (t *Link) IsHeightIRI() (ok bool) {\n\treturn t.height != nil && t.height.IRI != nil\n\n}", "func (o *MessagesBaseTopicLinks) HasUrl() bool {\n\tif o != nil && o.Url != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func AssertASHandleProprietaryUplinkRequest(req as.HandleProprietaryUplinkRequest) 
Assertion {\n\treturn func(assert *require.Assertions, ts *IntegrationTestSuite) {\n\t\tr := <-ts.ASClient.HandleProprietaryUpChan\n\t\tif !proto.Equal(&r, &req) {\n\t\t\tassert.Equal(req, r)\n\t\t}\n\t}\n}", "func (o *Handoff) Link() bool {\n\treturn o != nil && o.bitmap_&1 != 0\n}", "func (m SecurityListRequest) HasLocaleOfIssue() bool {\n\treturn m.Has(tag.LocaleOfIssue)\n}", "func (o *Run) HasLinks() bool {\n\tif o != nil && o.Links != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *LaunchpadQRCode) GetLinkOk() (*string, bool) {\n\tif o == nil || o.Link == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Link, true\n}", "func IsShortURLAlreadyPresent(shortURL string, db *mgo.Session) bool {\n\tshortURLCount, err := db.DB(DbName).C(CollectionName).Find(bson.M{\"ShortURLEndPoint\": shortURL}).Count()\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\treturn shortURLCount != 0\n}", "func (alr AssignmentListResult) hasNextLink() bool {\n\treturn alr.NextLink != nil && len(*alr.NextLink) != 0\n}", "func TestREADME(t *testing.T) {\n\tvar readme []byte\n\tvar err error\n\n\tConvey(\"README.md is readable\", t, func() {\n\t\treadme, err = ioutil.ReadFile(\"../README.md\")\n\t\tSo(err, ShouldBeNil)\n\t})\n\n\tConvey(\"README.md has a pkg.go.dev badge\", t, func() {\n\t\tSo(strings.Contains(string(readme), \"[![Go Reference]\"), ShouldBeTrue)\n\t})\n}" ]
[ "0.5827231", "0.52759945", "0.52670836", "0.52358055", "0.5205674", "0.50942165", "0.50830615", "0.50109303", "0.5003048", "0.4981114", "0.49796918", "0.4905218", "0.48997667", "0.4889865", "0.4878953", "0.48716253", "0.48570353", "0.48182932", "0.4803739", "0.47971547", "0.47877303", "0.4752627", "0.47519436", "0.47238475", "0.47091427", "0.47057146", "0.47048333", "0.4695144", "0.46861282", "0.46740937", "0.46696228", "0.46660525", "0.4665167", "0.46536872", "0.46408036", "0.46213812", "0.46213812", "0.4602481", "0.45946798", "0.45936084", "0.4590264", "0.45862627", "0.45835942", "0.45822674", "0.45709077", "0.45680705", "0.45555747", "0.4553636", "0.45463926", "0.4545331", "0.45452648", "0.45437112", "0.452579", "0.4511686", "0.4494502", "0.44919538", "0.44835055", "0.44790328", "0.4471391", "0.44669348", "0.44667146", "0.44615987", "0.4456103", "0.4455036", "0.4449387", "0.4442491", "0.44407064", "0.4438594", "0.44340461", "0.44306895", "0.44231042", "0.44226515", "0.44164902", "0.44160488", "0.44142357", "0.44080392", "0.44028443", "0.43919596", "0.43889308", "0.43852198", "0.43828493", "0.4379232", "0.43752086", "0.43703353", "0.43691832", "0.436526", "0.43636522", "0.43633375", "0.4362346", "0.43599945", "0.4357567", "0.43538338", "0.43442798", "0.4342586", "0.43414864", "0.43393067", "0.43389347", "0.43356848", "0.4335301", "0.43334842" ]
0.7922563
0
GetValidTitlePrefixes returns list of valid prefixes
func GetValidTitlePrefixes(config PluginConfiguration) []string { prefixes := defaultTypes if len(config.TypePrefix) != 0 { if config.Combine { prefixes = append(prefixes, config.TypePrefix...) } else { prefixes = config.TypePrefix } } return prefixes }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o GoogleCloudRetailV2alphaSearchRequestFacetSpecFacetKeyPtrOutput) Prefixes() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v *GoogleCloudRetailV2alphaSearchRequestFacetSpecFacetKey) []string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Prefixes\n\t}).(pulumi.StringArrayOutput)\n}", "func cleansePrefixes(ss []string) []string {\n\n\tret := []string{}\n\tfor _, s := range ss {\n\t\tstripped := \"\"\n\t\tfor i := len(ss) - 1; i > -1; i-- { // reversely\n\t\t\tpref := ss[i]\n\t\t\tif s != pref && strings.HasPrefix(s, pref) {\n\n\t\t\t\tstripped = strings.TrimPrefix(s, pref)\n\n\t\t\t\tstripped = strings.TrimSpace(stripped)\n\t\t\t\tstripped = strings.TrimPrefix(stripped, \"-- \")\n\t\t\t\tstripped = strings.TrimSuffix(stripped, \" --\")\n\n\t\t\t\t// log.Printf(\"stripped off\\n\\t%q \\n\\t%q \\n\\t%q\", s, pref, stripped)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif stripped == \"\" {\n\t\t\tret = append(ret, s)\n\t\t} else {\n\t\t\tret = append(ret, stripped)\n\t\t}\n\t}\n\n\treturn ret\n\n}", "func (o GoogleCloudRetailV2alphaSearchRequestFacetSpecFacetKeyOutput) Prefixes() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v GoogleCloudRetailV2alphaSearchRequestFacetSpecFacetKey) []string { return v.Prefixes }).(pulumi.StringArrayOutput)\n}", "func (o GoogleCloudRetailV2alphaSearchRequestFacetSpecFacetKeyResponseOutput) Prefixes() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v GoogleCloudRetailV2alphaSearchRequestFacetSpecFacetKeyResponse) []string { return v.Prefixes }).(pulumi.StringArrayOutput)\n}", "func (c *HelpCountryCode) GetPrefixes() (value []string, ok bool) {\n\tif c == nil {\n\t\treturn\n\t}\n\tif !c.Flags.Has(0) {\n\t\treturn value, false\n\t}\n\treturn c.Prefixes, true\n}", "func IsValidPathSegmentPrefix(name string) []string {\n\tvar errors []string\n\n\tfor _, illegalContent := range NameMayNotContain {\n\t\tif strings.Contains(name, illegalContent) {\n\t\t\terrors = append(errors, fmt.Sprintf(`may not contain '%s'`, 
illegalContent))\n\t\t}\n\t}\n\n\treturn errors\n}", "func makePrefixMap(ss []string, prefixLen int) prefixMap {\n\tprefixes := make(prefixMap)\n\tfor i, s := range ss {\n\t\t// We use < rather than <= because if a label matches on a prefix equal to\n\t\t// its full length, that's actually a substring match handled by\n\t\t// removeSubstrings.\n\t\tif prefixLen < len(s) {\n\t\t\tprefix := s[:prefixLen]\n\t\t\tprefixes[prefix] = append(prefixes[prefix], i)\n\t\t}\n\t}\n\n\treturn prefixes\n}", "func stripPrefixes(subj string) string {\n\tredo := true\n\tfor redo {\n\t\tredo = false\n\t\tfor _, prefix := range _BAD_PREFIXES {\n\t\t\tif strings.HasPrefix(strings.ToLower(subj), prefix) {\n\t\t\t\tsubj = subj[len(prefix):]\n\t\t\t\tredo = true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn subj\n}", "func CreatePrefixList(pkg string) []string {\n\tif pkg == \"\" {\n\t\treturn []string{\"\"}\n\t}\n\n\tnumDots := 0\n\t// one pass to pre-allocate the returned slice\n\tfor i := 0; i < len(pkg); i++ {\n\t\tif pkg[i] == '.' {\n\t\t\tnumDots++\n\t\t}\n\t}\n\tif numDots == 0 {\n\t\treturn []string{pkg, \"\"}\n\t}\n\n\tprefixes := make([]string, numDots+2)\n\t// second pass to fill in returned slice\n\tfor i := 0; i < len(pkg); i++ {\n\t\tif pkg[i] == '.' 
{\n\t\t\tprefixes[numDots] = pkg[:i]\n\t\t\tnumDots--\n\t\t}\n\t}\n\tprefixes[0] = pkg\n\n\treturn prefixes\n}", "func AnnouncedPrefixes(asn string) (*RipeAnnouncedPrefixesResponse, error) {\n\tfetchURL := \"https://stat.ripe.net/data/announced-prefixes/data.json?soft_limit=ignore&resource=AS%s\"\n\turl := fmt.Sprintf(fetchURL, asn)\n\n\tresp, err := http.Get(url)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar result RipeAnnouncedPrefixesResponse\n\terr = json.NewDecoder(resp.Body).Decode(&result)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &result, nil\n}", "func HasTitleWithValidType(prefixes []string, title string) bool {\n\tpureTitle := strings.TrimSpace(title)\n\tfor _, prefix := range prefixes {\n\t\tprefixRegexp := regexp.MustCompile(`(?i)^` + prefix + `(:| |\\()+`)\n\t\tif prefixRegexp.MatchString(pureTitle) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (o BucketLifecycleRuleConditionOutput) MatchesPrefixes() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v BucketLifecycleRuleCondition) []string { return v.MatchesPrefixes }).(pulumi.StringArrayOutput)\n}", "func (kps *KubernetesPrefixSource) Prefixes() []string {\n\treturn kps.prefixes.Load()\n}", "func Prefixes(s string) []string {\n\tprefixes := make(map[string]struct{})\n\n\trunes := make([]rune, 0, 64)\n\n\tfor _, w := range strings.Split(s, \" \") {\n\t\tif w == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\trunes = runes[0:0]\n\n\t\tfor _, c := range w {\n\t\t\trunes = append(runes, c)\n\t\t\tprefixes[string(runes)] = struct{}{}\n\t\t}\n\t}\n\n\ttokens := make([]string, 0, 32)\n\n\tfor pref := range prefixes {\n\t\ttokens = append(tokens, pref)\n\t}\n\n\treturn tokens\n}", "func Prefixes(s string) []string {\n\tprefixes := make(map[string]struct{})\n\n\trunes := make([]rune, 0, 64)\n\n\tfor _, w := range strings.Split(s, \" \") {\n\t\tif w == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\trunes = runes[0:0]\n\n\t\tfor _, c := range w {\n\t\t\trunes = append(runes, 
c)\n\t\t\tprefixes[string(runes)] = struct{}{}\n\t\t}\n\t}\n\n\ttokens := make([]string, 0, 32)\n\n\tfor pref := range prefixes {\n\t\ttokens = append(tokens, pref)\n\t}\n\n\treturn tokens\n}", "func prefixListing() error {\n\tinputDir := getUserHome() + \"/sequence_lists/genbank_prefixes\"\n\tfiles, err := ioutil.ReadDir(inputDir)\n\tif err != nil {\n\t\treturn handle(\"Error in reading dir\", err)\n\t}\n\t// Gets a list of the prefixes found in the files and puts them into a\n\t// sorted list.\n\tres := []string{}\n\tfor _, f := range files {\n\t\tfname := f.Name()\n\t\tfileResult, err := prefixListForFile(inputDir+\"/\"+fname, fname)\n\t\tif err != nil {\n\t\t\treturn handle(\"Error in getting prefix list from file\", err)\n\t\t}\n\t\tres = append(res, fileResult)\n\t}\n\tsort.Sort(naturalsort.NaturalSort(res))\n\tfor _, v := range res {\n\t\tfmt.Println(v)\n\t}\n\treturn err\n}", "func (c *config) PrefixKeys(prefix string) []string {\n c.m.Lock()\n defer c.m.Unlock()\n\n keys := []string{}\n for k, _ := range c.conf {\n if strings.HasPrefix(k, prefix) {\n keys = append(keys, k)\n }\n }\n return keys\n}", "func NamePrefixes() []string {\n\ts := UserID.String()\n\treturn []string{\"<@\" + s + \">\", \"<@!\" + s + \">\", \"1dot\"}\n}", "func (o GatewayAssociationProposalOutput) AllowedPrefixes() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v *GatewayAssociationProposal) pulumi.StringArrayOutput { return v.AllowedPrefixes }).(pulumi.StringArrayOutput)\n}", "func KnownDoHPrefixes() []string {\n\tpopulateOnce.Do(populate)\n\tret := make([]string, 0, len(dohIPsOfBase))\n\tfor b := range dohIPsOfBase {\n\t\tret = append(ret, b)\n\t}\n\tsort.Strings(ret)\n\treturn ret\n}", "func (r IPRange) Prefixes() []IPPrefix {\n\tif !r.Valid() {\n\t\treturn nil\n\t}\n\tvar makePrefix prefixMaker\n\tif r.From.Is4() {\n\t\tmakePrefix = func(ip16 ip16, bits uint8) IPPrefix {\n\t\t\treturn IPPrefix{IPFrom16([16]byte(ip16)), bits - 12*8}\n\t\t}\n\t} else {\n\t\tmakePrefix = 
func(ip16 ip16, bits uint8) IPPrefix {\n\t\t\treturn IPPrefix{IPv6Raw([16]byte(ip16)), bits}\n\t\t}\n\t}\n\ta16, b16 := ip16(r.From.As16()), ip16(r.To.As16())\n\treturn appendRangePrefixes(nil, makePrefix, a16, b16)\n}", "func packagePrefixes(packageName string) []string {\n\tparts := strings.Split(packageName, \"/\")\n\tprefixes := make([]string, len(parts))\n\n\tfor i := 1; i <= len(parts); i++ {\n\t\tprefixes[len(parts)-i] = strings.Join(parts[:i], \"/\")\n\t}\n\n\treturn prefixes\n}", "func TrimPrefix(objects []string, prefix string) []string {\n\tvar results []string\n\tfor _, object := range objects {\n\t\tresults = append(results, strings.TrimPrefix(object, prefix))\n\t}\n\treturn results\n}", "func (t *TST) Prefix(p string) []string {\n\tif p == \"\" {\n\t\treturn nil\n\t}\n\tn := t.root.get(p)\n\tif n == nil {\n\t\treturn nil // nothing has this prefix\n\t}\n\tmatches := []string{}\n\tif n.val != nil {\n\t\tmatches = append(matches, p)\n\t}\n\tn.eqkid.rprefix(p, &matches)\n\tif len(matches) > 0 {\n\t\treturn matches\n\t}\n\treturn nil\n}", "func MatchPrefix(prefixes ...string) MatcherFunc { return MatchPrefixes(prefixes) }", "func (wk *wkAPI) GetPrefixResults(pfx string, limit int) ([]WikipediaPage, error) {\n\tif limit == 0 {\n\t\tlimit = 50\n\t}\n\n\tf := url.Values{\n\t\t\"action\": {\"query\"},\n\t\t\"generator\": {\"prefixsearch\"},\n\t\t\"prop\": {\"pageprops|pageimages|description\"},\n\t\t\"ppprop\": {\"displaytitle\"},\n\t\t\"gpssearch\": {pfx},\n\t\t\"gpsnamespace\": {\"0\"},\n\t\t\"gpslimit\": {strconv.Itoa(limit)},\n\t}\n\n\tres, err := wk.w.Query(f)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar values []WikipediaPage\n\tfor _, p := range res.Query.Pages {\n\t\tvalues = append(values, WikipediaPage{\n\t\t\tID: p.PageId,\n\t\t\tTitle: p.Title,\n\t\t\tURL: getWikipediaURL(p.Title),\n\t\t})\n\t}\n\n\treturn values, nil\n}", "func (k *proxy) GetResourceNames(ctx context.Context, groupVersion, kind string, options []client.ListOption, prefix 
string) ([]string, error) {\n\tclient, err := k.NewClient()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tobjList, err := listObjByGVK(ctx, client, groupVersion, kind, options)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar comps []string\n\tfor _, item := range objList.Items {\n\t\tname := item.GetName()\n\n\t\tif strings.HasPrefix(name, prefix) {\n\t\t\tcomps = append(comps, name)\n\t\t}\n\t}\n\n\treturn comps, nil\n}", "func (t *Trie) Prefix(s string) []string {\n\tnode := t.Find(s)\n\tarr := make([]string, 0)\n\tnode.allSuffix(&arr, \"\")\n\treturn arr\n}", "func (o BucketLifecycleRuleItemConditionResponseOutput) MatchesPrefix() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v BucketLifecycleRuleItemConditionResponse) []string { return v.MatchesPrefix }).(pulumi.StringArrayOutput)\n}", "func MatchPrefixes(prefixes []string) MatcherFunc {\n\treturn func(el Elem) bool {\n\t\tfor _, pfx := range prefixes {\n\t\t\tif strings.HasPrefix(el.Name(), pfx) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n}", "func checkAnnotationNameIsValid(list []string, name string, prefix string) bool {\n\tif strings.HasPrefix(name, prefix) {\n\t\treturn regexpContains(list, strings.TrimPrefix(name, prefix))\n\t}\n\n\treturn true\n}", "func (ctx NSContext) Prefixes() map[string]string {\n\tprefixes := make(map[string]string, len(ctx.prefixes))\n\tfor k, v := range ctx.prefixes {\n\t\tprefixes[k] = v\n\t}\n\n\treturn prefixes\n}", "func CompletionFromList(valid []string, args []string) []string {\n\tout := make([]string, 0)\n\tif len(args) == 0 {\n\t\treturn valid\n\t}\n\tfor _, v := range valid {\n\t\tif hasCaseSmartPrefix(v, args[0]) {\n\t\t\tout = append(out, v)\n\t\t}\n\t}\n\treturn out\n}", "func (o BucketLifecycleRuleItemConditionOutput) MatchesPrefix() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v BucketLifecycleRuleItemCondition) []string { return v.MatchesPrefix }).(pulumi.StringArrayOutput)\n}", "func (a *App) GetPrefixedImageNames() 
[]string {\n\tvar prefixedNames []string\n\tfor _, image := range a.GetImages() {\n\t\tprefixedNames = append(prefixedNames, image.GetFullName())\n\t}\n\treturn prefixedNames\n}", "func (o SecurityGroupRuleOutput) PrefixListIds() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v *SecurityGroupRule) pulumi.StringArrayOutput { return v.PrefixListIds }).(pulumi.StringArrayOutput)\n}", "func HasAllowedImageAsPrefix(str string, imageList []string) bool {\n\tfor _, imagePrefix := range imageList {\n\t\tif strings.HasPrefix(str, imagePrefix) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func TitleHasPrefix(v string) predicate.Ethnicity {\n\treturn predicate.Ethnicity(func(s *sql.Selector) {\n\t\ts.Where(sql.HasPrefix(s.C(FieldTitle), v))\n\t})\n}", "func TestGetJobNamePrefix(t *testing.T) {\n\tassert.Equal(t, \"abcd\", getJobNamePrefix(\"abcd-efg\"))\n\tassert.Equal(t, \"abcd\", getJobNamePrefix(\"abcd.efg\"))\n\tassert.Equal(t, \"abcd\", getJobNamePrefix(\"abcd-e.fg\"))\n\tassert.Equal(t, \"abc\", getJobNamePrefix(\"abc.d-efg\"))\n\tassert.Equal(t, \"abcd\", getJobNamePrefix(\"abcd-.efg\"))\n\tassert.Equal(t, \"abcd\", getJobNamePrefix(\"abcd.-efg\"))\n\tassert.Equal(t, \"abcdefg\", getJobNamePrefix(\"abcdefg\"))\n\tassert.Equal(t, \"abcdefg\", getJobNamePrefix(\"abcdefg-\"))\n\tassert.Equal(t, \"\", getJobNamePrefix(\".abcd-efg\"))\n\tassert.Equal(t, \"\", getJobNamePrefix(\"\"))\n}", "func (o BucketLifecycleRuleItemConditionPtrOutput) MatchesPrefix() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v *BucketLifecycleRuleItemCondition) []string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.MatchesPrefix\n\t}).(pulumi.StringArrayOutput)\n}", "func GetTimezonesForPrefix(number string) ([]string, error) {\n\tvar err error\n\ttimezoneOnce.Do(func() {\n\t\ttimezoneMap, err = loadIntStringArrayMap(timezoneMapData)\n\t})\n\n\tif timezoneMap == nil {\n\t\treturn nil, fmt.Errorf(\"error loading timezone map: %v\", err)\n\t}\n\n\t// strip any leading 
+\n\tnumber = strings.TrimLeft(number, \"+\")\n\n\tmatchLength := len(number) // maxLength: min( len(number), timezoneMap.MaxLength )\n\tif matchLength > timezoneMap.MaxLength {\n\t\tmatchLength = timezoneMap.MaxLength\n\t}\n\n\tfor i := matchLength; i > 0; i-- {\n\t\tindex, err := strconv.Atoi(number[0:i])\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ttzs, found := timezoneMap.Map[index]\n\t\tif found {\n\t\t\treturn tzs, nil\n\t\t}\n\t}\n\treturn []string{UNKNOWN_TIMEZONE}, nil\n}", "func cleanNS(l []*net.NS) []string {\n\tvar r []string\n\tfor _, i := range l {\n\t\tr = append(r, i.Host)\n\t}\n\tsort.Strings(r)\n\treturn (r)\n\n}", "func PrefixMatch(key string) (res []interface{}) {\n\tglobalStore.RLock()\n\tdefer globalStore.RUnlock()\n\n\tfor k, v := range globalStore.store {\n\t\tif strings.HasPrefix(k, key) {\n\t\t\tres = append(res, v)\n\t\t}\n\t}\n\n\treturn\n}", "func filterByPrefix[T types.ResourceWithLabels](resources []T, prefix string, altNameFns ...altNameFn[T]) []T {\n\treturn filterResources(resources, func(r T) bool {\n\t\tif strings.HasPrefix(r.GetName(), prefix) {\n\t\t\treturn true\n\t\t}\n\t\tfor _, altName := range altNameFns {\n\t\t\tif strings.HasPrefix(altName(r), prefix) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t})\n}", "func (s *IPSet) Prefixes() []IPPrefix {\n\tvar out []IPPrefix\n\tfor _, r := range s.Ranges() {\n\t\tout = append(out, r.Prefixes()...)\n\t}\n\treturn out\n}", "func (s *IPSet) Prefixes() []IPPrefix {\n\tvar out []IPPrefix\n\tfor _, r := range s.Ranges() {\n\t\tout = append(out, r.Prefixes()...)\n\t}\n\treturn out\n}", "func (o TransferJobTransferSpecObjectConditionsPtrOutput) IncludePrefixes() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v *TransferJobTransferSpecObjectConditions) []string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.IncludePrefixes\n\t}).(pulumi.StringArrayOutput)\n}", "func FindHostsByPrefix(prefix string) []string {\n\treturn 
index.Find(strings.ToLower(prefix), 10)\n}", "func FundTitleHasPrefix(v string) predicate.CoveredPerson {\n\treturn predicate.CoveredPerson(func(s *sql.Selector) {\n\t\ts.Where(sql.HasPrefix(s.C(FieldFundTitle), v))\n\t})\n}", "func internalStartsWith(str string, prefix string, ignoreCase bool) bool {\n\tif str == \"\" || prefix == \"\" {\n\t\treturn (str == \"\" && prefix == \"\")\n\t}\n\tif utf8.RuneCountInString(prefix) > utf8.RuneCountInString(str) {\n\t\treturn false\n\t}\n\tif ignoreCase {\n\t\treturn strings.HasPrefix(strings.ToLower(str), strings.ToLower(prefix))\n\t}\n\treturn strings.HasPrefix(str, prefix)\n}", "func ListHasPrefix(list, prefix []string) bool {\n\tif len(prefix) == 0 {\n\t\treturn false\n\t}\n\tif len(prefix) > len(list) {\n\t\treturn false\n\t}\n\treturn ListEquals(list[:len(prefix)], prefix)\n}", "func FilterPrefix(stringSet sets.String, prefix string, ignoreCase bool) sets.String {\n\tif prefix == \"\" {\n\t\treturn stringSet\n\t}\n\treturn filterSet(stringSet, prefix, ignoreCase, strings.HasPrefix)\n}", "func getASPrefixes(as int, ipv4Slice *[]string, ipv6Slice *[]string) (int, int, error) {\n ann4 := 0; ann6 := 0\n url := fmt.Sprintf(\"https://stat.ripe.net//data/announced-prefixes/data.json?resource=AS%d\", as);\n res, err := http.Get(url);\n if err == nil {\n bytes, err := ioutil.ReadAll(res.Body)\n res.Body.Close()\n if err == nil {\n var data map[string]interface{}\n if err := json.Unmarshal(bytes, &data); err != nil {\n err := errors.New(\"JSON parsing failed\")\n return 0, 0, err\n }\n if data[\"status\"] == \"ok\" {\n prefixes := data[\"data\"].(map[string]interface{})[\"prefixes\"].([]interface{})\n for j := 0; j < len(prefixes); j++ {\n prefix := prefixes[j].(map[string]interface{})[\"prefix\"].(string)\n if strings.ContainsRune(prefix, ':') {\n //fmt.Printf(\"# IPv6: %s\\n\", prefix)\n *ipv6Slice=append(*ipv6Slice, prefix);\n ann6++\n } else {\n //fmt.Printf(\"# IPv4: %s\\n\", prefix)\n *ipv4Slice=append(*ipv4Slice, 
prefix);\n ann4++\n }\n }\n }\n } else {\n return 0, 0, errors.New(\"Reading document body failed\")\n }\n } else {\n return 0, 0, errors.New(\"HTTP request failed\")\n }\n return ann4, ann6, nil\n}", "func getSanitizedNamespaceList() []string {\n\tprovided := strings.Split(flagNamespaces, \",\")\n\tvar selected []string\n\n\tfor _, v := range provided {\n\t\tv = strings.TrimSpace(v)\n\n\t\tif v != \"\" {\n\t\t\tselected = append(selected, v)\n\t\t}\n\t}\n\n\treturn selected\n}", "func (o TransferJobTransferSpecObjectConditionsOutput) IncludePrefixes() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v TransferJobTransferSpecObjectConditions) []string { return v.IncludePrefixes }).(pulumi.StringArrayOutput)\n}", "func (o *WhatsAppNameWhatsAppApiContent) GetNamePrefixOk() (*string, bool) {\n\tif o == nil || o.NamePrefix == nil {\n\t\treturn nil, false\n\t}\n\treturn o.NamePrefix, true\n}", "func ListPrefix(ctx context.Context, prefix string) (KeyValuePairs, error) {\n\tv, err := Client().ListPrefix(ctx, prefix)\n\tTrace(\"ListPrefix\", err, logrus.Fields{fieldPrefix: prefix, fieldNumEntries: len(v)})\n\treturn v, err\n}", "func TitleHasPrefix(v string) predicate.Job {\n\treturn predicate.Job(func(s *sql.Selector) {\n\t\ts.Where(sql.HasPrefix(s.C(FieldTitle), v))\n\t})\n}", "func getValidRegions() []string {\n\tvrs := make([]string, len(validRegions)*2)\n\ti := 0\n\tfor k, v := range validRegions {\n\t\tvrs[i] = k\n\t\tvrs[i+1] = v\n\t\ti += 2\n\t}\n\tsort.Strings(vrs)\n\treturn vrs\n}", "func (m *WordModel) FindAllPrefixesOf(input string) []WordFreq {\n\tmatches := m.finder.FindAllPrefixesOf(input)\n\tresults := make([]WordFreq, len(matches), len(matches))\n\tfor i, match := range matches {\n\t\tresults[i].Word = match.Word\n\t\tresults[i].LogProbability = m.frequencies[match.Index]\n\t}\n\n\treturn results\n}", "func (re *reSyntax) leadingPhrases() []phrase {\n\tswitch re.op {\n\tdefault:\n\t\tpanic(\"bad op in phrases\")\n\n\tcase opWild:\n\t\treturn 
[]phrase{{BadWord, BadWord}, {AnyWord, BadWord}, {AnyWord, AnyWord}}\n\n\tcase opEmpty:\n\t\treturn []phrase{{BadWord, BadWord}}\n\n\tcase opWords:\n\t\tw := re.w\n\t\tvar p phrase\n\t\tif len(w) == 0 {\n\t\t\tp = phrase{BadWord, BadWord}\n\t\t} else if len(w) == 1 {\n\t\t\tp = phrase{w[0], BadWord}\n\t\t} else {\n\t\t\tp = phrase{w[0], w[1]}\n\t\t}\n\t\treturn []phrase{p}\n\n\tcase opQuest:\n\t\tlist := re.sub[0].leadingPhrases()\n\t\tfor _, l := range list {\n\t\t\tif l[0] == BadWord {\n\t\t\t\treturn list\n\t\t\t}\n\t\t}\n\t\tlist = append(list, phrase{BadWord, BadWord})\n\t\treturn list\n\n\tcase opAlternate:\n\t\tvar list []phrase\n\t\thave := make(map[phrase]bool)\n\t\tfor _, sub := range re.sub {\n\t\t\tfor _, p := range sub.leadingPhrases() {\n\t\t\t\tif !have[p] {\n\t\t\t\t\thave[p] = true\n\t\t\t\t\tlist = append(list, p)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn list\n\n\tcase opConcat:\n\t\txs := []phrase{{BadWord, BadWord}}\n\t\tfor _, sub := range re.sub {\n\t\t\tok := true\n\t\t\tfor _, x := range xs {\n\t\t\t\tif x[1] == BadWord {\n\t\t\t\t\tok = false\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ok {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tys := sub.leadingPhrases()\n\t\t\thave := make(map[phrase]bool)\n\t\t\tvar xys []phrase\n\t\t\tfor _, x := range xs {\n\t\t\t\tif x[1] != BadWord {\n\t\t\t\t\tif !have[x] {\n\t\t\t\t\t\thave[x] = true\n\t\t\t\t\t\txys = append(xys, x)\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfor _, y := range ys {\n\t\t\t\t\tvar xy phrase\n\t\t\t\t\tif x[0] == BadWord {\n\t\t\t\t\t\txy = y\n\t\t\t\t\t} else {\n\t\t\t\t\t\txy = phrase{x[0], y[0]}\n\t\t\t\t\t}\n\t\t\t\t\tif !have[xy] {\n\t\t\t\t\t\thave[xy] = true\n\t\t\t\t\t\txys = append(xys, xy)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\txs = xys\n\t\t}\n\t\treturn xs\n\t}\n}", "func TitleHasPrefix(v string) predicate.User {\n\treturn predicate.User(sql.FieldHasPrefix(FieldTitle, v))\n}", "func LookupPrefixList(ctx *pulumi.Context, args *GetPrefixListArgs) (*GetPrefixListResult, error) 
{\n\tinputs := make(map[string]interface{})\n\tif args != nil {\n\t\tinputs[\"name\"] = args.Name\n\t\tinputs[\"prefixListId\"] = args.PrefixListId\n\t}\n\toutputs, err := ctx.Invoke(\"aws:index/getPrefixList:getPrefixList\", inputs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &GetPrefixListResult{\n\t\tCidrBlocks: outputs[\"cidrBlocks\"],\n\t\tName: outputs[\"name\"],\n\t\tPrefixListId: outputs[\"prefixListId\"],\n\t\tId: outputs[\"id\"],\n\t}, nil\n}", "func prependIfMissing(str string, prefix string, ignoreCase bool, prefixes ...string) string {\n if IsEmpty(prefix) || internalStartsWith(str, prefix, ignoreCase) {\n\t\treturn str\n\t}\n\tfor _, pref := range prefixes {\n\t\tif pref == \"\" || internalStartsWith(str, pref, ignoreCase) {\n\t\t\treturn str\n\t\t}\n\t}\n\treturn prefix + str\n}", "func (it *Iterator) ValidForPrefix(prefix []byte) bool {\n\treturn it.item != nil && bytes.HasPrefix(it.item.key, prefix)\n}", "func StringsHasPrefix(s []string, p string) bool {\n\tfor _, x := range s {\n\t\tif !strings.HasPrefix(x, p) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func (p *Prefix) currentPrefixes() ([]netaddr.IPPrefix, error) {\n\t// Expand ::/N to all unique, non-link local prefixes with matching length\n\t// on this interface.\n\taddrs, err := p.Addrs()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to fetch IP addresses: %v\", err)\n\t}\n\n\tvar prefixes []netaddr.IPPrefix\n\tseen := make(map[netaddr.IPPrefix]struct{})\n\tfor _, a := range addrs {\n\t\tipn, ok := a.(*net.IPNet)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tipp, ok := netaddr.FromStdIPNet(ipn)\n\t\tif !ok {\n\t\t\tpanicf(\"corerad: invalid net.IPNet: %+v\", a)\n\t\t}\n\n\t\t// Only advertise non-link-local IPv6 prefixes that also have a\n\t\t// matching mask:\n\t\t// https://tools.ietf.org/html/rfc4861#section-4.6.2.\n\t\tif ipp.IP().Is4() || ipp.IP().IsLinkLocalUnicast() || ipp.Bits() != p.Prefix.Bits() {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Found a match, mask 
and keep the prefix bits of the address, and only\n\t\t// add each prefix once.\n\t\tpfx := ipp.Masked()\n\t\tif _, ok := seen[pfx]; ok {\n\t\t\tcontinue\n\t\t}\n\t\tseen[pfx] = struct{}{}\n\n\t\tprefixes = append(prefixes, pfx)\n\t}\n\n\t// For output consistency.\n\tsort.SliceStable(prefixes, func(i, j int) bool {\n\t\treturn prefixes[i].IP().Less(prefixes[j].IP())\n\t})\n\n\treturn prefixes, nil\n}", "func HasPrefix(s string, prefixes ...string) bool {\n\tfor _, p := range prefixes {\n\t\tif strings.HasPrefix(s, p) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func PrefixInList(list []string, prefix string) bool {\n\tfor _, s := range list {\n\t\tif strings.HasPrefix(s, prefix) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func getListPrefix(opt *Options, s *goquery.Selection) string {\n\tif isWrapperListItem(s) {\n\t\treturn \"\"\n\t}\n\n\tparent := s.Parent()\n\tif parent.Is(\"ul\") {\n\t\treturn opt.BulletListMarker + \" \"\n\t} else if parent.Is(\"ol\") {\n\t\tcurrentIndex := s.Index() + 1\n\n\t\tlastIndex := parent.Children().Last().Index() + 1\n\t\tmaxLength := len(strconv.Itoa(lastIndex))\n\n\t\t// pad the numbers so that all prefix numbers in the list take up the same space\n\t\t// `%02d.` -> \"01. \"\n\t\tformat := `%0` + strconv.Itoa(maxLength) + `d. `\n\t\treturn fmt.Sprintf(format, currentIndex)\n\t}\n\t// If the HTML is malformed and the list element isn't in a ul or ol, return no prefix\n\treturn \"\"\n}", "func ValidateDNSPrefix(dnsName string) error {\n\tdnsNameRegex := `^([A-Za-z][A-Za-z0-9-]{1,43}[A-Za-z0-9])$`\n\tre, err := regexp.Compile(dnsNameRegex)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !re.MatchString(dnsName) {\n\t\treturn errors.Errorf(\"DNSPrefix '%s' is invalid. The DNSPrefix must contain between 3 and 45 characters and can contain only letters, numbers, and hyphens. It must start with a letter and must end with a letter or a number. 
(length was %d)\", dnsName, len(dnsName))\n\t}\n\treturn nil\n}", "func (m *MockRegionAPIClassDao) GetPrefixesByClass(apiClass string) ([]*model.RegionAPIClass, error) {\n\tret := m.ctrl.Call(m, \"GetPrefixesByClass\", apiClass)\n\tret0, _ := ret[0].([]*model.RegionAPIClass)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (dc *IPMap) checkPrefixAllMatches(sIP string) ([]ServiceID, bool) {\n\tservices := []ServiceID{}\n\tip := net.ParseIP(sIP)\n\tfound := false\n\tfor _, entry := range dc.prefixes {\n\t\tif entry.prefix.Contains(ip) {\n\t\t\tservices = append(services, entry.services...)\n\t\t\tfound = true\n\t\t}\n\t}\n\treturn services, found\n}", "func TestBrokenPrefix(t *testing.T) {\n\tsrc := [128]byte{}\n\tsrc[64] = 1\n\tdata := [10000][]byte{}\n\tfor i := range data {\n\t\tdata[i] = src[:]\n\t}\n\t// last 64 entries have a 1 in a pseudorandom position, breaking the\n\t// pattern\n\tfor i := 10000 - 64; i < 10000; i++ {\n\t\tdata[i] = src[64-((i*11)%64):]\n\t}\n\tforceRadix(BytesSlice(data[:]).Sort)\n\tif !BytesAreSorted(data[:]) {\n\t\tt.Errorf(\"broken-prefix data didn't sort\")\n\t}\n\n\tsrcStr := string(src[:])\n\tdataStr := [10000]string{}\n\tfor i := range dataStr {\n\t\tdataStr[i] = srcStr\n\t}\n\tfor i := 10000 - 64; i < 10000; i++ {\n\t\tdata[i] = src[64-((i*11)%64):]\n\t}\n\tforceRadix(StringSlice(dataStr[:]).Sort)\n\tif !StringsAreSorted(dataStr[:]) {\n\t\tt.Errorf(\"broken-prefix data didn't sort\")\n\t}\n}", "func (a *Assertions) NotHasPrefix(corpus, prefix string, userMessageComponents ...interface{}) bool {\n\ta.assertion()\n\tif didFail, message := shouldNotHasPrefix(corpus, prefix); didFail {\n\t\treturn a.fail(message, userMessageComponents...)\n\t}\n\treturn true\n}", "func (psrc *PSRC) FullPrefixSearch(prefix string) ([]string, error) {\n\tvar (\n\t\tlenPrefix = uint64(len(prefix) * 8) // |prefix|\n\t\ttotalStrings = uint64(len(psrc.strings))\n\t\tstringBuffer = []string{}\n\t\tprefixBuffer = []uint64{}\n\t)\n\n\tfor i := 
uint64(0); i < totalStrings; i++ {\n\t\tretrievalI, err := psrc.Retrieval(i, lenPrefix)\n\t\tif err != nil && err != ErrTooShortString { // If the string is too short, then we simply skip it\n\t\t\treturn nil, err // if error was found\n\t\t}\n\t\tif retrievalI == prefix { // we found the first node having\n\t\t\tprefixBuffer = append(prefixBuffer, i)\n\t\t}\n\t}\n\tif len(prefixBuffer) == 0 {\n\t\treturn []string{}, nil\n\t}\n\n\tfor _, index := range prefixBuffer {\n\t\tstringLength, err := psrc.getStringLength(index)\n\t\tif err != nil {\n\t\t\treturn []string{}, err\n\t\t}\n\t\tstringLength -= 8\n\t\tprefixedString, err := psrc.Retrieval(index, stringLength)\n\t\tif err != nil {\n\t\t\treturn []string{}, err\n\t\t}\n\t\tstringBuffer = append(stringBuffer, prefixedString)\n\t}\n\n\treturn stringBuffer, nil\n}", "func TitleHasPrefix(v string) predicate.Task {\n\treturn predicate.Task(sql.FieldHasPrefix(FieldTitle, v))\n}", "func TitleHasPrefix(v string) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\ts.Where(sql.HasPrefix(s.C(FieldTitle), v))\n\t})\n}", "func (t *Trie) PrefixesOfWord(word string) []string {\r\n\tpreviousWords := make([]string, 0)\r\n\r\n\tcurrentNode := t.root\r\n\r\n\tfor i := 0; i < len(word); i++ {\r\n\t\tchar := word[i]\r\n\t\tif currentNode.isWord {\r\n\t\t\tpreviousWords = append(previousWords, word[:i])\r\n\t\t}\r\n\r\n\t\tchild := currentNode.children[char]\r\n\t\tif child == nil {\r\n\t\t\tbreak\r\n\t\t}\r\n\t\tcurrentNode = child\r\n\t}\r\n\r\n\treturn previousWords\r\n}", "func (c *HelpCountryCode) SetPrefixes(value []string) {\n\tc.Flags.Set(0)\n\tc.Prefixes = value\n}", "func checkAddressPrefixesAreConsistent(t *testing.T, privateKeyPrefix string, params *chaincfg.Params) {\n\tP := params.NetworkAddressPrefix\n\n\t// Desired prefixes\n\tPk := P + \"k\"\n\tPs := P + \"s\"\n\tPe := P + \"e\"\n\tPS := P + \"S\"\n\tPc := P + \"c\"\n\tpk := privateKeyPrefix\n\n\tcheckInterval(t, Pk, 33, params.Name, 
params.PubKeyAddrID)\n\tcheckInterval(t, Ps, 20, params.Name, params.PubKeyHashAddrID)\n\tcheckInterval(t, Pe, 20, params.Name, params.PKHEdwardsAddrID)\n\tcheckInterval(t, PS, 20, params.Name, params.PKHSchnorrAddrID)\n\tcheckInterval(t, Pc, 20, params.Name, params.ScriptHashAddrID)\n\tcheckInterval(t, pk, 33, params.Name, params.PrivateKeyID)\n}", "func List(path_prefix string) []string {\n pth := strings.ToLower(path.Clean(path_prefix))\n if pth == \"/\" { pth = \"\" }\n pths := strings.Split(pth,\"/\")\n if pths[0] == \"\" { pths = pths[1:] } // if pth starts with \"/\"\n pil := assets\n for _, p := range pths {\n pil = pil.sub[p]\n if pil == nil { return nil }\n }\n \n res := make([]string,0,len(assets.sub))\n list(&res, pil, pths)\n return res\n}", "func getCommonPrefix(p []string, f []int, lmin int) string {\n r := []rune(p[f[0]])\n newR := make([]rune, lmin)\n for j := 0; j < lmin; j++ {\n newR[j] = r[j]\n }\n return string(newR)\n}", "func prefixesListing(pfx string, prefixes []string) []listingItem {\n\tout := make([]listingItem, 0, len(prefixes)+1)\n\tif pfx != \"\" {\n\t\tparent := \"\"\n\t\tif idx := strings.LastIndex(pfx, \"/\"); idx != -1 {\n\t\t\tparent = pfx[:idx]\n\t\t}\n\t\tout = append(out, listingItem{\n\t\t\tBack: true,\n\t\t\tHref: prefixPageURL(parent),\n\t\t})\n\t}\n\treturn pathListing(pfx, prefixes, out, func(p string) listingItem {\n\t\treturn listingItem{\n\t\t\tHref: prefixPageURL(p),\n\t\t}\n\t})\n}", "func (o *setNamePrefixOptions) Validate(args []string) error {\n\tif len(args) != 1 {\n\t\treturn errors.New(\"must specify exactly one prefix value\")\n\t}\n\t// TODO: add further validation on the value.\n\to.prefix = args[0]\n\treturn nil\n}", "func IsValidPrefix(prefix string) bool {\n\tif strings.TrimSpace(prefix) == \"\" {\n\t\treturn true\n\t}\n\treturn IsValidObjectName(prefix)\n}", "func prefixIsLessThan(b []byte, s string) bool {\n\tfor i := 0; i < len(s); i++ {\n\t\tif i >= len(b) {\n\t\t\treturn true\n\t\t}\n\t\tif b[i] != 
s[i] {\n\t\t\treturn b[i] < s[i]\n\t\t}\n\t}\n\treturn false\n}", "func (o *SignalPersonName) GetPrefixOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Prefix.Get(), o.Prefix.IsSet()\n}", "func HasOneOfPrefixesFold(str string, prefixes ...string) bool {\n\tfor _, pre := range prefixes {\n\t\tif HasPrefixFold(str, pre) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func FilterEntriesByPrefix(prefix string, entries []string) []string {\n\tvar result []string\n\tfor _, entry := range entries {\n\t\tif strings.HasPrefix(entry, prefix) {\n\t\t\tresult = append(result, entry)\n\t\t}\n\t}\n\treturn result\n}", "func TitleHasPrefix(v string) predicate.Announcement {\n\treturn predicate.Announcement(func(s *sql.Selector) {\n\t\ts.Where(sql.HasPrefix(s.C(FieldTitle), v))\n\t})\n}", "func validateNominees(i interface{}) error {\n\tconst paramName = \"nominees\"\n\n\tv, ok := i.([]string)\n\tif !ok {\n\t\treturn fmt.Errorf(\"%s param: invalid type: %T\", paramName, i)\n\t}\n\n\tnomineeSet := make(map[string]struct{})\n\tfor _, nominee := range v {\n\t\tif _, found := nomineeSet[nominee]; found {\n\t\t\treturn fmt.Errorf(\"%s param: nominee (%s): duplicated\", paramName, nominee)\n\t\t}\n\t\tnomineeSet[nominee] = struct{}{}\n\n\t\tif _, err := sdk.AccAddressFromBech32(nominee); err != nil {\n\t\t\treturn fmt.Errorf(\"%s param: nominee (%s): invalid Bech32 accAddress: %w\", paramName, nominee, err)\n\t\t}\n\t}\n\n\treturn nil\n}", "func (dc *IPMap) checkPrefixFirstMatch(sIP string) ([]ServiceID, bool) {\n\tip := net.ParseIP(sIP)\n\tfor _, entry := range dc.prefixes {\n\t\tif entry.prefix.Contains(ip) {\n\t\t\treturn entry.services, true\n\t\t}\n\t}\n\treturn nil, false\n}", "func HasPrefix(s string, p ...string) bool {\n\tfor _, i := range p {\n\t\tif strings.HasPrefix(s, i) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (sk *SkipList) PrefixScan(prefix store.Key, n int) []interface{} {\n\tx := sk.head\n\tfor i := sk.level - 
1; i >= 0; i-- {\n\t\tfor x.next[i] != nil && x.next[i].key.Less(prefix) {\n\t\t\tx = x.next[i]\n\t\t}\n\t}\n\t//now x is the biggest element which is less than key\n\tx = x.next[0]\n\tvar res []interface{}\n\tfor n > 0 && x != nil && x.key.HasPrefix(prefix) {\n\t\tres = append(res, x.key)\n\t\tn--\n\t\tx = x.next[0]\n\t}\n\treturn res\n}", "func ListAllForPrefix(db *badger.DB, sk StorageKey, id string) ([]string, error) {\n\ttotal := make([]string, 0, 20)\n\n\tpfx := MakeKey(sk, id)\n\topts := badger.DefaultIteratorOptions\n\topts.PrefetchValues = false\n\topts.Prefix = pfx\n\terr := db.View(func(tx *badger.Txn) error {\n\t\tit := tx.NewIterator(opts)\n\t\tdefer it.Close()\n\t\tfor it.Seek(pfx); it.ValidForPrefix(pfx); it.Next() {\n\t\t\tkeybuf := it.Item().Key()\n\t\t\tk := string(keybuf[len(pfx):])\n\t\t\ttotal = append(total, string(k))\n\t\t}\n\t\treturn nil\n\t})\n\n\treturn total, err\n}", "func HasPrefix(s, prefix string) bool {\n\tif len(s) < len(prefix) {\n\t\treturn false\n\t}\n\tfor i := 0; i < len(prefix); i++ {\n\t\tif s[i] != prefix[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func BaselineClassTitleHasPrefix(v string) predicate.BaselineClass {\n\treturn predicate.BaselineClass(func(s *sql.Selector) {\n\t\ts.Where(sql.HasPrefix(s.C(FieldBaselineClassTitle), v))\n\t})\n}", "func processFilePrefixes(pathName string, outFile *os.File) error {\n\tfmt.Println(\"File: \" + pathName)\n\n\t// Open the file\n\tfile, err := os.Open(pathName)\n\tif err != nil {\n\t\treturn handle(\"Error in opening file\", err)\n\t}\n\tdefer file.Close()\n\tscanner := bufio.NewScanner(file)\n\n\tprefixSet := make(map[string]bool)\n\n\t// Go line by line\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\t// Get set of seen prefixes\n\t\tif prefix := getPrefix(line); prefix != \"\" {\n\t\t\tprefixSet[prefix] = true\n\t\t}\n\t}\n\t// Write results to file\n\tfor k := range prefixSet {\n\t\toutFile.WriteString(fmt.Sprintf(\"%s\\n\", k))\n\t}\n\tif err = 
scanner.Err(); err != nil {\n\t\treturn handle(\"Error in scanning lines\", err)\n\t}\n\treturn err\n}", "func TestVisitPrefixes(t *testing.T) {\n\tdefer leaktest.AfterTest(t)\n\tpcc := buildTestPrefixConfigMap()\n\ttestData := []struct {\n\t\tstart, end proto.Key\n\t\texpRanges [][2]proto.Key\n\t\texpConfigs []ConfigUnion\n\t}{\n\t\t{proto.KeyMin, proto.KeyMax,\n\t\t\t[][2]proto.Key{\n\t\t\t\t{proto.KeyMin, proto.Key(\"/db1\")},\n\t\t\t\t{proto.Key(\"/db1\"), proto.Key(\"/db1/table\")},\n\t\t\t\t{proto.Key(\"/db1/table\"), proto.Key(\"/db1/tablf\")},\n\t\t\t\t{proto.Key(\"/db1/tablf\"), proto.Key(\"/db2\")},\n\t\t\t\t{proto.Key(\"/db2\"), proto.Key(\"/db3\")},\n\t\t\t\t{proto.Key(\"/db3\"), proto.Key(\"/db4\")},\n\t\t\t\t{proto.Key(\"/db4\"), proto.KeyMax},\n\t\t\t}, []ConfigUnion{config1, config2, config3, config2, config1, config4, config1}},\n\t\t{proto.Key(\"/db0\"), proto.Key(\"/db1/table/foo\"),\n\t\t\t[][2]proto.Key{\n\t\t\t\t{proto.Key(\"/db0\"), proto.Key(\"/db1\")},\n\t\t\t\t{proto.Key(\"/db1\"), proto.Key(\"/db1/table\")},\n\t\t\t\t{proto.Key(\"/db1/table\"), proto.Key(\"/db1/table/foo\")},\n\t\t\t}, []ConfigUnion{config1, config2, config3}},\n\t}\n\tfor i, test := range testData {\n\t\tranges := [][2]proto.Key{}\n\t\tconfigs := []ConfigUnion{}\n\t\tif err := pcc.VisitPrefixes(test.start, test.end, func(start, end proto.Key, config ConfigUnion) (bool, error) {\n\t\t\tranges = append(ranges, [2]proto.Key{start, end})\n\t\t\tconfigs = append(configs, config)\n\t\t\treturn false, nil\n\t\t}); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif !reflect.DeepEqual(test.expRanges, ranges) {\n\t\t\tt.Errorf(\"%d: expected ranges %+v; got %+v\", i, test.expRanges, ranges)\n\t\t}\n\t\tif !reflect.DeepEqual(test.expConfigs, configs) {\n\t\t\tt.Errorf(\"%d: expected configs %+v; got %+v\", i, test.expConfigs, configs)\n\t\t}\n\t}\n\n\t// Now, stop partway through by returning done=true.\n\t{\n\t\tconfigs := []ConfigUnion{}\n\t\tif err := 
pcc.VisitPrefixes(proto.Key(\"/db2\"), proto.Key(\"/db4\"), func(start, end proto.Key, config ConfigUnion) (bool, error) {\n\t\t\tconfigs = append(configs, config)\n\t\t\tif len(configs) == 2 {\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t\treturn false, nil\n\t\t}); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\texpConfigs := []ConfigUnion{config1, config4}\n\t\tif !reflect.DeepEqual(expConfigs, configs) {\n\t\t\tt.Errorf(\"expected configs %+v; got %+v\", expConfigs, configs)\n\t\t}\n\t}\n\n\t// Now, stop partway through by returning an error.\n\t{\n\t\tconfigs := []ConfigUnion{}\n\t\tif err := pcc.VisitPrefixes(proto.Key(\"/db2\"), proto.Key(\"/db4\"), func(start, end proto.Key, config ConfigUnion) (bool, error) {\n\t\t\tconfigs = append(configs, config)\n\t\t\tif len(configs) == 2 {\n\t\t\t\treturn false, util.Errorf(\"foo\")\n\t\t\t}\n\t\t\treturn false, nil\n\t\t}); err == nil {\n\t\t\tt.Fatalf(\"expected an error, but didn't get one\")\n\t\t}\n\t\texpConfigs := []ConfigUnion{config1, config4}\n\t\tif !reflect.DeepEqual(expConfigs, configs) {\n\t\t\tt.Errorf(\"expected configs %+v; got %+v\", expConfigs, configs)\n\t\t}\n\t}\n}" ]
[ "0.6098155", "0.6092304", "0.602812", "0.58553684", "0.5671215", "0.5661387", "0.5588173", "0.5572023", "0.54787356", "0.54342926", "0.54087603", "0.5395631", "0.53830993", "0.5359162", "0.5359162", "0.5341388", "0.5303424", "0.5253235", "0.5222545", "0.52096283", "0.51101464", "0.50723493", "0.50598305", "0.50381416", "0.50353354", "0.50287586", "0.501296", "0.5012803", "0.5007567", "0.5004823", "0.49948278", "0.49784493", "0.4976946", "0.49769032", "0.49614426", "0.4959163", "0.49541628", "0.49536172", "0.49393895", "0.49384612", "0.49183422", "0.4915766", "0.4876525", "0.486657", "0.4864406", "0.4864406", "0.48579842", "0.4853309", "0.48484138", "0.48286515", "0.48257527", "0.48141223", "0.48136064", "0.4806961", "0.48062864", "0.4803994", "0.4798876", "0.4781617", "0.47772416", "0.47615677", "0.47507122", "0.4749705", "0.47437087", "0.4724673", "0.4720046", "0.47182733", "0.47180912", "0.47146267", "0.4713709", "0.47130615", "0.4705301", "0.47038835", "0.47017777", "0.46937042", "0.46891633", "0.46883503", "0.46805483", "0.46754465", "0.46729046", "0.46602583", "0.4658005", "0.4657377", "0.46555033", "0.46443155", "0.46412465", "0.46361622", "0.46324167", "0.46259835", "0.46179327", "0.46105856", "0.460842", "0.46057186", "0.46037352", "0.45993218", "0.45966706", "0.45919052", "0.4590859", "0.4587392", "0.4583759", "0.4571118" ]
0.8025123
0
HasTitleWithValidType checks if title prefix conforms with semantic message style.
func HasTitleWithValidType(prefixes []string, title string) bool { pureTitle := strings.TrimSpace(title) for _, prefix := range prefixes { prefixRegexp := regexp.MustCompile(`(?i)^` + prefix + `(:| |\()+`) if prefixRegexp.MatchString(pureTitle) { return true } } return false }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func CheckSemanticTitle(pr *gogh.PullRequest, config PluginConfiguration, logger log.Logger) string {\n\tchange := ghservice.NewRepositoryChangeForPR(pr)\n\tprefixes := GetValidTitlePrefixes(config)\n\tisTitleWithValidType := HasTitleWithValidType(prefixes, *pr.Title)\n\n\tif !isTitleWithValidType {\n\t\tif prefix, ok := wip.GetWorkInProgressPrefix(*pr.Title, wip.LoadConfiguration(logger, change)); ok {\n\t\t\ttrimmedTitle := strings.TrimPrefix(*pr.Title, prefix)\n\t\t\tisTitleWithValidType = HasTitleWithValidType(prefixes, trimmedTitle)\n\t\t}\n\t}\n\tif !isTitleWithValidType {\n\t\tallPrefixes := \"`\" + strings.Join(prefixes, \"`, `\") + \"`\"\n\t\treturn fmt.Sprintf(TitleFailureMessage, pr.GetTitle(), allPrefixes)\n\t}\n\treturn \"\"\n}", "func IsTitle(rune int) bool {\n\tif rune < 0x80 {\t// quick ASCII check\n\t\treturn false\n\t}\n\treturn Is(Title, rune);\n}", "func (o *SecurityProblem) HasTitle() bool {\n\tif o != nil && o.Title != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (me TSearchHITsSortProperty) IsTitle() bool { return me.String() == \"Title\" }", "func (me TGetReviewableHITsSortProperty) IsTitle() bool { return me.String() == \"Title\" }", "func (o *GroupWidgetDefinition) HasTitle() bool {\n\tif o != nil && o.Title != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func IsTitle(r rune) bool", "func (o *Content) HasTitle() bool {\n\tif o != nil && o.Title.IsSet() {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *Snippet) HasTitle() bool {\n\tif o != nil && o.Title != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (d UserData) HasTitle() bool {\n\treturn d.ModelData.Has(models.NewFieldName(\"Title\", \"title_id\"))\n}", "func (o *InlineResponse2004People) HasTitle() bool {\n\tif o != nil && o.Title != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *InlineResponse20049Post) HasTitle() bool {\n\tif o != nil && o.Title != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *InlineResponse200115) 
HasTitle() bool {\n\tif o != nil && o.Title != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func GetValidTitlePrefixes(config PluginConfiguration) []string {\n\tprefixes := defaultTypes\n\tif len(config.TypePrefix) != 0 {\n\t\tif config.Combine {\n\t\t\tprefixes = append(prefixes, config.TypePrefix...)\n\t\t} else {\n\t\t\tprefixes = config.TypePrefix\n\t\t}\n\t}\n\treturn prefixes\n}", "func (o *CatalogEntry) HasTitle() bool {\n\tif o != nil && o.Title != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func isTitleElement(n *html.Node) bool {\n\treturn n.Type == html.ElementNode && n.Data == \"title\"\n}", "func (o *InlineResponse20027Person) HasTitle() bool {\n\tif o != nil && o.Title != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *InlineResponse20033Milestones) HasTitle() bool {\n\tif o != nil && o.Title != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *InlineResponse20034Milestone) HasTitle() bool {\n\tif o != nil && o.Title != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (t *TitleInput) Validate() error {\n\tif t.FirstName.Nil() && t.LastName.Nil() {\n\t\treturn fmt.Errorf(\"Either first name or last name (or both) must be provided\")\n\t}\n\treturn nil\n}", "func (o *EventAttributes) HasTitle() bool {\n\treturn o != nil && o.Title != nil\n}", "func (o *WorkbookChart) HasTitle() bool {\n\tif o != nil && o.Title != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func testFrontMatterTitle(mdBytes []byte) error {\n\tfm, _, err := frontparser.ParseFrontmatterAndContent(mdBytes)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif _, exists := fm[\"title\"]; exists == false {\n\t\treturn errors.New(\"can't find title in frontmatter\")\n\t}\n\treturn nil\n}", "func (o *TeamPermissionSettingAttributes) HasTitle() bool {\n\treturn o != nil && o.Title != nil\n}", "func (o *NiaapiNewReleaseDetailAllOf) HasTitle() bool {\n\tif o != nil && o.Title != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (me 
*XElemTitlealldescTitleMetadataschemaTitleTtitleType) Walk() (err error) {\n\tif fn := WalkHandlers.XElemTitlealldescTitleMetadataschemaTitleTtitleType; me != nil {\n\t\tif fn != nil {\n\t\t\tif err = fn(me, true); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif err = me.Title.Walk(); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\treturn\n\t\t}\n\t\tif fn != nil {\n\t\t\tif err = fn(me, false); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}", "func (o *NotificationConfig) HasTitle() bool {\n\tif o != nil && o.Title != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *LaunchpadClicks) HasTitle() bool {\n\tif o != nil && o.Title != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *ViewUserDashboard) HasTitle() bool {\n\tif o != nil && o.Title != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *GroupWidgetDefinition) HasShowTitle() bool {\n\tif o != nil && o.ShowTitle != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *Content) GetTitleOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Title.Get(), o.Title.IsSet()\n}", "func (t *Title) SchemaTitle() *schema.Title {\n\t// Check for self being nil so we can safely chain this function\n\tif t == nil {\n\t\treturn nil\n\t}\n\n\tvar name, loc = t.MARCTitle, t.MARCLocation\n\n\t// Not great, but this does the trick well enough when we haven't gotten a\n\t// valid MARC record\n\tif !t.ValidLCCN {\n\t\tname = t.Name\n\t}\n\n\treturn &schema.Title{\n\t\tLCCN: t.LCCN,\n\t\tName: name,\n\t\tPlaceOfPublication: loc,\n\t}\n}", "func (o *TransactionStore) HasGroupTitle() bool {\n\tif o != nil && o.GroupTitle.IsSet() {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *PostWebhook) GetTitleOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Title, true\n}", 
"func (o *SecurityProblem) GetTitleOk() (*string, bool) {\n\tif o == nil || o.Title == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Title, true\n}", "func (o *Snippet) GetTitleOk() (*string, bool) {\n\tif o == nil || o.Title == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Title, true\n}", "func (o *GroupWidgetDefinition) GetTitleOk() (*string, bool) {\n\tif o == nil || o.Title == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Title, true\n}", "func (m *NametitleMutation) Title() (r string, exists bool) {\n\tv := m._Title\n\tif v == nil {\n\t\treturn\n\t}\n\treturn *v, true\n}", "func IsNamingValid(namingStyle string) (NamingStyle, bool) {\n\tif len(namingStyle) == 0 {\n\t\tnamingStyle = namingLower\n\t}\n\tswitch namingStyle {\n\tcase namingLower, namingCamel, namingSnake:\n\t\treturn namingStyle, true\n\tdefault:\n\t\treturn \"\", false\n\t}\n}", "func (o *EventAttributes) GetTitleOk() (*string, bool) {\n\tif o == nil || o.Title == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Title, true\n}", "func (o *HealthIncident) GetTitleOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Title, true\n}", "func (t *Trip) Validate() bool {\n\tif t.Title != \"\" {\n\t\treturn true\n\t}\n\treturn false\n}", "func (o *InlineResponse20049Post) GetTitleOk() (*string, bool) {\n\tif o == nil || o.Title == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Title, true\n}", "func TitleHasSuffix(v string) predicate.Ethnicity {\n\treturn predicate.Ethnicity(func(s *sql.Selector) {\n\t\ts.Where(sql.HasSuffix(s.C(FieldTitle), v))\n\t})\n}", "func (o *GroupWidgetDefinition) GetShowTitleOk() (*bool, bool) {\n\tif o == nil || o.ShowTitle == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ShowTitle, true\n}", "func (o *GroupWidgetDefinition) HasTitleAlign() bool {\n\tif o != nil && o.TitleAlign != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func TitleHasSuffix(v string) predicate.Task {\n\treturn predicate.Task(sql.FieldHasSuffix(FieldTitle, v))\n}", "func 
textContainsTitle(text, title string) bool {\n\tre := regexp.MustCompile(\"(?i)\" + strings.Join(strings.Split(title, \" \"), `\\s+`))\n\treturn re.MatchString(text)\n}", "func (o *User) HasJobTitle() bool {\n\tif o != nil && o.JobTitle != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *WorkbookChart) GetTitleOk() (AnyOfmicrosoftGraphWorkbookChartTitle, bool) {\n\tif o == nil || o.Title == nil {\n\t\tvar ret AnyOfmicrosoftGraphWorkbookChartTitle\n\t\treturn ret, false\n\t}\n\treturn *o.Title, true\n}", "func (o *CatalogEntry) GetTitleOk() (*string, bool) {\n\tif o == nil || o.Title == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Title, true\n}", "func (r *HousingTypology) Valid() error {\n\tif r.Name == \"\" {\n\t\treturn fmt.Errorf(\"Nom vide\")\n\t}\n\treturn nil\n}", "func (p Params) Title() (title string, found bool) {\n\ttitle, found = p[titleKey]\n\treturn\n}", "func (h *HazardType) Validate(tx *pop.Connection) (*validate.Errors, error) {\n\terrors := validate.Validate(\n\t\t&validators.StringIsPresent{Name: \"Label\", Field: h.Label, Message: \"A label is required.\"},\n\t\t&validators.StringIsPresent{Name: \"Description\", Field: h.Description, Message: \"Please provide a brief description.\"},\n\t)\n\n\treturn errors, nil\n}", "func (ln LabelName) IsValid() bool {\n\tif len(ln) == 0 {\n\t\treturn false\n\t}\n\tfor i, b := range ln {\n\t\tif !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (m *RoleMutation) Title() (r string, exists bool) {\n\tv := m.title\n\tif v == nil {\n\t\treturn\n\t}\n\treturn *v, true\n}", "func (gist *Gist) Validate() bool {\n\tgist.Errors = make(map[string]string)\n\n\tif gist.Title == \"\" {\n\t\tgist.Errors[\"Title\"] = \"You must provide a title.\"\n\t}\n\n\tif gist.Content == \"\" {\n\t\tgist.Errors[\"Content\"] = \"You must provide content.\"\n\t}\n\n\treturn len(gist.Errors) == 0\n}", "func 
ValidateSlug(fl v.FieldLevel) bool {\n\tm, _ := regexp.MatchString(\"^[a-z0-9]+[a-z0-9-]+[a-z0-9]+$\", fl.Field().String())\n\treturn m\n}", "func (e *Event) Validate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.Validate(\n\t\t&validators.StringIsPresent{Field: e.Title, Name: \"Title\"},\n\t), nil\n}", "func TitleHasSuffix(v string) predicate.User {\n\treturn predicate.User(sql.FieldHasSuffix(FieldTitle, v))\n}", "func (me *XsdGoPkgHasElem_TitlesequenceCreateHITRequestschema_Title_XsdtString_) Walk() (err error) {\n\tif fn := WalkHandlers.XsdGoPkgHasElem_TitlesequenceCreateHITRequestschema_Title_XsdtString_; me != nil {\n\t\tif fn != nil {\n\t\t\tif err = fn(me, true); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif fn != nil {\n\t\t\tif err = fn(me, false); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}", "func (ut *TodoPayload) Validate() (err error) {\n\tif ut.Title != nil {\n\t\tif utf8.RuneCountInString(*ut.Title) < 8 {\n\t\t\terr = goa.MergeErrors(err, goa.InvalidLengthError(`response.title`, *ut.Title, utf8.RuneCountInString(*ut.Title), 8, true))\n\t\t}\n\t}\n\treturn\n}", "func (o *InlineResponse2004People) GetTitleOk() (*string, bool) {\n\tif o == nil || o.Title == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Title, true\n}", "func (r *RuleConfig) HasValidType() bool {\n\treturn r.Type == \"whitelist\" || r.Type == \"required\" || r.Type == \"disallow\"\n}", "func (tt BlockTitleText) HasEmoji() bool {\n\treturn true\n}", "func Title(a interfaces.AssumeCredentialProcess, emoji string, prefix string, message string) {\n\ts := a.GetDestination()\n\tf := a.GetFlags()\n\tif f.Verbose {\n\t\tformatted := format(a, textColorTitle, emoji, prefix, message)\n\t\tfmt.Fprint(s, formatted)\n\t}\n}", "func (o *ViewUserDashboard) GetTitleOk() (*string, bool) {\n\tif o == nil || o.Title == nil {\n\t\treturn nil, 
false\n\t}\n\treturn o.Title, true\n}", "func TitleContains(v string) predicate.Ethnicity {\n\treturn predicate.Ethnicity(func(s *sql.Selector) {\n\t\ts.Where(sql.Contains(s.C(FieldTitle), v))\n\t})\n}", "func CheckUsernameFormal(name string) bool {\n\tif len(name) <= 4 || len(name) >= 30 {\n\t\treturn false\n\t}\n\n\tnameRegExp := regexp.MustCompile(\"^[A-Za-z0-9]+(?:[ _-][A-Za-z0-9]+)*$\")\n\treturn nameRegExp.MatchString(name)\n}", "func Title(props *TitleProps, children ...Element) *TitleElem {\n\trProps := &_TitleProps{\n\t\tBasicHTMLElement: newBasicHTMLElement(),\n\t}\n\n\tif props != nil {\n\t\tprops.assign(rProps)\n\t}\n\n\treturn &TitleElem{\n\t\tElement: createElement(\"title\", rProps, children...),\n\t}\n}", "func (ut *todoPayload) Validate() (err error) {\n\tif ut.Title != nil {\n\t\tif utf8.RuneCountInString(*ut.Title) < 8 {\n\t\t\terr = goa.MergeErrors(err, goa.InvalidLengthError(`response.title`, *ut.Title, utf8.RuneCountInString(*ut.Title), 8, true))\n\t\t}\n\t}\n\treturn\n}", "func (h Headline) ValidateHeadlineCreation() error {\n\tif h.Title == \"\" {\n\t\treturn errors.New(\"The title is mandatory\")\n\t}\n\n\tif h.Content == \"\" {\n\t\treturn errors.New(\"The content is mandatory\")\n\t}\n\n\tif h.VisaType == \"\" {\n\t\treturn errors.New(\"The visatype is mandatory\")\n\t}\n\n\tif h.CountryID == 0 {\n\t\treturn errors.New(\"The country_id is mandatory\")\n\t}\n\n\tif h.CategoryID == 0 {\n\t\treturn errors.New(\"The category_id is mandatory\")\n\t}\n\n\treturn nil\n}", "func (t *Title) Parse(page []byte) error {\n\ts := schemaRE.FindSubmatch(page)\n\tif s == nil {\n\t\treturn NewErrParse(\"schema\")\n\t}\n\tvar v schemaJSON\n\tif err := json.Unmarshal(s[1], &v); err != nil {\n\t\treturn NewErrParse(err.Error() + \"; schema was: \" + string(s[1]))\n\t}\n\n\tm := titleIDRE.FindStringSubmatch(v.URL)\n\tif len(m) != 2 {\n\t\treturn NewErrParse(\"id\")\n\t}\n\tt.ID = m[1]\n\n\tt.URL = fmt.Sprintf(titleURL, t.ID)\n\tt.Name = 
decode(v.Name)\n\tt.Type = v.Type\n\n\ttitleYear := titleYearRE.FindSubmatch(page)\n\ttitleYear2 := titleYear2RE.FindSubmatch(page)\n\ttitleYear3 := titleYear3RE.FindSubmatch(page)\n\ttitleYear4 := titleYear4RE.FindSubmatch(page)\n\n\tif len(v.DatePublished) >= 4 {\n\t\tyear, err := strconv.Atoi(v.DatePublished[:4])\n\t\tif err != nil {\n\t\t\treturn NewErrParse(fmt.Sprintf(\"date: %v\", err))\n\t\t}\n\t\tt.Year = year\n\t} else if titleYear != nil {\n\t\tt.Year, _ = strconv.Atoi(string(titleYear[1])) // regexp matches digits\n\t} else if titleYear2 != nil {\n\t\tt.Year, _ = strconv.Atoi(string(titleYear2[1])) // regexp matches digits\n\t} else if titleYear3 != nil {\n\t\tt.Year, _ = strconv.Atoi(string(titleYear3[1])) // regexp matches digits\n\t} else if titleYear4 != nil {\n\t\tt.Year, _ = strconv.Atoi(string(titleYear4[1])) // regexp matches digits\n\t} else {\n\t\t// sometimes there's just no year, e.g. https://www.imdb.com/title/tt12592252/\n\t}\n\n\tvar rating string\n\tif err := json.Unmarshal(v.AggregateRating.RatingValue, &rating); err == nil {\n\t\tt.Rating = rating\n\t}\n\tvar ratingf float64\n\tif err := json.Unmarshal(v.AggregateRating.RatingValue, &ratingf); err == nil && ratingf > 0 {\n\t\tt.Rating = fmt.Sprintf(\"%.1f\", ratingf)\n\t}\n\tt.RatingCount = v.AggregateRating.RatingCount\n\n\tif v.Duration != \"\" {\n\t\tt.Duration = strings.ToLower(strings.TrimLeft(v.Duration, \"PT\"))\n\t} else {\n\t\tduration1 := titleDurationRE.FindSubmatch(page)\n\t\tduration2 := titleDuration2RE.FindSubmatch(page)\n\t\tduration3 := titleDuration3RE.FindSubmatch(page)\n\t\tif duration1 != nil {\n\t\t\tt.Duration = strings.ToLower(string(duration1[1]))\n\t\t} else if duration2 != nil {\n\t\t\tt.Duration = string(duration2[1])\n\t\t} else if duration3 != nil {\n\t\t\tt.Duration = string(duration3[1]) + \"m\"\n\t\t}\n\t}\n\n\tt.Directors = nil\n\tfor _, e := range v.Director {\n\t\tif e.Type != \"Person\" {\n\t\t\tcontinue\n\t\t}\n\t\tm = 
nameIDRE.FindStringSubmatch(e.URL)\n\t\tif len(m) != 2 {\n\t\t\treturn NewErrParse(\"director id\")\n\t\t}\n\t\tid := m[1]\n\t\tif nameSlice(t.Directors).Has(id) {\n\t\t\tcontinue\n\t\t}\n\t\tt.Directors = append(t.Directors, Name{\n\t\t\tID: id,\n\t\t\tURL: fmt.Sprintf(nameURL, id),\n\t\t\tFullName: e.Name,\n\t\t})\n\t}\n\n\tt.Writers = nil\n\tfor _, e := range v.Creator {\n\t\tif e.Type != \"Person\" {\n\t\t\tcontinue\n\t\t}\n\t\tm = nameIDRE.FindStringSubmatch(e.URL)\n\t\tif len(m) != 2 {\n\t\t\treturn NewErrParse(\"writer id\")\n\t\t}\n\t\tid := m[1]\n\t\tif nameSlice(t.Writers).Has(id) {\n\t\t\tcontinue\n\t\t}\n\t\tt.Writers = append(t.Writers, Name{\n\t\t\tID: id,\n\t\t\tURL: fmt.Sprintf(nameURL, id),\n\t\t\tFullName: e.Name,\n\t\t})\n\t}\n\n\tt.Actors = nil\n\tfor _, e := range v.Actor {\n\t\tif e.Type != \"Person\" {\n\t\t\tcontinue\n\t\t}\n\t\tm = nameIDRE.FindStringSubmatch(e.URL)\n\t\tif len(m) != 2 {\n\t\t\treturn NewErrParse(\"actor id\")\n\t\t}\n\t\tid := m[1]\n\t\tif nameSlice(t.Actors).Has(id) {\n\t\t\tcontinue\n\t\t}\n\t\tt.Actors = append(t.Actors, Name{\n\t\t\tID: id,\n\t\t\tURL: fmt.Sprintf(nameURL, id),\n\t\t\tFullName: e.Name,\n\t\t})\n\t}\n\n\tt.Genres = v.Genre\n\n\ts = titleLanguagesRE.FindSubmatch(page)\n\tif s != nil {\n\t\ts := titleLanguageRE.FindAllSubmatch(s[1], -1)\n\t\tif s == nil {\n\t\t\treturn NewErrParse(\"languages\")\n\t\t}\n\t\tt.Languages = nil\n\t\tfor _, m := range s {\n\t\t\tlanguage := decode(string(m[1]))\n\t\t\tif stringSlice(t.Languages).Has(language) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tt.Languages = append(t.Languages, language)\n\t\t}\n\t}\n\n\tas := titleNationalitiesRE.FindAllSubmatch(page, -1)\n\tif as != nil {\n\t\tt.Nationalities = nil\n\t\tfor _, m := range as {\n\t\t\tnationality := decode(string(m[1]))\n\t\t\tif stringSlice(t.Nationalities).Has(nationality) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tt.Nationalities = append(t.Nationalities, nationality)\n\t\t}\n\t}\n\n\tt.Description = v.Description\n\n\ts = 
titlePosterRE.FindSubmatch(page)\n\tif s != nil {\n\t\tid := string(s[1])\n\t\tt.Poster = Media{\n\t\t\tID: id,\n\t\t\tTitleID: t.ID,\n\t\t\tURL: fmt.Sprintf(mediaURL, t.ID, id),\n\t\t\tContentURL: string(s[2]),\n\t\t}\n\t} else {\n\t\ts = titlePoster2RE.FindSubmatch(page)\n\t\tif s != nil {\n\t\t\tid := string(s[1])\n\t\t\tre, err := regexp.Compile(`(?s)\"primaryImage\":{\"id\":\"` + id + `\",\"width\":\\d+,\"height\":\\d+,\"url\":\"([^\"]+)\"`)\n\t\t\tif err != nil {\n\t\t\t\treturn NewErrParse(\"poster RE\")\n\t\t\t}\n\t\t\ts = re.FindSubmatch(page)\n\t\t\tif s != nil {\n\t\t\t\tt.Poster = Media{\n\t\t\t\t\tID: id,\n\t\t\t\t\tTitleID: t.ID,\n\t\t\t\t\tURL: fmt.Sprintf(mediaURL, t.ID, id),\n\t\t\t\t\tContentURL: string(s[1]),\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (me *XsdGoPkgHasElem_TitlechoiceContentTypeschema_Title_XsdtString_) Walk() (err error) {\n\tif fn := WalkHandlers.XsdGoPkgHasElem_TitlechoiceContentTypeschema_Title_XsdtString_; me != nil {\n\t\tif fn != nil {\n\t\t\tif err = fn(me, true); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif fn != nil {\n\t\t\tif err = fn(me, false); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}", "func (o *InlineResponse200115) GetTitleOk() (*string, bool) {\n\tif o == nil || o.Title == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Title, true\n}", "func ValidFormat(format string) bool {\n\tfor _, f := range fmtsByStandard {\n\t\tif f == format {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (n Name) HasType() bool {\n\t_, s := n.GetLookupAndType()\n\treturn s != \"\"\n}", "func (o *InlineResponse20027Person) GetTitleOk() (*string, bool) {\n\tif o == nil || o.Title == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Title, true\n}", "func (o *NiaapiNewReleaseDetailAllOf) GetTitleOk() (*string, bool) {\n\tif o == nil || o.Title == nil {\n\t\treturn nil, 
false\n\t}\n\treturn o.Title, true\n}", "func (o *InlineResponse20034Milestone) GetTitleOk() (*string, bool) {\n\tif o == nil || o.Title == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Title, true\n}", "func (q BaseQuestion) Valid() bool {\n\treturn q.QuestionID != \"\" && q.QuestionTitle != \"\"\n}", "func validateAnotation(a Annotation) error {\n\tif a.Title == \"\" {\n\t\treturn errors.New(\"Annotation property Title can't be empty\")\n\t}\n\n\treturn nil\n}", "func (m Name) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := validate.Pattern(\"\", \"body\", string(m), `^[A-Za-z0-1.\\-_]*$`); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func Title(embed *discordgo.MessageEmbed) *discordgo.MessageEmbed {\n\tembed.Author.Name = \"Command: title\"\n\tembed.Description = \"`title <text>` will create the text into title form.\"\n\tembed.Fields = []*discordgo.MessageEmbedField{\n\t\t{\n\t\t\tName: \"<text>\",\n\t\t\tValue: \"The text to change into title form.\",\n\t\t\tInline: true,\n\t\t},\n\t\t{\n\t\t\tName: \"Related commands:\",\n\t\t\tValue: \"`caps`, `lower`, `randomcaps`, `swap`\",\n\t\t},\n\t}\n\treturn embed\n}", "func (o *TeamPermissionSettingAttributes) GetTitleOk() (*string, bool) {\n\tif o == nil || o.Title == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Title, true\n}", "func (o *InlineResponse20033Milestones) GetTitleOk() (*string, bool) {\n\tif o == nil || o.Title == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Title, true\n}", "func testFrontmatterTitle(path string) error {\n\tif strings.HasSuffix(path, \".md\") {\n\t\tfileBytes, err := ioutil.ReadFile(path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// if file has frontmatter\n\t\tif frontparser.HasFrontmatterHeader(fileBytes) {\n\t\t\tfm, _, err := frontparser.ParseFrontmatterAndContent(fileBytes)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// skip markdowns 
that are not published\n\t\t\tif published, exists := fm[\"published\"]; exists {\n\t\t\t\tif publishedBool, ok := published.(bool); ok {\n\t\t\t\t\tif publishedBool == false {\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif _, exists := fm[\"title\"]; exists == false {\n\t\t\t\treturn errors.New(\"can't find title in frontmatter\")\n\t\t\t}\n\t\t} else {\n\t\t\t// no frontmatter is not an error\n\t\t\t// markdown files without frontmatter won't be considered\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn nil\n}", "func (p *PlainTextBuilder) CreateTitle(title string) {\n\t// ˅\n\tp.buffer.WriteString(\"--------------------------------\\n\") // Decoration line\n\tp.buffer.WriteString(\"[\" + title + \"]\\n\") // Title\n\tp.buffer.WriteString(\"\\n\") // Blank line\n\t// ˄\n}", "func (s *Style) Has(ttype TokenType) bool {\n\treturn !s.get(ttype).IsZero() || s.synthesisable(ttype)\n}", "func BaselineClassTitleHasSuffix(v string) predicate.BaselineClass {\n\treturn predicate.BaselineClass(func(s *sql.Selector) {\n\t\ts.Where(sql.HasSuffix(s.C(FieldBaselineClassTitle), v))\n\t})\n}", "func hasName(t Type) bool {\n\tswitch t.(type) {\n\tcase *Basic, *Named, *TypeParam:\n\t\treturn true\n\t}\n\treturn false\n}", "func TitleHasSuffix(v string) predicate.Announcement {\n\treturn predicate.Announcement(func(s *sql.Selector) {\n\t\ts.Where(sql.HasSuffix(s.C(FieldTitle), v))\n\t})\n}", "func isMissingKind(err error) bool {\n\treturn strings.Contains(err.Error(), \"Object 'Kind' is missing in\")\n}", "func (c *TitleType) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {\n\tvar v string\n\td.DecodeElement(&v, &start)\n\tfor _, attr := range start.Attr {\n\t\tif attr.Name.Local == \"datestamp\" {\n\t\t\tc.Datestamp = DtDotDateOrDateTime(attr.Value)\n\t\t}\n\t\tif attr.Name.Local == \"sourcetype\" {\n\t\t\tc.Sourcetype = SourceTypeCode(attr.Value)\n\t\t}\n\t\tif attr.Name.Local == \"sourcename\" {\n\t\t\tc.Sourcename = 
DtDotNonEmptyString(attr.Value)\n\t\t}\n\t}\n\tswitch v {\n\n // Undefined\n case \"00\":\n\t\tc.Body = `Undefined`\n\n // The full text of the distinctive title of the item, without abbreviation or abridgement. For books, where the title alone is not distinctive, elements may be taken from a set or series title and part number etc to create a distinctive title. Where the item is an omnibus edition containing two or more works by the same author, and there is no separate combined title, a distinctive title may be constructed by concatenating the individual titles, with suitable punctuation, as in ‘Pride and prejudice / Sense and sensibility / Northanger Abbey’\n case \"01\":\n\t\tc.Body = `Distinctive title (book); Cover title (serial); Title on item (serial content item or reviewed resource)`\n\n // Serials only\n case \"02\":\n\t\tc.Body = `ISSN key title of serial`\n\n // Where the subject of the ONIX record is a translated item\n case \"03\":\n\t\tc.Body = `Title in original language`\n\n // For serials: an acronym or initialism of Title Type 01, eg ‘JAMA’, ‘JACM’\n case \"04\":\n\t\tc.Body = `Title acronym or initialism`\n\n // An abbreviated form of Title Type 01\n case \"05\":\n\t\tc.Body = `Abbreviated title`\n\n // A translation of Title Type 01 into another language\n case \"06\":\n\t\tc.Body = `Title in other language`\n\n // Serials only: when a journal issue is explicitly devoted to a specified topic\n case \"07\":\n\t\tc.Body = `Thematic title of journal issue`\n\n // Books or serials: when an item was previously published under another title\n case \"08\":\n\t\tc.Body = `Former title`\n\n // For books: the title carried in a book distributor’s title file: frequently incomplete, and may include elements not properly part of the title\n case \"10\":\n\t\tc.Body = `Distributor’s title`\n\n // An alternative title that appears on the cover of a book\n case \"11\":\n\t\tc.Body = `Alternative title on cover`\n\n // An alternative title that appears on the 
back of a book\n case \"12\":\n\t\tc.Body = `Alternative title on back`\n\n // An expanded form of the title, eg the title of a school text book with grade and type and other details added to make the title meaningful, where otherwise it would comprise only the curriculum subject. This title type is required for submissions to the Spanish ISBN Agency\n case \"13\":\n\t\tc.Body = `Expanded title`\n\n // An alternative title that the book is widely known by, whether it appears on the book or not\n case \"14\":\n\t\tc.Body = `Alternative title`\n\tdefault:\n\t\treturn fmt.Errorf(\"undefined code for TitleType has been passed, got [%s]\", v)\n\t}\n\treturn nil\n}", "func (o *NotificationConfig) GetTitleOk() (*string, bool) {\n\tif o == nil || o.Title == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Title, true\n}", "func (e *DiscordWebhookEmbed) SetTitle(title string) {\n\te.Title = title\n}", "func (e *GraphQLErrors) HasType(t string) bool {\n\tfor _, anError := range e.errors {\n\t\tif anError.Type == t {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (me *XsdGoPkgHasElems_TitlesequenceHITschema_Title_XsdtString_) Walk() (err error) {\n\tif fn := WalkHandlers.XsdGoPkgHasElems_TitlesequenceHITschema_Title_XsdtString_; me != nil {\n\t\tif fn != nil {\n\t\t\tif err = fn(me, true); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif fn != nil {\n\t\t\tif err = fn(me, false); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}", "func (m *ReviewMutation) Title() (r string, exists bool) {\n\tv := m.title\n\tif v == nil {\n\t\treturn\n\t}\n\treturn *v, true\n}", "func (m *TodoItemMutation) Title() (r string, exists bool) {\n\tv := m.title\n\tif v == nil {\n\t\treturn\n\t}\n\treturn *v, true\n}" ]
[ "0.6505668", "0.63963515", "0.63822585", "0.60263205", "0.58802074", "0.5860582", "0.58390754", "0.57830197", "0.57806534", "0.57744485", "0.57152456", "0.5694083", "0.56921774", "0.5678252", "0.56778", "0.56730336", "0.5671352", "0.56649643", "0.5638103", "0.5576951", "0.55644256", "0.5563344", "0.5510562", "0.54581666", "0.5439548", "0.5419806", "0.53748566", "0.53740007", "0.5356892", "0.53367764", "0.5336004", "0.53169066", "0.5254531", "0.5223191", "0.5213461", "0.5131368", "0.5049155", "0.5021278", "0.501118", "0.5002222", "0.4995769", "0.49447748", "0.49444106", "0.4944069", "0.4923183", "0.49162865", "0.48802903", "0.48587385", "0.48505127", "0.48353007", "0.48166987", "0.4807552", "0.47993973", "0.47965363", "0.47958413", "0.47774637", "0.47568113", "0.4753656", "0.47500682", "0.47486344", "0.47467634", "0.4746448", "0.47331113", "0.47289255", "0.47246101", "0.4717201", "0.47165447", "0.46981955", "0.46828863", "0.46770304", "0.46741563", "0.46729308", "0.46728143", "0.46726206", "0.46717092", "0.46584037", "0.46557674", "0.46420932", "0.463094", "0.46263316", "0.46202236", "0.46046346", "0.45912963", "0.4588619", "0.4576704", "0.4575517", "0.4574228", "0.45706606", "0.45659712", "0.45602888", "0.45599082", "0.4554894", "0.45330307", "0.45286593", "0.45269364", "0.4518903", "0.45184237", "0.45148596", "0.4504038", "0.44997323" ]
0.80496615
0
CountCSVRowsGo returns a count of the number of rows in the give csv file
func CountCSVRowsGo(source string) (int, error) { defer un(trace("CountCSVRowsGo")) err := assertValidFilename(source) if err != nil { return 0, err } f, _ := os.Open(source) r := csv.NewReader(bufio.NewReader(f)) rowCount := 0 for { _, err := r.Read() if err == io.EOF { break } rowCount++ } return rowCount, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func CSVFileInfo(f0 string) (size int64, nRec int64) {\n\tfd0, err := os.Open(f0)\n\tdefer fd0.Close()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfi0, err := fd0.Stat()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tbuf0 := bufio.NewReader(fd0)\n\tl0, _, err := buf0.ReadLine()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tsize = fi0.Size()\n\tnRec = size / int64(len(l0))\n\treturn\n}", "func (table *Table) NumberOfRows() (int, int) {\n\tvar numberOfRows int\n\tvar dataFileInfo *os.FileInfo\n\tdataFileInfo, err := table.DataFile.Stat()\n\tif err != nil {\n\t\tlogg.Err(\"table\", \"NumberOfRows\", err.String())\n\t\treturn 0, st.CannotStatTableDataFile\n\t}\n\tnumberOfRows = int(dataFileInfo.Size) / table.RowLength\n\treturn numberOfRows, st.OK\n}", "func rowsInFile(fileName string) (int, error) {\n\tfileReader, err := os.Open(fileName)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer fileReader.Close()\n\treturn lineCounter(fileReader)\n}", "func SQLRowCount(rows *sql.Rows) int {\n\tcount := 0\n\tfor rows.Next() {\n\t\tcount++\n\t}\n\n\tfmt.Println(strconv.Itoa(count))\n\treturn count\n\n}", "func (c *Collection) ImportCSV(buf io.Reader, idCol int, skipHeaderRow bool, overwrite bool, verboseLog bool) (int, error) {\n\tvar (\n\t\tfieldNames []string\n\t\tkey string\n\t\terr error\n\t)\n\tr := csv.NewReader(buf)\n\tr.FieldsPerRecord = -1\n\tr.TrimLeadingSpace = true\n\tlineNo := 0\n\tif skipHeaderRow == true {\n\t\tlineNo++\n\t\tfieldNames, err = r.Read()\n\t\tif err != nil {\n\t\t\treturn lineNo, fmt.Errorf(\"Can't read header csv table at %d, %s\", lineNo, err)\n\t\t}\n\t}\n\tfor {\n\t\tlineNo++\n\t\trow, err := r.Read()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn lineNo, fmt.Errorf(\"Can't read row csv table at %d, %s\", lineNo, err)\n\t\t}\n\t\tvar fieldName string\n\t\trecord := map[string]interface{}{}\n\t\tif idCol < 0 {\n\t\t\tkey = fmt.Sprintf(\"%d\", lineNo)\n\t\t}\n\t\tfor i, val := range row {\n\t\t\tif i < 
len(fieldNames) {\n\t\t\t\tfieldName = fieldNames[i]\n\t\t\t\tif idCol == i {\n\t\t\t\t\tkey = val\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfieldName = fmt.Sprintf(fmtColumnName, i+1)\n\t\t\t}\n\t\t\t//Note: We need to convert the value\n\t\t\tif i, err := strconv.ParseInt(val, 10, 64); err == nil {\n\t\t\t\trecord[fieldName] = i\n\t\t\t} else if f, err := strconv.ParseFloat(val, 64); err == nil {\n\t\t\t\trecord[fieldName] = f\n\t\t\t} else if strings.ToLower(val) == \"true\" {\n\t\t\t\trecord[fieldName] = true\n\t\t\t} else if strings.ToLower(val) == \"false\" {\n\t\t\t\trecord[fieldName] = false\n\t\t\t} else {\n\t\t\t\tval = strings.TrimSpace(val)\n\t\t\t\tif len(val) > 0 {\n\t\t\t\t\trecord[fieldName] = val\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif len(key) > 0 && len(record) > 0 {\n\t\t\tif c.HasKey(key) {\n\t\t\t\tif overwrite == true {\n\t\t\t\t\terr = c.Update(key, record)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn lineNo, fmt.Errorf(\"can't update %+v to %s, %s\", record, key, err)\n\t\t\t\t\t}\n\t\t\t\t} else if verboseLog {\n\t\t\t\t\tlog.Printf(\"Skipping row %d, key %q, already exists\", lineNo, key)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\terr = c.Create(key, record)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn lineNo, fmt.Errorf(\"can't create %+v to %s, %s\", record, key, err)\n\t\t\t\t}\n\t\t\t}\n\t\t} else if verboseLog {\n\t\t\tlog.Printf(\"Skipping row %d, key value missing\", lineNo)\n\t\t}\n\t\tif verboseLog == true && (lineNo%1000) == 0 {\n\t\t\tlog.Printf(\"%d rows processed\", lineNo)\n\t\t}\n\t}\n\treturn lineNo, nil\n}", "func (c *CSVInterpolator) numRows() int64 {\n\tif c.row == 1 {\n\t\treturn rowsNeededToInterpolate - 1\n\t}\n\n\treturn rowsNeededToInterpolate\n}", "func (fw *Writer) NumRows() int { return fw.nrows }", "func LineCount(r io.Reader, skipEmpty bool) int {\n\tif r == nil {\n\t\treturn -1\n\t}\n\n\tsc := bufio.NewScanner(r)\n\tvar i int\n\n\tif skipEmpty {\n\t\tfor sc.Scan() {\n\t\t\tif len(sc.Bytes()) > 0 
{\n\t\t\t\ti++\n\t\t\t}\n\t\t}\n\n\t\tif sc.Err() != nil {\n\t\t\treturn -1\n\t\t}\n\n\t\treturn i\n\t}\n\n\tfor i = 0; sc.Scan(); i++ {\n\t}\n\n\treturn i\n}", "func LineCounter(r io.Reader) (int, error) {\n buf := make([]byte, 32*1024)\n count := 0\n lineSep := []byte{'\\n'}\n\n for {\n c, err := r.Read(buf)\n count = count + bytes.Count(buf[:c], lineSep)\n switch {\n case err == io.EOF:\n return count, nil\n case err != nil:\n return count, err\n }\n }\n}", "func (*mySQL) GetRowCount(r RequestAgGrid, rows int) int64 {\n\tif rows == 0 {\n\t\treturn 0\n\t}\n\n\tcurrentLastRow := r.StartRow + int64(rows)\n\n\tif currentLastRow <= r.EndRow {\n\t\treturn currentLastRow\n\t}\n\treturn -1\n}", "func main() {\n\n\tcsvFile, err := os.Open(\"./uids.csv\")\n\tif err != nil {\n\t\tfmt.Println(\"open error...,err=\", err)\n\t\treturn\n\t}\n\tdefer csvFile.Close()\n\tcsvReader := csv.NewReader(csvFile)\n\traws, err := csvReader.ReadAll()\n\tif err != nil {\n\t\tfmt.Println(\"read file error:\", err)\n\t\treturn\n\t}\n\n\tvar count int\n\tfor _, raw := range raws {\n\t\tif ok, _ := regexp.MatchString(reg, raw[0]); !ok {\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Println(raw[0])\n\t\tcount++\n\t}\n\n\tfmt.Println(\"count:\", count)\n\n}", "func ImportCSVFile(DB services.Database, filePath, tableName string) {\n\tlog.Printf(\"start ImportCSVFile for filePath %s\", filePath)\n\ttimeStampCurrent := time.Now()\n\tlog.Printf(\"time started: %v\", timeStampCurrent)\n\tmysql.RegisterLocalFile(filePath)\n\tres, err := DB.DatabaseConnection.Exec(\"LOAD DATA LOCAL INFILE '\" + filePath + \"' INTO TABLE \" + tableName + \" FIELDS TERMINATED BY ',' LINES TERMINATED BY '\\\\n'\")\n\tif err != nil {\n\t\tlog.Fatalf(\"importCSVFile load err %v\", err)\n\t}\n\tval, err := res.RowsAffected()\n\tif err != nil {\n\t\tlog.Printf(\"res.RowsAffected err %v\", err)\n\t}\n\tmysql.DeregisterLocalFile(filePath)\n\tlog.Printf(\"LOAD DATA LOCAL INFILE took %s time\", 
time.Since(timeStampCurrent).String())\n\tlog.Printf(\"Rows affected %d\", val)\n\tlog.Printf(\"finished ImportCSVFile for filePath %s\", filePath)\n\n}", "func (bkr *Broker) RowCount(tableName string) (int, error) {\n\ttableName = strings.TrimSpace(tableName)\n\n\tif len(tableName) == 0 {\n\t\treturn -1, errors.New(\"`tableName` cannot be empty or whitespace\")\n\t}\n\n\trow := bkr.database.QueryRow(fmt.Sprintf(\"SELECT COUNT(*) FROM %s\", tableName))\n\n\tvar count int\n\n\tif err := row.Scan(&count); err != nil {\n\t\treturn -1, fmt.Errorf(\"Failed to count rows for table %s: %w\", tableName, err)\n\t}\n\n\treturn count, nil\n}", "func CountNbLines(filename string) int {\n\treader, file := ReturnReader(filename, 0)\n\tdefer CloseFile(file)\n\n\tnbLines := 0\n\n\ttStart := time.Now()\n\n\tfor reader.Scan() {\n\t\tnbLines++\n\t}\n\n\ttDiff := time.Since(tStart)\n\tfmt.Printf(\"Count nb lines done in time: %f s \\n\", tDiff.Seconds())\n\n\treturn nbLines\n}", "func readCSV(csvFileName string) []Record {\n\tvar records []Record\n\n\tcsvFile, err := os.Open(csvFileName)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer csvFile.Close()\n\n\tr := csv.NewReader(bufio.NewReader(csvFile))\n\tr.Comma = ';'\n\tfor {\n\t\trecord, err := r.Read()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\t// fmt.Printf(\"%T: %v\\n\", record, record)\n\t\tcount, err := strconv.Atoi(record[2])\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\trecords = append(records, Record{\n\t\t\tDate: record[0],\n\t\t\tTerm: record[1],\n\t\t\tCount: count,\n\t\t})\n\t}\n\n\treturn records\n}", "func CountLines(path string) (int, error) {\n\tvar lines int\n\tfile, err := os.Open(path)\n\n\tscanner := bufio.NewScanner(file)\n\tfor scanner.Scan() {\n\t\tlines++\n\t}\n\treturn lines, err\n}", "func (n *Neo4j) StreamCSVRows(ctx context.Context, instanceID, filterID string, filters *observation.DimensionFilters, limit *int) 
(observation.StreamRowReader, error) {\n\n\theaderRowQuery := fmt.Sprintf(\"MATCH (i:`_%s_Instance`) RETURN i.header as row\", instanceID)\n\n\tunionQuery := headerRowQuery + \" UNION ALL \" + createObservationQuery(ctx, instanceID, filterID, filters)\n\n\tif limit != nil {\n\t\tlimitAsString := strconv.Itoa(*limit)\n\t\tunionQuery += \" LIMIT \" + limitAsString\n\t}\n\n\tlog.Info(ctx, \"neo4j query\", log.Data{\n\t\t\"filterID\": filterID,\n\t\t\"instanceID\": instanceID,\n\t\t\"query\": unionQuery,\n\t})\n\n\treturn n.StreamRows(unionQuery)\n}", "func (d *AddressCacheItem) NumRows() (int, *BlockID) {\n\td.mtx.RLock()\n\tdefer d.mtx.RUnlock()\n\tif d.rows == nil {\n\t\treturn -1, nil\n\t}\n\treturn len(d.rows), d.blockID()\n}", "func CountCampaignRows(ctx context.Context, db SQLHandle, cond CampaignValues) (count int, err error) {\n\tif _, err = queryWithJSONArgs(ctx, db, func(int) []interface{} { return []interface{}{&count} }, SQLCountCampaignRows, cond); err != nil {\n\t\treturn 0, formatError(\"CountCampaignRows\", err)\n\t}\n\treturn count, nil\n}", "func CountCampaignRows(ctx context.Context, db SQLHandle, cond CampaignValues) (count int, err error) {\n\tif _, err = queryWithJSONArgs(ctx, db, func(int) []interface{} { return []interface{}{&count} }, SQLCountCampaignRows, cond); err != nil {\n\t\treturn 0, formatError(\"CountCampaignRows\", err)\n\t}\n\treturn count, nil\n}", "func lineCounter(r io.Reader) (int, error) {\n\tbuf := make([]byte, 32*1024)\n\tcount := 0\n\tlineSep := []byte{'\\n'}\n\n\tfor {\n\t\tc, err := r.Read(buf)\n\t\tcount += bytes.Count(buf[:c], lineSep)\n\n\t\tswitch {\n\t\tcase err == io.EOF:\n\t\t\treturn count, nil\n\n\t\tcase err != nil:\n\t\t\treturn count, err\n\t\t}\n\t}\n}", "func lineCounter(r io.Reader) (int, error) {\n\tbuf := make([]byte, 32*1024)\n\tcount := 0\n\tlineSep := []byte{'\\n'}\n\n\tfor {\n\t\tc, err := r.Read(buf)\n\t\tcount += bytes.Count(buf[:c], lineSep)\n\n\t\tswitch {\n\t\tcase err == io.EOF:\n\t\t\treturn 
count, nil\n\n\t\tcase err != nil:\n\t\t\treturn count, err\n\t\t}\n\t}\n}", "func CountOfLines(inFile io.Reader) (int) {\n \n var err error\n reader := bufio.NewReader(inFile)\n i := 0\n eof := false\n \n for ;!eof ; i++{\n var line string\n line, err = reader.ReadString('\\n')\n fmt.Println(line)\n if err != nil {\n if err == io.EOF {\n break\n }\n log.Fatal(\"Failed to finish reading the file: \", err)\n }\n }\n return i\n}", "func (ac *AddressCache) NumRows(addr string) (int, *BlockID) {\n\taci := ac.addressCacheItem(addr)\n\tif aci == nil {\n\t\treturn -1, nil\n\t}\n\treturn aci.NumRows()\n}", "func LineCount(fileName string) (int, error) {\n\tbuf := make([]byte, 32*1024)\n\tcount := 0\n\tlineSep := []byte{'\\n'}\n\n\tfile, err := os.Open(fileName)\n\tdefer file.Close()\n\tif err != nil {\n\t\treturn count, err\n\t}\n\n\tfor {\n\t\tc, err := file.Read(buf)\n\t\tcount += bytes.Count(buf[:c], lineSep)\n\n\t\tswitch {\n\t\tcase err == io.EOF:\n\t\t\tif count <= 0 {\n\t\t\t\treturn 1, errors.New(\"no newline found\")\n\t\t\t} else {\n\t\t\t\treturn count, nil\n\t\t\t}\n\n\t\tcase err != nil:\n\t\t\treturn count, err\n\t\t}\n\t}\n}", "func loadOneSiteCSVRow(csvHeadersIndex map[string]int, data []string) (bool, CSVRow) {\n\tcsvRow := reflect.New(reflect.TypeOf(CSVRow{}))\n\trowLoaded := false\n\n\tfor header, index := range csvHeadersIndex {\n\t\tvalue := strings.TrimSpace(data[index])\n\t\tcsvRow.Elem().FieldByName(header).Set(reflect.ValueOf(value))\n\t}\n\n\t// if blank data has not been passed then only need to return true\n\tif (CSVRow{}) != csvRow.Elem().Interface().(CSVRow) {\n\t\trowLoaded = true\n\t}\n\n\treturn rowLoaded, csvRow.Elem().Interface().(CSVRow)\n}", "func streamCsv(csv *csv.Reader, buffer int) (lines chan *CsvLine) {\n lines = make(chan *CsvLine, buffer)\n\n go func(){\n // get Header\n header, err := csv.Read()\n if err != nil {\n close(lines)\n return\n }\n\n i := 0\n\n for {\n line, err := csv.Read()\n\n if len(line) > 0 {\n i++\n lines <- 
&CsvLine{Header: header, Line: line}\n }\n\n if err != nil {\n fmt.Printf(\"Sent %d lines\\n\",i)\n close(lines)\n return\n }\n }\n }()\n\n return\n}", "func readCSVFile() {\n\tfile, err := os.Open(config.Config.IngestFolderLocation + \"/output_1.csv\")\n\tif err != nil {\n\t\tlog.Fatalf(\"os.Open() Error: %v\", err)\n\t}\n\n\tfor records := range processCSV(file) {\n\t\tlog.Print(records)\n\t}\n\n}", "func CountPackageRows(ctx context.Context, db SQLHandle, cond PackageValues) (count int, err error) {\n\tif _, err = queryWithJSONArgs(ctx, db, func(int) []interface{} { return []interface{}{&count} }, SQLCountPackageRows, cond); err != nil {\n\t\treturn 0, formatError(\"CountPackageRows\", err)\n\t}\n\treturn count, nil\n}", "func parseCSV(csvFile multipart.File, filename string, column int) (data string, err error) {\n\tvar totalRows int = 0\n\tvar distributionMap = make(map[int]int, 9)\n\n\tr := csvd.NewReader(csvFile)\n\n\tfor {\n\t\trecord, err := r.Read()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\t// Out of range error, i.e. 
that column is not in use.\n\t\tif len(record) <= column {\n\t\t\tbreak\n\t\t}\n\t\t// Skip over any empty records.\n\t\tif record[column] == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tfirstDigit := retrieveFirstDigit(record[column])\n\t\tif firstDigit != 0 {\n\t\t\tdistributionMap[firstDigit]++\n\t\t\ttotalRows++\n\t\t}\n\t}\n\n\tsortedKeys := sortMap(distributionMap)\n\n\tpayload := &Payload{}\n\tpayload.Filename = filename\n\n\tfor _, digit := range sortedKeys {\n\t\tvar count int = distributionMap[digit]\n\t\tvar percent float64 = calculatePercent(count, totalRows)\n\n\t\tif digit != 0 {\n\t\t\tvalues := Digit{Value: digit, Count: count, Percent: percent}\n\t\t\tpayload.AddItem(values)\n\t\t}\n\t\tif digit == 1 {\n\t\t\tpayload.BenfordValidation = benfordValidator(percent)\n\t\t}\n\t}\n\n\toutput, err := json.Marshal(payload)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdata = string(output)\n\treturn\n}", "func (t *Table) RowCount() int {\r\n\r\n\treturn len(t.rows)\r\n}", "func parseCSV(s string, tw timeWindow) []csvLine {\n\t// open CSV file\n\tfile, err := os.Open(s)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"There was a problem opening the file! :. %s\", err))\n\t}\n\t// make sure we eventually close the CSV file\n\tdefer func() {\n\t\tif err = file.Close(); err != nil {\n\t\t\tpanic(fmt.Sprintf(\"There was a problem closing the file! :. 
%s\", err))\n\t\t}\n\t}()\n\t// create a reader for the file\n\treader := csv.NewReader(file)\n\tvar content []csvLine\n\n\t// if the read line is in the specified time window put it into the content slice\n\tfor {\n\t\tEOF := readCSVLine(&content, reader, tw)\n\t\tif EOF {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn content\n}", "func (f *FieldValues) NumRow() int {\n\treturn f.rows\n}", "func (t *Table) RowCount() int {\n\treturn len(t.Row)\n}", "func (h *HandlersApp01sqVendor) TableLoadCSV(w http.ResponseWriter, r *http.Request) {\n\tvar err error\n\tvar rcd App01sqVendor.App01sqVendor\n\tvar fileIn multipart.File\n\tvar cnt int\n\tvar maxMem int64\n\tvar handler *multipart.FileHeader\n\n\tlog.Printf(\"hndlrVendor.TableLoadCSV(%s)\\n\", r.Method)\n\tif r.Method != \"POST\" {\n\t\thttp.Error(w, http.StatusText(405), http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\n\t// ParseMultipartForm parses a request body as multipart/form-data.\n\t// The whole request body is parsed and up to a total of maxMemory\n\t// bytes of its file parts are stored in memory, with the remainder\n\t// stored on disk in temporary files. ParseMultipartForm calls ParseForm\n\t// if necessary. 
After one call to ParseMultipartForm, subsequent\n\t// calls have no effect.\n\tname := \"csvFile\" // Must match Name parameter of Form's \"<input type=file name=???>\"\n\tmaxMem = 64 << 20 // 64mb\n\tr.ParseMultipartForm(maxMem)\n\n\t// FormFile returns the first file for the given key which was\n\t// specified on the Form Input Type=file Name parameter.\n\t// it also returns the FileHeader so we can get the Filename,\n\t// the Header and the size of the file\n\tfileIn, handler, err = r.FormFile(name)\n\tif err != nil {\n\t\tlog.Printf(\"...end hndlrVendor.TableLoadCSV(Error:500) - %s\\n\", util.ErrorString(err))\n\t\thttp.Error(w, http.StatusText(500), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tdefer fileIn.Close() //close the file when we finish\n\tlog.Printf(\"\\tUploaded File: %+v\\n\", handler.Filename)\n\tlog.Printf(\"\\tFile Size: %+v\\n\", handler.Size)\n\tlog.Printf(\"\\tMIME Header: %+v\\n\", handler.Header)\n\trdr := csv.NewReader(fileIn)\n\n\t// Create the table.\n\terr = h.db.TableCreate()\n\tif err != nil {\n\t\tw.Write([]byte(\"Table creation had an error of:\" + util.ErrorString(err)))\n\t}\n\n\tlog.Printf(\"\\tLoading data...\\n\")\n\tfor {\n\t\trecord, err := rdr.Read()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\tstr := fmt.Sprintf(\"ERROR: Reading row %d from csv - %s\\n\", cnt, util.ErrorString(err))\n\t\t\tw.Write([]byte(str))\n\t\t\treturn\n\t\t}\n\n\t\trcd.Id, _ = strconv.ParseInt(record[0], 0, 64)\n\n\t\trcd.Name = record[1]\n\n\t\trcd.Addr1 = record[2]\n\n\t\trcd.Addr2 = record[3]\n\n\t\trcd.City = record[4]\n\n\t\trcd.State = record[5]\n\n\t\trcd.Zip = record[6]\n\n\t\trcd.Curbal = record[7]\n\n\t\terr = h.db.RowInsert(&rcd)\n\t\tif err != nil {\n\t\t\tstr := fmt.Sprintf(\"ERROR: Table creation had an error of: %s\\n\", util.ErrorString(err))\n\t\t\tw.Write([]byte(str))\n\t\t\treturn\n\t\t}\n\t\tcnt++\n\t\tlog.Printf(\"\\t...Added row %d\\n\", cnt)\n\t}\n\tfor i := 1; i > 0; i-- {\n\t\tstr := 
fmt.Sprintf(\"Added %d rows\\n\", cnt)\n\t\tw.Write([]byte(str))\n\t}\n\n\tlog.Printf(\"...end hndlrVendor.TableLoadCSV(ok) - %d\\n\", cnt)\n\n}", "func (p *partitionImpl) GetNumRows() int {\n\treturn int(p.internalData.NumRows)\n}", "func parseCsv(file string) ([][]string, error) {\n\tf, err := os.Open(file)\n\tdefer f.Close()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlines, err := csv.NewReader(f).ReadAll()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn lines, nil\n}", "func (d *Dataset) LoadCSV(filename string) bool {\n\tif data, err := godatamining.FromCSV(filename); err == nil {\n\t\ttmp := (*data)[1:]\n\t\td.Data = &tmp\n\t\td.Size = uint16(len(*data) - 1)\n\t\treturn true\n\t}\n\treturn false\n}", "func (f *File) LineCount() int {\n\tf.mutex.RLock()\n\tn := len(f.lines)\n\tf.mutex.RUnlock()\n\treturn n\n}", "func processCSV(rc io.Reader) (ch chan []string) {\n\tch = make(chan []string, 10)\n\tgo func() {\n\t\tr := csv.NewReader(rc)\n\t\tif _, err := r.Read(); err != nil { //read header\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tdefer close(ch)\n\t\tfor {\n\t\t\trec, err := r.Read()\n\t\t\tif err != nil {\n\t\t\t\tif err == io.EOF {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tlog.Fatal(err)\n\n\t\t\t}\n\t\t\tch <- rec\n\t\t}\n\t}()\n\treturn\n}", "func (sheet *SheetData) NumberRows() int {\n\treturn len(sheet.Values)\n\n}", "func lineCounter(r io.Reader) (int, error) {\n\tbuf := make([]byte, 32*1024)\n\tcount := 0\n\tlineSep := []byte{'\\n'}\n\tvar lastByte byte\n\tlastByte = '\\n'\n\n\tfor {\n\t\tc, err := r.Read(buf)\n\t\tif c > 0 {\n\t\t\tlastByte = buf[c-1]\n\t\t}\n\t\tcount += bytes.Count(buf[:c], lineSep)\n\n\t\tswitch {\n\t\tcase err == io.EOF:\n\t\t\tif lastByte != '\\n' {\n\t\t\t\tlog.Warn(fmt.Sprintf(\"Last byte in buffer is '%v'\", lastByte))\n\t\t\t\tcount += 1\n\t\t\t}\n\t\t\treturn count, nil\n\n\t\tcase err != nil:\n\t\t\treturn count, err\n\t\t}\n\t}\n}", "func ImportCSVFiles(DB services.Database, folderPath string, tableName string) 
{\n\tlog.Printf(\"start ImportCSVFile for folderPath %s\", folderPath)\n\ttimeStampCurrent := time.Now()\n\tlog.Printf(\"time started: %v\", timeStampCurrent)\n\tfileList := fetchFilesFromFolder(folderPath)\n\tfor _, fs := range fileList {\n\t\tabsolutepath := config.Config.IngestFolderLocation + \"/\" + fs.Name()\n\t\tlog.Printf(\"importing file : %s\", absolutepath)\n\t\ttimeStampStartImportForSingleFile := time.Now()\n\t\tImportCSVFile(DB, absolutepath, tableName)\n\t\tlog.Printf(\"ImportCSVFile() took %s time\", time.Since(timeStampStartImportForSingleFile).String())\n\n\t}\n\tlog.Printf(\"finished ImportCSVFile() took %s time\", time.Since(timeStampCurrent).String())\n}", "func (s *TransactionRows) Count() int {\n\t// return s.iter.\n\treturn 0\n}", "func ParseIntCsv() (result []int, err error) {\n\trecord, err := ParseCsvInput()\n\tif err != nil {\n\t\treturn\n\t}\n\tresult, err = MapAtoi(record)\n\treturn\n}", "func readCsvFile(csvIn io.Reader) ([][]string, error) {\n\n\tcsvReader := csv.NewReader(csvIn)\n\tresult, err := csvReader.ReadAll()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result, nil\n}", "func (s *Service) UploadCSV() http.HandlerFunc {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\treader := csv.NewReader(r.Body)\n\n\t\tlines, err := reader.ReadAll()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\trecords := make([]User, 0)\n\n\t\tfor i, line := range lines {\n\t\t\tfmt.Println(\"line: \", line)\n\t\t\t// skip header\n\t\t\tif i == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tzipCode, err := strconv.Atoi(line[3])\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\n\t\t\trecord := User{\n\t\t\t\tEmail: line[0],\n\t\t\t\tFirstName: line[1],\n\t\t\t\tLastName: line[2],\n\t\t\t\tZipCode: zipCode,\n\t\t\t}\n\n\t\t\trecords = append(records, record)\n\t\t}\n\n\t\t// todo: dbService.InsertRecords(records)\n\t})\n}", "func writeCSV(w *Writer, dataChan chan []sql.RawBytes, goChan chan bool, verbose bool) 
uint {\n\tvar rowsWritten uint\n\tvar verboseCount uint\n\n\tif verbose {\n\t\tfmt.Println(\"A '.' will be shown for every 10,000 CSV rows written\")\n\t}\n\n\t// Range over row results from readRows()\n\tfor data := range dataChan {\n\t\t// Format the data to CSV and write\n\t\tsize, err := w.Write(data)\n\t\tcheckErr(err)\n\n\t\t// Visual write indicator when verbose is enabled\n\t\trowsWritten++\n\t\tif verbose {\n\t\t\tverboseCount++\n\t\t\tif verboseCount == 10000 {\n\t\t\t\tfmt.Printf(\".\")\n\t\t\t\tverboseCount = 0\n\t\t\t}\n\t\t}\n\n\t\t// Flush CSV writer contents once it exceeds flushBufferSize\n\t\tif size > flushBufferSize {\n\t\t\tw.Flush()\n\t\t\terr = w.Error()\n\t\t\tcheckErr(err)\n\t\t}\n\n\t\t// Signal back to readRows() it can loop and scan the next row\n\t\tgoChan <- true\n\t}\n\n\t// Flush remaining CSV writer contents\n\tw.Flush()\n\terr := w.Error()\n\tcheckErr(err)\n\n\treturn rowsWritten\n}", "func ProcessCount(chunks []*bytes.Buffer, position uint64, condition string) (result int64) {\n\n\trows := bytes.Count(chunks[0].Bytes(), []byte{0x0a}) * 2\n\n\trowIndices := make([]uint32, rows)\n\tmaskCommas := make([]uint64, ((len(chunks[0].Bytes()) + 63) >> 6))\n\tcolIndices := make([]uint32, rows)\n\tequal := make([]uint64, (rows+63)>>6)\n\n\tfor _, chunk := range chunks {\n\t\tr := ParseCsvAdjusted(chunk.Bytes(), rowIndices, maskCommas)\n\t\tExtractIndexForColumn(maskCommas, rowIndices[:r], colIndices[:r], position)\n\t\tEvaluateCompareString(chunk.Bytes(), colIndices[:r], condition, equal[:(r+63)>>6])\n\t\tresult += int64(count64(equal[:(r+63)>>6]))\n\t}\n\treturn\n}", "func ParseCSV(filename string) (*WhoisResults, error) {\n\tentries, err := dry.FileGetCSV(filename, time.Second*5)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tp := pool.NewLimited(10)\n\tbatch := p.Batch()\n\n\tgo func() {\n\t\tfor pos, entry := range entries {\n\t\t\tbatch.Queue(asyncWhoisRecord(pos, 
&WhoisRecordRequest{\n\t\t\t\tentry[0],\n\t\t\t\tentry[1],\n\t\t\t}))\n\t\t}\n\t\tbatch.QueueComplete()\n\t}()\n\treturn handleBatchResults(batch)\n}", "func importCSV(name string, courseName string) error {\n\n\tdb, err := sql.Open(\"mysql\", DB_USER_NAME+\":\"+DB_PASSWORD+\"@unix(/var/run/mysql/mysql.sock)/\"+DB_NAME)\n\tif err != nil {\n\t\treturn errors.New(\"No connection\")\n\t}\n\n\tmysql.RegisterLocalFile(name)\n\n\t// TODO : Solve password @dummy issue, also CSV quotation issue, trailing comma issue\n\tres, err := db.Exec(\"LOAD DATA LOCAL INFILE '\" + name + \"' INTO TABLE Users FIELDS TERMINATED BY ',' ENCLOSED BY '\\\"' LINES TERMINATED BY '\\n' IGNORE 1 LINES (@dummy, FirstName, MiddleInitial, LastName, UserName, Password, @dummy, @dummy, @dummy, @dummy, @dummy)\")\n\n\tif err != nil {\n\t\treturn errors.New(\"Import failed.\")\n\t}\n\n\trowsAffected, err := res.RowsAffected()\n\n\trows, err := db.Query(\"select UserID from Users order by UserID DESC Limit ?\", rowsAffected)\n\n\tif err != nil {\n\t\treturn errors.New(\"Query error.\")\n\t}\n\n\tfor i := 0; ; i++ {\n\n\t\tvar userID int\n\n\t\tif rows.Next() == false {\n\t\t\tbreak\n\t\t}\n\n\t\trows.Scan(&userID)\n\n\t\t_, err = db.Exec(\"INSERT INTO StudentCourses(Student, CourseName) VALUES (?, ?)\", userID, courseName)\n\n\t\tif err != nil {\n\t\t\treturn errors.New(\"User unable to be added to student courses.\")\n\t\t}\n\n\t\tuserName, _ := getUserName(userID)\n\t\tsendRandomPassword(userName)\n\t}\n\n\treturn nil\n}", "func countFileLine(fileName string) int {\n\tfile, err := os.Open(fileName)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer file.Close()\n\tvar lineCount int\n\treader := bufio.NewReader(file)\n\tfor {\n\t\t_, isPrefix, err := reader.ReadLine()\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tif !isPrefix {\n\t\t\tlineCount++\n\t\t}\n\t}\n\treturn lineCount\n}", "func readCSVFile(csvFile *string) [][]string {\n\tfile, err := os.Open(*csvFile)\n\tif err != nil 
{\n\t\texit(fmt.Sprintf(\"Failed to open %s\\n\", *csvFile))\n\t}\n\n\tr := csv.NewReader(file)\n\n\tlines, err := r.ReadAll()\n\tif err != nil {\n\t\texit(\"Failed to parse provided CSV file.\")\n\t}\n\n\treturn lines\n}", "func ReadCsvFile(filename string) (SimpleCsv, bool) {\n\tok := true\n\tfile, err := os.Open(filename)\n\tif err != nil {\n\t\tok = false\n\t}\n\tdefer file.Close()\n\treader := csv.NewReader(file)\n\treader.Comma = ','\n\t// lineCount := 0\n\n\tallRecords, err := reader.ReadAll()\n\tif err != nil {\n\t\tok = false\n\t}\n\treturn allRecords, ok\n}", "func countFromSheet(cel, row, totCells, header bool, file string) int {\n\tvar celNumber int\n\tvar rowNumber int\n\tvar totalCells int\n\tvar value int\n\n\txlFile, err := xlsx.OpenFile(file)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tfor _, sheet := range xlFile.Sheets {\n\t\tfor r, row := range sheet.Rows {\n\t\t\trowNumber = r + 1\n\t\t\tfor c, _ := range row.Cells {\n\t\t\t\tcelNumber = c + 1\n\t\t\t}\n\t\t}\n\t}\n\n\tif header {\n\t\ttotalCells = rowNumber * celNumber\n\t} else {\n\t\trowNumber = rowNumber - 1\n\t\ttotalCells = rowNumber * celNumber\n\t}\n\n\tif cel {\n\t\tvalue = celNumber\n\t} else if row {\n\t\tvalue = rowNumber\n\t} else if totCells {\n\t\tvalue = totalCells\n\t}\n\treturn value\n}", "func LineCount(buf string) int {\n\treturn bytes.Count([]byte(buf), []byte{'\\n'})\n}", "func getLinesFromCSV(filePath string) (lines [][]string, err error) {\n\t// open file\n\tfile, err := os.Open(filePath)\n\tcheck(err)\n\tdefer file.Close() // defer closing the file until function returns\n\n\t// create CSV Reader from file\n\treader := csv.NewReader(file)\n\treturn reader.ReadAll()\n}", "func ReadCSV(r io.Reader, f func(record []string) error) error {\n\tstream := readCSVLines(r)\n\tfor {\n\t\tselect {\n\t\tcase <-DieChannel:\n\t\t\treturn nil\n\t\tdefault:\n\t\t}\n\n\t\tselect {\n\t\tcase x, ok := <-stream:\n\t\t\tif !ok {\n\t\t\t\treturn nil\n\t\t\t} else if x.err != nil 
{\n\t\t\t\treturn x.err\n\t\t\t}\n\t\t\tif err := f(x.record); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase <-DieChannel:\n\t\t\treturn nil\n\t\t}\n\t}\n}", "func lines(path string) int {\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t// defer keyword - to ensure operations always happen\n\tdefer f.Close()\n\n\tsc := bufio.NewScanner(f)\n\tvar lines int\n\tfor sc.Scan() {\n\t\tlines++\n\t}\n\tif err := sc.Err(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn lines\n}", "func (client *HTTPClient) StreamCSV(url string, handler func(row []string) error, opts CSVStreamOptions) error {\n\tresp, err := client.Get(url, opts.RequestOptions)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tdefer io.Copy(io.Discard, resp.Body) //nolint:errcheck\n\tvar reader io.Reader = resp.Body\n\tif opts.Decoder != nil {\n\t\treader = transform.NewReader(resp.Body, opts.Decoder)\n\t}\n\tcsvReader := csv.NewReader(reader)\n\tcsvReader.ReuseRecord = true\n\tcsvReader.FieldsPerRecord = opts.NumColumns\n\tif opts.Comma != 0 {\n\t\tcsvReader.Comma = opts.Comma\n\t}\n\tskippedHeader := opts.HeaderHeight\n\tvar row []string\n\tfor {\n\t\trow, err = csvReader.Read()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t} else if e, ok := err.(*csv.ParseError); ok && e.Err == csv.ErrFieldCount {\n\t\t\tskippedHeader--\n\t\t\tcontinue\n\t\t} else if err != nil {\n\t\t\treturn WrapErr(err, \"csv stream error\")\n\t\t}\n\t\tif skippedHeader > 0 {\n\t\t\tskippedHeader--\n\t\t\tcontinue\n\t\t}\n\t\tif opts.NumColumns != 0 && len(row) != opts.NumColumns {\n\t\t\treturn NewErr(fmt.Errorf(\"unexpected csv row with %d columns insteas of %d\", len(row), opts.NumColumns)).With(\"row\", row)\n\t\t}\n\t\tif err = handler(row); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func numberOfLines(input []byte) int {\n\tvar count int\n\tfor _, v := range input {\n\t\tif v == '\\n' {\n\t\t\tcount++\n\t\t}\n\t}\n\treturn count\n}", "func (s *ServicesWidget) 
RowCount() int {\n\treturn len(s.filteredRows)\n}", "func Count(sql string, args ...interface{}) int64 {\n\tvar total int64\n\terr := QueryRow(sql, args...).Scan(&total)\n\tif err != nil {\n\t\tfmt.Errorf(\"%v\", err)\n\t\treturn 0\n\t}\n\treturn total\n}", "func (togglCSVImporter *TogglCSVImporter) Import(input io.Reader) error {\n\n\t// read the CSV data\n\tcsvReader := csv.NewReader(input)\n\trows, csvError := csvReader.ReadAll()\n\tif csvError != nil {\n\t\treturn fmt.Errorf(\"Failed to read time records from CSV: %s\", csvError.Error())\n\t}\n\n\ttimeRecords, timeRecordsError := togglCSVImporter.csvMapper.GetTimeRecords(rows)\n\tif timeRecordsError != nil {\n\t\treturn timeRecordsError\n\t}\n\n\t// abort if no time records were returned\n\tif len(timeRecords) == 0 {\n\t\treturn nil\n\t}\n\n\t// upload the time entries to toggl\n\tprogressbar := pb.New(len(timeRecords))\n\tprogressbar.ShowTimeLeft = true\n\tif togglCSVImporter.output != nil {\n\t\tprogressbar.Output = togglCSVImporter.output\n\t\tprogressbar.Start()\n\t}\n\n\t// create the records\n\tfor recordIndex, record := range timeRecords {\n\n\t\tif err := togglCSVImporter.timeRecordRepository.CreateTimeRecord(record); err != nil {\n\t\t\treturn errors.Wrap(err, fmt.Sprintf(\"Failed to create time record %d of %d\", recordIndex+1, len(timeRecords)))\n\t\t}\n\n\t\tif togglCSVImporter.output != nil {\n\t\t\tprogressbar.Increment()\n\t\t}\n\n\t}\n\n\tif togglCSVImporter.output != nil {\n\t\t// FinishPrint writes to os.Stdout\n\t\t// see:\n\t\t// https://github.com/cheggaaa/pb/issues/87\n\t\t// https://github.com/cheggaaa/pb/commit/7f4253899ba18226b3c52aca004d298182360edc#commitcomment-18923803\n\t\t// progressbar.FinishPrint(\"Import complete.\")\n\t\tprogressbar.Finish()\n\t}\n\n\treturn nil\n}", "func (mock *IGraphMock) StreamCSVRowsCalls() []struct {\n\tCtx context.Context\n\tInstanceID string\n\tFilterID string\n\tFilters *observation.DimensionFilters\n\tLimit *int\n} {\n\tvar calls []struct {\n\t\tCtx 
context.Context\n\t\tInstanceID string\n\t\tFilterID string\n\t\tFilters *observation.DimensionFilters\n\t\tLimit *int\n\t}\n\tmock.lockStreamCSVRows.RLock()\n\tcalls = mock.calls.StreamCSVRows\n\tmock.lockStreamCSVRows.RUnlock()\n\treturn calls\n}", "func LineCounter(r io.Reader) (int64, error) {\n\tvar newLineChr byte\n\tvar readSizeTmp int\n\tvar readSize int64\n\tvar err error\n\tvar count int64\n\tbuf := make([]byte, 1024)\n\tnewLineChr = '\\n'\n\n\tfor {\n\t\treadSizeTmp, err = r.Read(buf)\n\t\treadSize = int64(readSizeTmp)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\n\t\tvar buffPosition int64\n\t\tfor {\n\t\t\ti := int64(bytes.IndexByte(buf[buffPosition:], newLineChr))\n\t\t\tif i == -1 || readSize == buffPosition {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tbuffPosition += i + 1\n\t\t\tcount++\n\t\t}\n\t}\n\tif readSize > 0 && count == 0 || count > 0 {\n\t\tcount++\n\t}\n\tif err == io.EOF {\n\t\treturn count - 1, nil\n\t}\n\n\treturn count - 1, err\n}", "func (c ColDate) Rows() int {\n\treturn len(c)\n}", "func (c *Conn) ImportCSV(in io.Reader, ic ImportConfig, dbName, table string) error {\n\tcolumns, err := c.Columns(dbName, table)\n\tif err != nil {\n\t\treturn err\n\t}\n\tr := yacr.NewReader(in, ic.Separator, ic.Quoted, ic.Guess)\n\tr.Trim = ic.Trim\n\tr.Comment = ic.Comment\n\tnCol := len(columns)\n\tif nCol == 0 { // table does not exist, let's create it\n\t\tvar sql string\n\t\tif len(dbName) == 0 {\n\t\t\tsql = fmt.Sprintf(`CREATE TABLE \"%s\" `, escapeQuote(table))\n\t\t} else {\n\t\t\tsql = fmt.Sprintf(`CREATE TABLE %s.\"%s\" `, doubleQuote(dbName), escapeQuote(table))\n\t\t}\n\t\tsep := '('\n\t\t// TODO if headers flag is false...\n\t\tfor i := 0; r.Scan(); i++ {\n\t\t\tif i == 0 && r.EndOfRecord() && len(r.Bytes()) == 0 { // empty line\n\t\t\t\ti = -1\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tsql += fmt.Sprintf(\"%c\\n \\\"%s\\\" %s\", sep, r.Text(), ic.getType(i))\n\t\t\tsep = ','\n\t\t\tnCol++\n\t\t\tif r.EndOfRecord() {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif 
err = r.Err(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif sep == '(' {\n\t\t\treturn errors.New(\"empty file/input\")\n\t\t}\n\t\tsql += \"\\n)\"\n\t\tif err = c.FastExec(sql); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else if ic.Headers { // skip headers line\n\t\tfor r.Scan() {\n\t\t\tif r.EndOfRecord() {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif err = r.Err(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tvar sql string\n\tif len(dbName) == 0 {\n\t\tsql = fmt.Sprintf(`INSERT INTO \"%s\" VALUES (?%s)`, escapeQuote(table), strings.Repeat(\", ?\", nCol-1))\n\t} else {\n\t\tsql = fmt.Sprintf(`INSERT INTO %s.\"%s\" VALUES (?%s)`, doubleQuote(dbName), escapeQuote(table), strings.Repeat(\", ?\", nCol-1))\n\t}\n\ts, err := c.prepare(sql)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer s.Finalize()\n\tac := c.GetAutocommit()\n\tif ac {\n\t\tif err = c.Begin(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tdefer func() {\n\t\tif err != nil && ac {\n\t\t\t_ = c.Rollback()\n\t\t}\n\t}()\n\tstartLine := r.LineNumber()\n\tfor i := 1; r.Scan(); i++ {\n\t\tif i == 1 && r.EndOfRecord() && len(r.Bytes()) == 0 { // empty line\n\t\t\ti = 0\n\t\t\tstartLine = r.LineNumber()\n\t\t\tcontinue\n\t\t}\n\t\tif i <= nCol {\n\t\t\tif err = s.BindByIndex(i, r.Text()); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tif r.EndOfRecord() {\n\t\t\tif i < nCol {\n\t\t\t\tif ic.Log != nil {\n\t\t\t\t\t_, _ = fmt.Fprintf(ic.Log, \"%s:%d: expected %d columns but found %d - filling the rest with NULL\\n\", ic.Name, startLine, nCol, i)\n\t\t\t\t}\n\t\t\t\tfor ; i <= nCol; i++ {\n\t\t\t\t\tif err = s.BindByIndex(i, nil); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else if i > nCol && ic.Log != nil {\n\t\t\t\t_, _ = fmt.Fprintf(ic.Log, \"%s:%d: expected %d columns but found %d - extras ignored\\n\", ic.Name, startLine, nCol, i)\n\t\t\t}\n\t\t\tif _, err = s.Next(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\ti = 0\n\t\t\tstartLine = 
r.LineNumber()\n\t\t}\n\t}\n\tif err = r.Err(); err != nil {\n\t\treturn err\n\t}\n\tif ac {\n\t\tif err = c.Commit(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (m *minioStorage) Count(ctx context.Context, params FileFilterParams) (int64, error) {\n\treturn -1, nil\n}", "func (this *TableCol) LineCount(maxWidth ...int) int {\n\tif len(maxWidth) == 0 || maxWidth[0] == 0 {\n\t\treturn this.lineCount\n\t}\n\treturn strings.Count(this.Content(maxWidth[0]), \"\\n\") + 1\n}", "func countLines(f *os.File, counts map[string]int) {\n\tinput := bufio.NewScanner(f)\n\tfor input.Scan() {\n\t\tcounts[input.Text()]++\n\t}\n\n\tfor line, count := range counts {\n\t\tif count > 1 {\n\t\t\tfmt.Printf(\"%d \\t %s \\n\", count, line)\n\t\t}\n\t}\n}", "func validateCsvInput(csvIn [][]string) error {\n\tif len(csvIn) <= 1 {\n\t\treturn errors.New(`There must be at least 2 lines in the csv file - \n\t\t\t\t1 for headers and at least 1 row of data`)\n\t}\n\trow1Len := len(csvIn[0])\n\t// all should have same number of columns\n\tfor i, v := range csvIn {\n\t\tif len(v) != row1Len {\n\t\t\treturn errors.New(fmt.Sprintf(\"Discrepancy in row length for row %d. 
Expected %d but was %d\",\n\t\t\t\ti, row1Len, len(v)))\n\t\t}\n\t}\n\treturn nil\n}", "func getFileCount(dir string) int {\n\tfiles, err := ioutil.ReadDir(tempdir)\n\tif err != nil {\n\t\tlog.Printf(\"getFileCount error: %v\", err)\n\t}\n\treturn len(files)\n}", "func ImportLogsFromCSV(fileName string) (DataCollection, error) {\n\tfile, err := os.Open(fileName)\n\tif err != nil {\n\t\treturn DataCollection{}, fmt.Errorf(ImportCSVError, err.Error())\n\t}\n\tdefer file.Close()\n\n\tdata := DataCollection{}\n\tvar skipHeader bool\n\tr := csv.NewReader(file)\n\tfor {\n\t\trecord, err := r.Read()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn DataCollection{}, err\n\t\t}\n\t\tif !skipHeader {\n\t\t\tskipHeader = true\n\t\t\tcontinue\n\t\t}\n\n\t\t// CSV is missing Data\n\t\tif len(record) < 3 {\n\t\t\treturn DataCollection{}, fmt.Errorf(ImportCSVError, \"Expected at least 3 rows\")\n\t\t}\n\n\t\t// Parsing the endTime\n\t\tendTime, err := ParseDate(record[1])\n\t\tif err != nil {\n\t\t\treturn DataCollection{}, err\n\t\t}\n\n\t\ttimeTaken, err := strconv.Atoi(record[2])\n\t\tif err != nil {\n\t\t\treturn DataCollection{}, err\n\t\t}\n\n\t\tstartTime := endTime.Add(time.Duration(time.Duration(-timeTaken) * time.Millisecond))\n\t\tdata = append(data, Data{record[0], startTime, endTime, timeTaken, record[1]})\n\t}\n\n\tsort.Sort(data)\n\treturn data, nil\n}", "func TestCountInputFile(t *testing.T) {\n\t_o = opts{\n\t\t\"./final.txt\",\n\t\tfalse,\n\t}\n\tgot := valueByInput(_o.filePath)\n\twant := 484\n\n\tif got != want {\n\t\tt.Errorf(\"Count orbits = %d; want %d\", got, want)\n\t}\n}", "func CountPopRows(ctx context.Context, db SQLHandle, cond PopValues) (count int, err error) {\n\tif _, err = queryWithJSONArgs(ctx, db, func(int) []interface{} { return []interface{}{&count} }, SQLCountPopRows, cond); err != nil {\n\t\treturn 0, formatError(\"CountPopRows\", err)\n\t}\n\treturn count, nil\n}", "func CsvToJSON(csvSrc io.Reader, jsonDst 
io.Writer) error {\n\tenc := json.NewEncoder(jsonDst)\n\tr := csv.NewReader(csvSrc)\n\tn := 0\n\t// TODO: should probably compare header to expected schema\n\tvar headerFields []string\n\tfor {\n\t\tp := &profiles.ChurnProfile{}\n\t\tvar err error\n\t\tvar record []string\n\t\tif record, err = r.Read(); err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\tif n == 0 {\n\t\t\theaderFields = record\n\t\t} else {\n\t\t\terr := loadProfile(p, record, headerFields)\n\t\t\tif err == ErrBadCsvCol {\n\t\t\t\t// TODO: handle bad csv rows, whether logging, adding to skip file\n\t\t\t\t// emitting metrics/notification etc, for now just skip this row\n\t\t\t\tlog.Println(\"Encountered bad column, skipping row\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := enc.Encode(&p); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tn++\n\t\tlog.Println(\"seen\", n)\n\t}\n\t//TODO: emit metrics around lines processed, skipped, etc\n\treturn nil\n}", "func Count(it *Iterator) (int, error) {\n\tcount := 0\n\tfor {\n\t\t_, err := it.Next()\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\treturn count, err\n\t\t}\n\t\tcount += 1\n\t}\n\treturn count, nil\n}", "func GetExistingTaskCount(fileName string) int{\r\n\r\n\tcontent, err := ioutil.ReadFile(fileName)\r\n\tif err != nil {\r\n\t\tlog.Fatal(err)\r\n\t}\t\r\n\tscanner := bufio.NewScanner(strings.NewReader(string(content)))\r\n\tscanner.Split(bufio. 
ScanLines)\r\n\tcount := 0\r\n\tfor scanner.Scan() {\r\n\t\tcount++\r\n\t}\t\r\n\tif err := scanner.Err(); err != nil {\r\n\t\tfmt.Fprintln(os.Stderr, \"reading input:\", err)\r\n\t}\r\n\treturn count\r\n}", "func Lines(r io.ReadSeeker) (count uint64, err error) {\n\tdefer r.Seek(0, 0)\n\tbuf := make([]byte, 32*1024)\n\tlineSep := []byte{'\\n'}\n\n\tfor {\n\t\tc, err := r.Read(buf)\n\t\tcount += uint64(bytes.Count(buf[:c], lineSep))\n\n\t\tswitch {\n\t\tcase err == io.EOF:\n\t\t\treturn count, nil\n\n\t\tcase err != nil:\n\t\t\treturn count, err\n\t\t}\n\t}\n}", "func parsePrimeListCSV(primeChan chan int) {\n\tdefer close(primeChan)\n\t//open file logic\n\topenFile, err := os.Open(\"list/list.prime\")\n\tcheckError(\"Failed to open prime list file. \", err)\n\tdefer openFile.Close()\n\n\treader := csv.NewReader(bufio.NewReader(openFile))\n\tfor {\n\t\tstringSlice, error := reader.Read()\n\t\tif error == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tfor i := 0; i < len(stringSlice); i++ {\n\t\t\tprime, _ := strconv.Atoi(stringSlice[i])\n\t\t\tprimeChan <- prime\n\t\t}\n\t}\n}", "func ParseCSV(r io.Reader) ([]Entry, error) {\n\treader := csv.NewReader(r)\n\n\t// skip header\n\treader.Read()\n\n\tvar entries []Entry\n\tfor {\n\t\tline, err := reader.Read()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\treturn nil, fmt.Errorf(\"encountered error while parsing csv: %v\", err)\n\t\t}\n\n\t\tentries = append(entries, entryFor(line))\n\t}\n\n\treturn entries, nil\n}", "func ReadCSV(fpath string) (*CSV, error) {\n\tf, err := fileutil.OpenToRead(fpath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\n\trd := csv.NewReader(f)\n\n\t// in case that rows have Deltaerent number of fields\n\trd.FieldsPerRecord = -1\n\n\trows, err := rd.ReadAll()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(rows) <= 1 {\n\t\treturn nil, fmt.Errorf(\"expected len(rows)>1, got %d\", len(rows))\n\t}\n\tif rows[0][0] != \"UNIX-NANOSECOND\" {\n\t\treturn nil, 
fmt.Errorf(\"expected header at top, got %+v\", rows[0])\n\t}\n\n\t// remove header\n\trows = rows[1:len(rows):len(rows)]\n\tmin, err := strconv.ParseInt(rows[0][0], 10, 64)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmax, err := strconv.ParseInt(rows[len(rows)-1][0], 10, 64)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc := &CSV{\n\t\tFilePath: fpath,\n\t\tPID: 0,\n\t\tDiskDevice: \"\",\n\t\tNetworkInterface: \"\",\n\n\t\tHeader: ProcHeader,\n\t\tHeaderIndex: ProcHeaderIndex,\n\t\tMinUnixNanosecond: min,\n\t\tMinUnixSecond: nanoToUnix(min),\n\t\tMaxUnixNanosecond: max,\n\t\tMaxUnixSecond: nanoToUnix(max),\n\n\t\tRows: make([]Proc, 0, len(rows)),\n\t}\n\tfor _, row := range rows {\n\t\tts, err := strconv.ParseInt(row[ProcHeaderIndex[\"UNIX-NANOSECOND\"]], 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ttss, err := strconv.ParseInt(row[ProcHeaderIndex[\"UNIX-SECOND\"]], 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpid, err := strconv.ParseInt(row[ProcHeaderIndex[\"PID\"]], 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tppid, err := strconv.ParseInt(row[ProcHeaderIndex[\"PPID\"]], 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfd, err := strconv.ParseUint(row[ProcHeaderIndex[\"FD\"]], 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tthreads, err := strconv.ParseUint(row[ProcHeaderIndex[\"THREADS\"]], 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tvolCtxNum, err := strconv.ParseUint(row[ProcHeaderIndex[\"VOLUNTARY-CTXT-SWITCHES\"]], 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tnonVolCtxNum, err := strconv.ParseUint(row[ProcHeaderIndex[\"NON-VOLUNTARY-CTXT-SWITCHES\"]], 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tcpuNum, err := strconv.ParseFloat(row[ProcHeaderIndex[\"CPU-NUM\"]], 64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tvmRssNum, err := strconv.ParseUint(row[ProcHeaderIndex[\"VMRSS-NUM\"]], 10, 
64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tvmSizeNum, err := strconv.ParseUint(row[ProcHeaderIndex[\"VMSIZE-NUM\"]], 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tloadAvg1min, err := strconv.ParseFloat(row[ProcHeaderIndex[\"LOAD-AVERAGE-1-MINUTE\"]], 64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tloadAvg5min, err := strconv.ParseFloat(row[ProcHeaderIndex[\"LOAD-AVERAGE-5-MINUTE\"]], 64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tloadAvg15min, err := strconv.ParseFloat(row[ProcHeaderIndex[\"LOAD-AVERAGE-15-MINUTE\"]], 64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treadsCompleted, err := strconv.ParseUint(row[ProcHeaderIndex[\"READS-COMPLETED\"]], 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tsectorsRead, err := strconv.ParseUint(row[ProcHeaderIndex[\"SECTORS-READ\"]], 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\twritesCompleted, err := strconv.ParseUint(row[ProcHeaderIndex[\"WRITES-COMPLETED\"]], 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tsectorsWritten, err := strconv.ParseUint(row[ProcHeaderIndex[\"SECTORS-WRITTEN\"]], 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ttimeSpentOnReadingMs, err := strconv.ParseUint(row[ProcHeaderIndex[\"MILLISECONDS(READS)\"]], 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ttimeSpentOnWritingMs, err := strconv.ParseUint(row[ProcHeaderIndex[\"MILLISECONDS(WRITES)\"]], 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treadsCompletedDelta, err := strconv.ParseUint(row[ProcHeaderIndex[\"READS-COMPLETED-DELTA\"]], 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tsectorsReadDelta, err := strconv.ParseUint(row[ProcHeaderIndex[\"SECTORS-READ-DELTA\"]], 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\twritesCompletedDelta, err := strconv.ParseUint(row[ProcHeaderIndex[\"WRITES-COMPLETED-DELTA\"]], 10, 64)\n\t\tif err != nil 
{\n\t\t\treturn nil, err\n\t\t}\n\t\tsectorsWrittenDelta, err := strconv.ParseUint(row[ProcHeaderIndex[\"SECTORS-WRITTEN-DELTA\"]], 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treadBytesDelta, err := strconv.ParseUint(row[ProcHeaderIndex[\"READ-BYTES-DELTA\"]], 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treadMegabytesDelta, err := strconv.ParseUint(row[ProcHeaderIndex[\"READ-MEGABYTES-DELTA\"]], 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\twriteBytesDelta, err := strconv.ParseUint(row[ProcHeaderIndex[\"WRITE-BYTES-DELTA\"]], 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\twriteMegabytesDelta, err := strconv.ParseUint(row[ProcHeaderIndex[\"WRITE-MEGABYTES-DELTA\"]], 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treceivePackets, err := strconv.ParseUint(row[ProcHeaderIndex[\"RECEIVE-PACKETS\"]], 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ttransmitPackets, err := strconv.ParseUint(row[ProcHeaderIndex[\"TRANSMIT-PACKETS\"]], 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treceiveBytesNum, err := strconv.ParseUint(row[ProcHeaderIndex[\"RECEIVE-BYTES-NUM\"]], 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ttransmitBytesNum, err := strconv.ParseUint(row[ProcHeaderIndex[\"TRANSMIT-BYTES-NUM\"]], 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treceivePacketsDelta, err := strconv.ParseUint(row[ProcHeaderIndex[\"RECEIVE-PACKETS-DELTA\"]], 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ttransmitPacketsDelta, err := strconv.ParseUint(row[ProcHeaderIndex[\"TRANSMIT-PACKETS-DELTA\"]], 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treceiveBytesNumDelta, err := strconv.ParseUint(row[ProcHeaderIndex[\"RECEIVE-BYTES-NUM-DELTA\"]], 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ttransmitBytesNumDelta, err := 
strconv.ParseUint(row[ProcHeaderIndex[\"TRANSMIT-BYTES-NUM-DELTA\"]], 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tpc := Proc{\n\t\t\tUnixNanosecond: ts,\n\t\t\tUnixSecond: tss,\n\n\t\t\tPSEntry: PSEntry{\n\t\t\t\tProgram: row[ProcHeaderIndex[\"PROGRAM\"]],\n\t\t\t\tState: row[ProcHeaderIndex[\"STATE\"]],\n\t\t\t\tPID: pid,\n\t\t\t\tPPID: ppid,\n\t\t\t\tCPU: row[ProcHeaderIndex[\"CPU\"]],\n\t\t\t\tVMRSS: row[ProcHeaderIndex[\"VMRSS\"]],\n\t\t\t\tVMSize: row[ProcHeaderIndex[\"VMSIZE\"]],\n\t\t\t\tFD: fd,\n\t\t\t\tThreads: threads,\n\t\t\t\tVoluntaryCtxtSwitches: volCtxNum,\n\t\t\t\tNonvoluntaryCtxtSwitches: nonVolCtxNum,\n\t\t\t\tCPUNum: cpuNum,\n\t\t\t\tVMRSSNum: vmRssNum,\n\t\t\t\tVMSizeNum: vmSizeNum,\n\t\t\t},\n\n\t\t\tLoadAvg: proc.LoadAvg{\n\t\t\t\tLoadAvg1Minute: loadAvg1min,\n\t\t\t\tLoadAvg5Minute: loadAvg5min,\n\t\t\t\tLoadAvg15Minute: loadAvg15min,\n\t\t\t},\n\n\t\t\tDSEntry: DSEntry{\n\t\t\t\tDevice: row[ProcHeaderIndex[\"DEVICE\"]],\n\t\t\t\tReadsCompleted: readsCompleted,\n\t\t\t\tSectorsRead: sectorsRead,\n\t\t\t\tTimeSpentOnReading: row[ProcHeaderIndex[\"TIME(READS)\"]],\n\t\t\t\tWritesCompleted: writesCompleted,\n\t\t\t\tSectorsWritten: sectorsWritten,\n\t\t\t\tTimeSpentOnWriting: row[ProcHeaderIndex[\"TIME(WRITES)\"]],\n\t\t\t\tTimeSpentOnReadingMs: timeSpentOnReadingMs,\n\t\t\t\tTimeSpentOnWritingMs: timeSpentOnWritingMs,\n\t\t\t},\n\t\t\tReadsCompletedDelta: readsCompletedDelta,\n\t\t\tSectorsReadDelta: sectorsReadDelta,\n\t\t\tWritesCompletedDelta: writesCompletedDelta,\n\t\t\tSectorsWrittenDelta: sectorsWrittenDelta,\n\n\t\t\tReadBytesDelta: readBytesDelta,\n\t\t\tReadMegabytesDelta: readMegabytesDelta,\n\t\t\tWriteBytesDelta: writeBytesDelta,\n\t\t\tWriteMegabytesDelta: writeMegabytesDelta,\n\n\t\t\tNSEntry: NSEntry{\n\t\t\t\tInterface: row[ProcHeaderIndex[\"INTERFACE\"]],\n\t\t\t\tReceiveBytes: row[ProcHeaderIndex[\"RECEIVE-BYTES\"]],\n\t\t\t\tReceivePackets: receivePackets,\n\t\t\t\tTransmitBytes: 
row[ProcHeaderIndex[\"TRANSMIT-BYTES\"]],\n\t\t\t\tTransmitPackets: transmitPackets,\n\t\t\t\tReceiveBytesNum: receiveBytesNum,\n\t\t\t\tTransmitBytesNum: transmitBytesNum,\n\t\t\t},\n\t\t\tReceiveBytesDelta: row[ProcHeaderIndex[\"RECEIVE-BYTES-DELTA\"]],\n\t\t\tReceivePacketsDelta: receivePacketsDelta,\n\t\t\tTransmitBytesDelta: row[ProcHeaderIndex[\"TRANSMIT-BYTES-DELTA\"]],\n\t\t\tTransmitPacketsDelta: transmitPacketsDelta,\n\t\t\tReceiveBytesNumDelta: receiveBytesNumDelta,\n\t\t\tTransmitBytesNumDelta: transmitBytesNumDelta,\n\n\t\t\tExtra: []byte(row[ProcHeaderIndex[\"EXTRA\"]]),\n\t\t}\n\t\tc.PID = pc.PSEntry.PID\n\t\tc.DiskDevice = pc.DSEntry.Device\n\t\tc.NetworkInterface = pc.NSEntry.Interface\n\n\t\tc.Rows = append(c.Rows, pc)\n\t}\n\n\treturn c, nil\n}", "func LoadCSV() {\n\n\tsession := util.MongoSession.Copy()\n\tdefer session.Close()\n\n\tsession.SetMode(mgo.Monotonic, true)\n\n\tcollection := session.DB(util.Config.DbName).C(\"csvload\")\n\tabsPath, _ := filepath.Abs(\"../Go_Docker/data/convertcsv.csv\")\n\tfile, err := os.Open(absPath)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer file.Close()\n\n\treader := csv.NewReader(file)\n\n\tfor {\n\t\trecord, err := reader.Read()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tif record[0] != \"key\" {\n\t\t\terr = collection.Insert(&Mongo{Key: record[0], Value: record[1]})\n\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tlog.Printf(\"%#v\", record)\n\t\t}\n\n\t}\n}", "func CountLines(s string) (int, error) {\n\tstringReader := strings.NewReader(s)\n\treader := bufio.NewReader(stringReader)\n\tvar count int\n\tfor {\n\t\t_, isPrefix, err := reader.ReadLine()\n\n\t\tif !isPrefix {\n\t\t\tcount++\n\t\t}\n\n\t\tif err == io.EOF {\n\t\t\treturn count - 1, nil\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn count, err\n\t\t}\n\t}\n}", "func (q cmfTurntableQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) {\n\tvar count 
int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\n\terr := q.Query.QueryRowContext(ctx, exec).Scan(&count)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: failed to count cmf_turntable rows\")\n\t}\n\n\treturn count, nil\n}", "func (this *activitiesStruct) ImportCSV(data string) error {\n\trstr := strings.NewReader(data)\n\trcsv := csv.NewReader(rstr)\n\trecords, err := rcsv.ReadAll()\n\n\t/*\n\t * Check if an error occured.\n\t */\n\tif err != nil {\n\t\tmsg := err.Error()\n\t\treturn fmt.Errorf(\"Error importing activity data from CSV: %s\", msg)\n\t} else {\n\t\tthis.mutex.Lock()\n\t\tgroups := this.groups\n\t\tnumGroups := len(groups)\n\t\tgroupsCopy := make([]activityGroupStruct, numGroups)\n\t\tcopy(groupsCopy, groups)\n\t\tfirstError := error(nil)\n\t\tidxFirstErr := uint64(0)\n\t\tnumErrors := uint64(0)\n\n\t\t/*\n\t\t * Iterate over all records and parse activity data.\n\t\t */\n\t\tfor idx, record := range records {\n\t\t\trecordHasErrors := false\n\t\t\tnumFields := len(record)\n\n\t\t\t/*\n\t\t\t * Check that sufficient number of fields is present.\n\t\t\t */\n\t\t\tif numFields < EXPECTED_NUM_FIELDS {\n\n\t\t\t\t/*\n\t\t\t\t * Store first error occuring.\n\t\t\t\t */\n\t\t\t\tif firstError == nil {\n\t\t\t\t\tfirstError = fmt.Errorf(\"Expected %d fields, found %d.\", EXPECTED_NUM_FIELDS, numFields)\n\t\t\t\t\tidxFirstErr = uint64(idx)\n\t\t\t\t}\n\n\t\t\t\t/*\n\t\t\t\t * Increment error count.\n\t\t\t\t */\n\t\t\t\tif !recordHasErrors && numErrors < math.MaxUint64 {\n\t\t\t\t\tnumErrors++\n\t\t\t\t\trecordHasErrors = true\n\t\t\t\t}\n\n\t\t\t} else {\n\t\t\t\tbeginString := record[0]\n\t\t\t\tbegin, err := filter.ParseTime(beginString, false, false)\n\n\t\t\t\t/*\n\t\t\t\t * Check if begin time could be parsed.\n\t\t\t\t */\n\t\t\t\tif err != nil {\n\n\t\t\t\t\t/*\n\t\t\t\t\t * Store first error occuring.\n\t\t\t\t\t */\n\t\t\t\t\tif firstError == nil {\n\t\t\t\t\t\tmsg := err.Error()\n\t\t\t\t\t\tfirstError = 
fmt.Errorf(\"Failed to parse begin time stamp: %s\", msg)\n\t\t\t\t\t\tidxFirstErr = uint64(idx)\n\t\t\t\t\t}\n\n\t\t\t\t\t/*\n\t\t\t\t\t * Increment error count.\n\t\t\t\t\t */\n\t\t\t\t\tif !recordHasErrors && numErrors < math.MaxUint64 {\n\t\t\t\t\t\tnumErrors++\n\t\t\t\t\t\trecordHasErrors = true\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t\tweightKG := record[1]\n\n\t\t\t\t/*\n\t\t\t\t * Allow for empty weight.\n\t\t\t\t */\n\t\t\t\tif weightKG == \"\" {\n\t\t\t\t\tweightKG = \"0.0\"\n\t\t\t\t}\n\n\t\t\t\trunningDurationString := record[2]\n\t\t\t\trunningDuration := time.Duration(0)\n\n\t\t\t\t/*\n\t\t\t\t * Allow for empty running duration.\n\t\t\t\t */\n\t\t\t\tif runningDurationString != \"\" {\n\t\t\t\t\trunningDuration, err = time.ParseDuration(runningDurationString)\n\n\t\t\t\t\t/*\n\t\t\t\t\t * Check if running duration could be parsed.\n\t\t\t\t\t */\n\t\t\t\t\tif err != nil {\n\n\t\t\t\t\t\t/*\n\t\t\t\t\t\t * Store first error occuring.\n\t\t\t\t\t\t */\n\t\t\t\t\t\tif firstError == nil {\n\t\t\t\t\t\t\tmsg := err.Error()\n\t\t\t\t\t\t\tfirstError = fmt.Errorf(\"Failed to parse running duration: %s\", msg)\n\t\t\t\t\t\t\tidxFirstErr = uint64(idx)\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t/*\n\t\t\t\t\t\t * Increment error count.\n\t\t\t\t\t\t */\n\t\t\t\t\t\tif !recordHasErrors && numErrors < math.MaxUint64 {\n\t\t\t\t\t\t\tnumErrors++\n\t\t\t\t\t\t\trecordHasErrors = true\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t\trunningDistanceKM := record[3]\n\n\t\t\t\t/*\n\t\t\t\t * Allow for empty running distance.\n\t\t\t\t */\n\t\t\t\tif runningDistanceKM == \"\" {\n\t\t\t\t\trunningDistanceKM = \"0.0\"\n\t\t\t\t}\n\n\t\t\t\trunningStepCountString := record[4]\n\t\t\t\trunningStepCount := uint64(0)\n\n\t\t\t\t/*\n\t\t\t\t * Allow for empty running step count.\n\t\t\t\t */\n\t\t\t\tif runningStepCountString != \"\" {\n\t\t\t\t\trunningStepCount, err = strconv.ParseUint(runningStepCountString, 10, 64)\n\n\t\t\t\t\t/*\n\t\t\t\t\t * Check if running step count could be 
parsed.\n\t\t\t\t\t */\n\t\t\t\t\tif err != nil {\n\n\t\t\t\t\t\t/*\n\t\t\t\t\t\t * Store first error occuring.\n\t\t\t\t\t\t */\n\t\t\t\t\t\tif firstError == nil {\n\t\t\t\t\t\t\tmsg := err.Error()\n\t\t\t\t\t\t\tfirstError = fmt.Errorf(\"Failed to parse running step count: %s\", msg)\n\t\t\t\t\t\t\tidxFirstErr = uint64(idx)\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t/*\n\t\t\t\t\t\t * Increment error count.\n\t\t\t\t\t\t */\n\t\t\t\t\t\tif !recordHasErrors && numErrors < math.MaxUint64 {\n\t\t\t\t\t\t\tnumErrors++\n\t\t\t\t\t\t\trecordHasErrors = true\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t\trunningEnergyKJString := record[5]\n\t\t\t\trunningEnergyKJ := uint64(0)\n\n\t\t\t\t/*\n\t\t\t\t * Allow for empty running energy.\n\t\t\t\t */\n\t\t\t\tif runningEnergyKJString != \"\" {\n\t\t\t\t\trunningEnergyKJ, err = strconv.ParseUint(runningEnergyKJString, 10, 64)\n\n\t\t\t\t\t/*\n\t\t\t\t\t * Check if running energy could be parsed.\n\t\t\t\t\t */\n\t\t\t\t\tif err != nil {\n\n\t\t\t\t\t\t/*\n\t\t\t\t\t\t * Store first error occuring.\n\t\t\t\t\t\t */\n\t\t\t\t\t\tif firstError == nil {\n\t\t\t\t\t\t\tmsg := err.Error()\n\t\t\t\t\t\t\tfirstError = fmt.Errorf(\"Failed to parse running energy: %s\", msg)\n\t\t\t\t\t\t\tidxFirstErr = uint64(idx)\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t/*\n\t\t\t\t\t\t * Increment error count.\n\t\t\t\t\t\t */\n\t\t\t\t\t\tif !recordHasErrors && numErrors < math.MaxUint64 {\n\t\t\t\t\t\t\tnumErrors++\n\t\t\t\t\t\t\trecordHasErrors = true\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t\tcyclingDurationString := record[6]\n\t\t\t\tcyclingDuration := time.Duration(0)\n\n\t\t\t\t/*\n\t\t\t\t * Allow for empty cycling duration.\n\t\t\t\t */\n\t\t\t\tif cyclingDurationString != \"\" {\n\t\t\t\t\tcyclingDuration, err = time.ParseDuration(cyclingDurationString)\n\n\t\t\t\t\t/*\n\t\t\t\t\t * Check if cycling duration could be parsed.\n\t\t\t\t\t */\n\t\t\t\t\tif err != nil {\n\n\t\t\t\t\t\t/*\n\t\t\t\t\t\t * Store first error occuring.\n\t\t\t\t\t\t 
*/\n\t\t\t\t\t\tif firstError == nil {\n\t\t\t\t\t\t\tmsg := err.Error()\n\t\t\t\t\t\t\tfirstError = fmt.Errorf(\"Failed to parse cycling duration: %s\", msg)\n\t\t\t\t\t\t\tidxFirstErr = uint64(idx)\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t/*\n\t\t\t\t\t\t * Increment error count.\n\t\t\t\t\t\t */\n\t\t\t\t\t\tif !recordHasErrors && numErrors < math.MaxUint64 {\n\t\t\t\t\t\t\tnumErrors++\n\t\t\t\t\t\t\trecordHasErrors = true\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t\tcyclingDistanceKM := record[7]\n\n\t\t\t\t/*\n\t\t\t\t * Allow for empty cycling distance.\n\t\t\t\t */\n\t\t\t\tif cyclingDistanceKM == \"\" {\n\t\t\t\t\tcyclingDistanceKM = \"0.0\"\n\t\t\t\t}\n\n\t\t\t\tcyclingEnergyKJString := record[8]\n\t\t\t\tcyclingEnergyKJ := uint64(0)\n\n\t\t\t\t/*\n\t\t\t\t * Allow for empty cycling energy.\n\t\t\t\t */\n\t\t\t\tif cyclingEnergyKJString != \"\" {\n\t\t\t\t\tcyclingEnergyKJ, err = strconv.ParseUint(cyclingEnergyKJString, 10, 64)\n\n\t\t\t\t\t/*\n\t\t\t\t\t * Check if cycling energy could be parsed.\n\t\t\t\t\t */\n\t\t\t\t\tif err != nil {\n\n\t\t\t\t\t\t/*\n\t\t\t\t\t\t * Store first error occuring.\n\t\t\t\t\t\t */\n\t\t\t\t\t\tif firstError == nil {\n\t\t\t\t\t\t\tmsg := err.Error()\n\t\t\t\t\t\t\tfirstError = fmt.Errorf(\"Failed to parse cycling energy: %s\", msg)\n\t\t\t\t\t\t\tidxFirstErr = uint64(idx)\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t/*\n\t\t\t\t\t\t * Increment error count.\n\t\t\t\t\t\t */\n\t\t\t\t\t\tif !recordHasErrors && numErrors < math.MaxUint64 {\n\t\t\t\t\t\t\tnumErrors++\n\t\t\t\t\t\t\trecordHasErrors = true\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t\totherEnergyKJString := record[9]\n\t\t\t\totherEnergyKJ := uint64(0)\n\n\t\t\t\t/*\n\t\t\t\t * Allow for empty other energy.\n\t\t\t\t */\n\t\t\t\tif otherEnergyKJString != \"\" {\n\t\t\t\t\totherEnergyKJ, err = strconv.ParseUint(otherEnergyKJString, 10, 64)\n\n\t\t\t\t\t/*\n\t\t\t\t\t * Check if other energy could be parsed.\n\t\t\t\t\t */\n\t\t\t\t\tif err != nil 
{\n\n\t\t\t\t\t\t/*\n\t\t\t\t\t\t * Store first error occuring.\n\t\t\t\t\t\t */\n\t\t\t\t\t\tif firstError == nil {\n\t\t\t\t\t\t\tmsg := err.Error()\n\t\t\t\t\t\t\tfirstError = fmt.Errorf(\"Failed to parse other energy: %s\", msg)\n\t\t\t\t\t\t\tidxFirstErr = uint64(idx)\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t/*\n\t\t\t\t\t\t * Increment error count.\n\t\t\t\t\t\t */\n\t\t\t\t\t\tif !recordHasErrors && numErrors < math.MaxUint64 {\n\t\t\t\t\t\t\tnumErrors++\n\t\t\t\t\t\t\trecordHasErrors = true\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t\t/*\n\t\t\t\t * Create activity info.\n\t\t\t\t */\n\t\t\t\tinfo := ActivityInfo{\n\t\t\t\t\tBegin: begin,\n\t\t\t\t\tWeightKG: weightKG,\n\t\t\t\t\tRunningDuration: runningDuration,\n\t\t\t\t\tRunningDistanceKM: runningDistanceKM,\n\t\t\t\t\tRunningStepCount: runningStepCount,\n\t\t\t\t\tRunningEnergyKJ: runningEnergyKJ,\n\t\t\t\t\tCyclingDuration: cyclingDuration,\n\t\t\t\t\tCyclingDistanceKM: cyclingDistanceKM,\n\t\t\t\t\tCyclingEnergyKJ: cyclingEnergyKJ,\n\t\t\t\t\tOtherEnergyKJ: otherEnergyKJ,\n\t\t\t\t}\n\n\t\t\t\tg, err := createActivityGroup(&info)\n\n\t\t\t\t/*\n\t\t\t\t * Check if activity group could be parsed.\n\t\t\t\t */\n\t\t\t\tif err != nil {\n\n\t\t\t\t\t/*\n\t\t\t\t\t * Store first error occuring.\n\t\t\t\t\t */\n\t\t\t\t\tif firstError == nil {\n\t\t\t\t\t\tfirstError = err\n\t\t\t\t\t\tidxFirstErr = uint64(idx)\n\t\t\t\t\t}\n\n\t\t\t\t\t/*\n\t\t\t\t\t * Increment error count.\n\t\t\t\t\t */\n\t\t\t\t\tif !recordHasErrors && numErrors < math.MaxUint64 {\n\t\t\t\t\t\tnumErrors++\n\t\t\t\t\t}\n\n\t\t\t\t} else {\n\t\t\t\t\tgroupsCopy = append(groupsCopy, g)\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t}\n\n\t\t/*\n\t\t * Only modify activity groups if no error occured.\n\t\t */\n\t\tif firstError == nil {\n\n\t\t\t/*\n\t\t\t * Comparison function for sorting algorithm.\n\t\t\t */\n\t\t\tless := func(i int, j int) bool {\n\t\t\t\tgi := groupsCopy[i]\n\t\t\t\tgiBegin := gi.begin\n\t\t\t\tgj := groupsCopy[j]\n\t\t\t\tgjBegin := 
gj.begin\n\t\t\t\tresult := giBegin.Before(gjBegin)\n\t\t\t\treturn result\n\t\t\t}\n\n\t\t\tsort.SliceStable(groupsCopy, less)\n\t\t\tthis.groups = groupsCopy\n\t\t\tthis.revision++\n\t\t}\n\n\t\tthis.mutex.Unlock()\n\n\t\t/*\n\t\t * Check if error occured.\n\t\t */\n\t\tif firstError != nil {\n\t\t\tmsg := firstError.Error()\n\t\t\treturn fmt.Errorf(\"Error deserializing activity data: %d erroneous activity groups, first at group number %d: %s\", numErrors, idxFirstErr, msg)\n\t\t} else {\n\t\t\treturn nil\n\t\t}\n\n\t}\n\n}", "func (a UserGroupAggregateRow) Count() int {\n\treturn a.count\n}", "func ReadCSV(path string, cols map[string]int, sep rune, comment string) (<-chan map[string]string, <-chan error, chan<- int) {\n\tout, err, sig, sigv := make(chan map[string]string, 64), make(chan error, 1), make(chan int), 0\n\tgo func() {\n\t\tdefer func() {\n\t\t\tif e := recover(); e != nil {\n\t\t\t\terr <- e.(error)\n\t\t\t}\n\t\t\tclose(err)\n\t\t\tclose(out)\n\t\t}()\n\t\tin, ierr, isig := readLn(path)\n\t\tdefer close(isig)\n\t\thandleSig(sig, &sigv)\n\n\t\tvcols, wid, line, algn := make(map[string]int, 32), 0, 0, 0\n\t\tfor ln := range in {\n\t\t\tfor line++; ; {\n\t\t\t\tswitch {\n\t\t\t\tcase len(strings.TrimLeft(ln, \" \")) == 0:\n\t\t\t\tcase comment != \"\" && strings.HasPrefix(ln, comment):\n\t\t\t\tcase sep == '\\x00':\n\t\t\t\t\tfor _, r := range sepSet {\n\t\t\t\t\t\tif c := len(splitCSV(ln, r)); c > wid {\n\t\t\t\t\t\t\twid, sep = c, r\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\tcase len(vcols) == 0:\n\t\t\t\t\tsl, uc, sc, mc, qc := splitCSV(ln, sep), make(map[int]int), make(map[string]int), 0, make(map[string]int)\n\t\t\t\t\tfor c, i := range cols {\n\t\t\t\t\t\tif c = strings.Trim(c, \" \"); c != \"\" && i > 0 {\n\t\t\t\t\t\t\tsc[c] = i\n\t\t\t\t\t\t\tif uc[i]++; i > mc {\n\t\t\t\t\t\t\t\tmc = i\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tfor i, c := range sl {\n\t\t\t\t\t\tif c = strings.Trim(c, \" \"); c != \"\" 
{\n\t\t\t\t\t\t\tif len(sc) == 0 || sc[c] > 0 {\n\t\t\t\t\t\t\t\tvcols[c] = i + 1\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif _, e := strconv.ParseFloat(c, 64); e != nil {\n\t\t\t\t\t\t\t\tqc[c] = i + 1\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tswitch wid = len(sl); {\n\t\t\t\t\tcase len(sc) == 0 && len(qc) == wid:\n\t\t\t\t\tcase len(sc) == 0:\n\t\t\t\t\t\tpanic(fmt.Errorf(\"no heading in CSV file %q and no column map provided\", path))\n\t\t\t\t\tcase len(vcols) == len(sc):\n\t\t\t\t\tcase len(vcols) > 0:\n\t\t\t\t\t\tpanic(fmt.Errorf(\"missing columns in CSV file %q\", path))\n\t\t\t\t\tcase len(qc) == wid || mc > wid:\n\t\t\t\t\t\tpanic(fmt.Errorf(\"column map incompatible with CSV file %q\", path))\n\t\t\t\t\tcase len(uc) < len(sc):\n\t\t\t\t\t\tpanic(fmt.Errorf(\"ambiguous column map provided for CSV file %q\", path))\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tvcols = sc\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\tdefault:\n\t\t\t\t\tif sl := splitCSV(ln, sep); len(sl) == wid {\n\t\t\t\t\t\tm, heading := make(map[string]string, len(vcols)), true\n\t\t\t\t\t\tfor c, i := range vcols {\n\t\t\t\t\t\t\tf := strings.Trim(sl[i-1], \" \")\n\t\t\t\t\t\t\tif len(f) > 0 {\n\t\t\t\t\t\t\t\tm[c] = f\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\theading = heading && f == c\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif !heading && len(m) > 0 {\n\t\t\t\t\t\t\tm[\"~line\"] = strconv.Itoa(line)\n\t\t\t\t\t\t\tout <- m\n\t\t\t\t\t\t}\n\t\t\t\t\t} else if algn++; line > 200 && float64(algn)/float64(line) > 0.02 {\n\t\t\t\t\t\tpanic(fmt.Errorf(\"excessive column misalignment in CSV file %q (>%d rows)\", path, algn))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif sigv != 0 {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif e := <-ierr; e != nil {\n\t\t\tpanic(fmt.Errorf(\"problem reading CSV file %q (%v)\", path, e))\n\t\t}\n\t}()\n\treturn out, err, sig\n}", "func CSVHandler(r *http.Request) (string, int, error) {\n\t// Validate the request was via POST method\n\tcode, err := ValidateMethod(r, 
\"POST\")\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn \"\", code, err\n\t}\n\n\tlog.Infof(\"Handling CSV data request...\")\n\n\t// Attempt to get runId query argument\n\trunId, err := TryGetQueryArg(r, \"runId\")\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn \"\", http.StatusBadRequest, err\n\t}\n\n\t// Grab the database\n\tdb := datastore.GetDatabase(\"data/test.sqlite\")\n\n\t// Make sure there is no error when retrieving the database connection\n\tif db == nil {\n\t\terr := fmt.Errorf(\"Unable to connect to database for CSVHandler\")\n\t\tlog.Error(err)\n\t\treturn \"\", http.StatusInternalServerError, err\n\t}\n\n\t// Retrieve data related to specific run number. Should only be one run\n\trows, err := db.Queryx(query, runId)\n\tif err != nil {\n\t\tlog.Error(\"Unable to retrieve sensor data for CSVHandler\")\n\t\tlog.Error(err)\n\t\treturn \"\", http.StatusInternalServerError, fmt.Errorf(internalServerErrMsg)\n\t}\n\n\tvar curRow packets.DBSensorData\n\n\t// Attempt to fill initial record with data\n\tdbRows := make([]packets.DBSensorData, 1)\n\tfor rows.Next() {\n\t\tif err := rows.StructScan(&curRow); err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn \"\", http.StatusInternalServerError, fmt.Errorf(internalServerErrMsg)\n\t\t}\n\n\t\tdbRows = append(dbRows, curRow)\n\t}\n\n\t// Should only ever have 1 row returned, warn if otherwise\n\tif len(dbRows) > 1 {\n\t\tlog.Warn(\"Multiple records retrieved from table SensorData for run number\", runId)\n\t}\n\n\t// Convert rows into CSV format\n\tbuf := new(bytes.Buffer)\n\tdbWriter := csv.NewWriter(buf)\n\n\t// Write header\n\tdbWriter.Write(headerSlice)\n\n\tfor _, row := range dbRows {\n\t\tdbWriter.Write(row.ToCSVString())\n\t}\n\tdbWriter.Flush()\n\n\treturn buf.String(), http.StatusOK, nil\n}", "func (dataset *Dataset) Count() int {\r\n\treturn len(dataset.data)\r\n}", "func NumOfDataEntries(db *sql.DB, name string) (int, error) {\n\tscript := fmt.Sprintf(\"SELECT count(*) FROM %v;\", name)\n\tvar 
num int\n\terr := db.QueryRow(script).Scan(&num)\n\treturn num, err\n}", "func (r *linesIterator) Count() uint64 {\n\treturn uint64(r.linesCount)\n}", "func CountOpenFiles() int {\n\tt.Lock()\n\tdefer t.Unlock()\n\treturn len(t.entries)\n}", "func readCSVfile(path string) ([][]float64, error) {\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr := csv.NewReader(bufio.NewReader(f))\n\tdataPoint := make([][]float64, 0)\n\tfor {\n\t\trecord, err := r.Read()\n\t\t// Stop at EOF.\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tvar row = make([]float64, len(record))\n\t\tfor index := range record {\n\t\t\trow[index], _ = strconv.ParseFloat(record[index], 64)\n\t\t}\n\t\tdataPoint = append(dataPoint, row)\n\t}\n\tif len(dataPoint) > 0 {\n\t\treturn dataPoint, nil\n\t}\n\treturn nil, errors.New(\"The length of past data is 0\")\n}", "func (r *CsvHandler) ImportFromCsv(c *gin.Context) {\n\tlogger := r.logger.New(\"action\", \"UpdateFromCsv\")\n\tuser := r.contextService.MustGetCurrentUser(c)\n\n\tfile, _, err := c.Request.FormFile(\"file\")\n\n\tif nil != err {\n\t\tlogger.Error(\"can't get file\", \"err\", err)\n\t\terrcodes.AddError(c, errcodes.CodeFileInvalid)\n\t\treturn\n\t}\n\n\tdefer file.Close()\n\n\tbuf := bytes.NewBuffer(nil)\n\tif _, err := io.Copy(buf, file); err != nil {\n\t\tprivateError := errors.PrivateError{Message: \"can't write file to buffer\"}\n\t\tprivateError.AddLogPair(\"error\", err)\n\t\terrors.AddErrors(c, &privateError)\n\t\treturn\n\t}\n\n\ttErrs, failed, success := r.csvService.ImportFromCsv(buf, user)\n\tdata := struct {\n\t\tSuccess uint64 `json:\"success\"`\n\t\tFailed uint64 `json:\"failed\"`\n\t}{\n\t\tSuccess: success,\n\t\tFailed: failed,\n\t}\n\n\tif len(tErrs) > 0 {\n\t\tc.Set(\"data\", data)\n\t\terrors.AddErrors(c, tErrs...)\n\t\treturn\n\t}\n\n\tres := response.NewResponse().AddMessage(\"Requests are successfully imported\")\n\tres.Data = data\n\tc.JSON(http.StatusOK, res)\n}", "func 
CountPackageAggRows(ctx context.Context, db SQLHandle, cond PackageAggValues) (count int, err error) {\n\tif _, err = queryWithJSONArgs(ctx, db, func(int) []interface{} { return []interface{}{&count} }, SQLCountPackageAggRows, cond); err != nil {\n\t\treturn 0, formatError(\"CountPackageAggRows\", err)\n\t}\n\treturn count, nil\n}", "func (c ColDecimal256) Rows() int {\n\treturn len(c)\n}", "func (this Worker) TotalLines() int {\n return LINES_PER_FILE\n}", "func (q sourceQuery) Count(exec boil.Executor) (int64, error) {\n\tvar count int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\n\terr := q.Query.QueryRow(exec).Scan(&count)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"mdbmodels: failed to count sources rows\")\n\t}\n\n\treturn count, nil\n}" ]
[ "0.6375762", "0.6306837", "0.61106974", "0.60449296", "0.60007024", "0.59904045", "0.58921826", "0.5806305", "0.57993895", "0.57476985", "0.56987125", "0.56797904", "0.56628454", "0.56590277", "0.5635066", "0.5588222", "0.55808735", "0.55445486", "0.5538845", "0.5538845", "0.5519105", "0.5519105", "0.5502711", "0.54968846", "0.5433581", "0.5429684", "0.54257226", "0.5379317", "0.5335139", "0.53275794", "0.5317617", "0.53106385", "0.5308484", "0.53021216", "0.5291483", "0.5286893", "0.5278305", "0.5278094", "0.52741003", "0.52497095", "0.5238329", "0.52163994", "0.52141285", "0.5211914", "0.51822317", "0.51821965", "0.5179491", "0.51689637", "0.5168464", "0.5151884", "0.51256895", "0.51196957", "0.5119639", "0.5116419", "0.50886154", "0.5088227", "0.50689226", "0.5051888", "0.5050418", "0.5046655", "0.50432634", "0.5017003", "0.50158685", "0.50151867", "0.49987483", "0.49792197", "0.49730667", "0.4966895", "0.49651256", "0.49559197", "0.49475917", "0.49440834", "0.49379483", "0.49324796", "0.4929607", "0.49282047", "0.49176374", "0.49065328", "0.48942524", "0.48824498", "0.48739073", "0.48737714", "0.48707312", "0.48658702", "0.48638877", "0.4856483", "0.4838278", "0.4836583", "0.4824572", "0.48197016", "0.48010805", "0.480027", "0.479763", "0.47943625", "0.4792816", "0.4789047", "0.4785237", "0.47848344", "0.4783863", "0.47825146" ]
0.8511326
0
TestService is the doc.go usage example
func TestService(t *testing.T) { // Create service to test s := res.NewService("foo") s.Handle("bar.$id", res.Access(res.AccessGranted), res.GetModel(func(r res.ModelRequest) { r.Model(struct { Message string `json:"msg"` }{r.PathParam("id")}) }), ) // Create test session c := restest.NewSession(t, s) defer c.Close() // Test sending get request and validate response c.Get("foo.bar.42"). Response(). AssertModel(map[string]string{"msg": "42"}) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func testService() *corev1.Service {\n\treturn &corev1.Service{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tNamespace: \"default\",\n\t\t\tName: \"symbols\",\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"deploy\": \"sourcegraph\",\n\t\t\t},\n\t\t},\n\t\tSpec: corev1.ServiceSpec{\n\t\t\tType: corev1.ServiceTypeClusterIP,\n\t\t\tPorts: []corev1.ServicePort{\n\t\t\t\t{\n\t\t\t\t\tName: \"http\",\n\t\t\t\t\tPort: 3184,\n\t\t\t\t\tProtocol: corev1.ProtocolTCP,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tStatus: corev1.ServiceStatus{},\n\t}\n}", "func RunServiceExample(nsxManager, nsxUser, nsxPassword string, debug bool) {\n\t//\n\t// Create NSXClient object.\n\t//\n\tnsxclient := gonsx.NewNSXClient(nsxManager, nsxUser, nsxPassword, true, debug)\n\n\t//\n\t// Get All Services.\n\t//\n\t// Create api object.\n\tgetAllAPI := service.NewGetAll(\"globalroot-0\")\n\n\t// make api call.\n\terr := nsxclient.Do(getAllAPI)\n\n\t// check if there were any errors\n\tif err != nil {\n\t\tfmt.Println(\"Error: \", err)\n\t}\n\n\t// check the status code and proceed accordingly.\n\tif getAllAPI.StatusCode() == 200 {\n\t\tAllApplications := getAllAPI.GetResponse().Applications\n\t\tfor _, service := range AllApplications {\n\t\t\tfmt.Printf(\"objectId: %-20s name: %-20s\\n\", service.ObjectID, service.Name)\n\t\t}\n\t} else {\n\t\tfmt.Println(\"Status code:\", getAllAPI.StatusCode())\n\t\tfmt.Println(\"Response: \", getAllAPI.ResponseObject())\n\t}\n\n\t//\n\t// Get Single Service\n\t//\n\t// Get All ( we re-utilize the GetAll object from above here )\n\t// check the status code and proceed accordingly.\n\tif getAllAPI.StatusCode() == 200 {\n\t\tservice := getAllAPI.GetResponse().FilterByName(\"OVP_test1\")\n\t\tif service.ObjectID != \"\" {\n\t\t\tfmt.Println(service)\n\t\t} else {\n\t\t\tfmt.Println(\"Not found!\")\n\t\t}\n\t} else {\n\t\tfmt.Println(\"Status code:\", getAllAPI.StatusCode())\n\t\tfmt.Println(\"Response: \", getAllAPI.ResponseObject())\n\t}\n\n\t//\n\t// Create single 
service.\n\t//\n\tcreateAPI := service.NewCreate(\"globalroot-0\", \"test\", \"desc\", \"TCP\", \"8080\")\n\terr = nsxclient.Do(createAPI)\n\tif err != nil {\n\t\tfmt.Println(\"Error:\", err)\n\t}\n\n\tif createAPI.StatusCode() == 201 {\n\t\tapplicationID := createAPI.ResponseObject()\n\t\tfmt.Println(\"Service created successfully.\")\n\t\tfmt.Println(\"objectId:\", applicationID)\n\t} else {\n\t\tfmt.Println(\"Failed to created the service!\")\n\t\tfmt.Println(createAPI.ResponseObject())\n\t}\n\n\t// UPDATE\n\t//\n\t// Updating a single service.\n\t// Get list of all applications. Search through looking for application match.\n\t// Update the attribute/s of the service.\n\tgetAllAPI = service.NewGetAll(\"globalroot-0\")\n\n\t// make api call.\n\terr = nsxclient.Do(getAllAPI)\n\n\t// check if there were any errors\n\tif err != nil {\n\t\tfmt.Println(\"Error: \", err)\n\t}\n\n\t// check the status code and proceed accordingly.\n\tif getAllAPI.StatusCode() != 200 {\n\t\tfmt.Printf(\"Status code: %v, Response: %v\\n\", getAllAPI.StatusCode(), getAllAPI.ResponseObject())\n\t}\n\n\t// Get All ( we re-utilize the GetAll object from above here )\n\t// check the status code and proceed accordingly.\n\tif getAllAPI.StatusCode() == 200 {\n\t\tservice := getAllAPI.GetResponse().FilterByName(\"test\")\n\t\tif service.ObjectID != \"\" {\n\t\t\tfmt.Println(\"Found service: \", service.ObjectID, service.Name)\n\t\t} else {\n\t\t\tfmt.Println(\"Not found!\")\n\t\t}\n\t} else {\n\t\tfmt.Println(\"Status code:\", getAllAPI.StatusCode())\n\t\tfmt.Println(\"Response: \", getAllAPI.ResponseObject())\n\t}\n\n\t// Change the name of the service from test to test_https and change the port to TCP/443.\n\tserviceToModify := getAllAPI.GetResponse().FilterByName(\"test\")\n\tserviceToModify.Name = \"test_https\"\n\tmodifyElement := service.Element{ApplicationProtocol: \"TCP\", Value: \"443\"}\n\tserviceToModify.Element = []service.Element{modifyElement}\n\tupdateAPI := 
service.NewUpdate(serviceToModify.ObjectID, serviceToModify)\n\n\terr = nsxclient.Do(updateAPI)\n\tif err != nil {\n\t\tfmt.Println(\"Error: \", err)\n\t}\n\n\tif updateAPI.StatusCode() == 200 {\n\t\tnewObject := updateAPI.GetResponse()\n\t\tfmt.Println(\"Service updated successfully.\")\n\t\tfmt.Println(\"objectId:\", newObject.ObjectID)\n\t} else {\n\t\tfmt.Println(\"Failed to update the service!\")\n\t\tfmt.Println(updateAPI.ResponseObject())\n\t}\n\n\t//\n\t// Deleting a single service.\n\t//\n\n\t// Let's refresh the getAllAPI call, so that it has the last created data.\n\terr = nsxclient.Do(getAllAPI)\n\tif err != nil {\n\t\tfmt.Println(\"Error: \", err)\n\t}\n\n\tapplicationIDToDelete := getAllAPI.GetResponse().FilterByName(\"test_https\")\n\tdeleteAPI := service.NewDelete(applicationIDToDelete.ObjectID)\n\terr = nsxclient.Do(deleteAPI)\n\tif err != nil {\n\t\tfmt.Println(\"Error:\", err)\n\t}\n\n\tif deleteAPI.StatusCode() == 200 {\n\t\tfmt.Println(\"Service deleted successfully.\")\n\t} else {\n\t\tfmt.Println(\"Failed to delete the service!\")\n\t\tfmt.Println(\"Status code:\", deleteAPI.StatusCode())\n\t\tfmt.Println(\"Response:\", deleteAPI.ResponseObject())\n\t}\n\n}", "func TestServices(t *testing.T) { check.TestingT(t) }", "func Test(c *gin.Context) {\n\tvar serviceTestDTO model.ServiceTest\n\n\terr := c.BindJSON(&serviceTestDTO)\n\tif err != nil {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"error\": err.Error()})\n\t\treturn\n\t}\n\trefConf := genericServiceImpl.NewRefConf(\"dubbo-admin\", serviceTestDTO.Service, \"dubbo\")\n\ttime.Sleep(2 * time.Second)\n\tresp, err := refConf.\n\t\tGetRPCService().(*generic.GenericService).\n\t\tInvoke(\n\t\t\tc,\n\t\t\tserviceTestDTO.Method,\n\t\t\tserviceTestDTO.ParameterTypes,\n\t\t\t[]hessian.Object{\"A003\"}, // fixme\n\t\t)\n\trefConf.GetInvoker().Destroy()\n\tif err != nil {\n\t\tlogger.Error(\"Error do generic invoke for service test\", err)\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"error\": 
err.Error()})\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, resp)\n}", "func (us *userService) Test(ctx *atreugo.RequestCtx) error {\n\treturn ctx.TextResponse(\"Hello World!\")\n}", "func TestServiceStart(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.GetResource(func(r res.GetRequest) { r.NotFound() }))\n\t}, nil)\n}", "func TestServiceTestSuite(t *testing.T) {\n\tsuite.Run(t, new(ServiceTestSuite))\n}", "func TestServiceTestSuite(t *testing.T) {\n\tsuite.Run(t, new(ServiceTestSuite))\n}", "func generateServiceDocumentation(svc *broker.ServiceDefinition) string {\n\tcatalog, err := svc.CatalogEntry()\n\tif err != nil {\n\t\tlog.Fatalf(\"Error getting catalog entry for service %s, %v\", svc.Name, err)\n\t}\n\n\tvars := map[string]interface{}{\n\t\t\"catalog\": catalog,\n\t\t\"metadata\": catalog.Metadata,\n\t\t\"bindIn\": svc.BindInputVariables,\n\t\t\"bindOut\": svc.BindOutputVariables,\n\t\t\"provisionInputVars\": svc.ProvisionInputVariables,\n\t\t\"examples\": svc.Examples,\n\t}\n\n\tfuncMap := template.FuncMap{\n\t\t\"code\": mdCode,\n\t\t\"join\": strings.Join,\n\t\t\"varNotes\": varNotes,\n\t\t\"jsonCodeBlock\": jsonCodeBlock,\n\t\t\"exampleCommands\": func(example broker.ServiceExample) string {\n\t\t\tplanName := \"unknown-plan\"\n\t\t\tfor _, plan := range catalog.Plans {\n\t\t\t\tif plan.ID == example.PlanId {\n\t\t\t\t\tplanName = plan.Name\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tparams, err := json.Marshal(example.ProvisionParams)\n\t\t\tif err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tprovision := fmt.Sprintf(\"$ cf create-service %s %s my-%s-example -c `%s`\", catalog.Name, planName, catalog.Name, params)\n\n\t\t\tparams, err = json.Marshal(example.BindParams)\n\t\t\tif err != nil {\n\t\t\t\treturn err.Error()\n\t\t\t}\n\t\t\tbind := fmt.Sprintf(\"$ cf bind-service my-app my-%s-example -c `%s`\", catalog.Name, params)\n\t\t\treturn provision + \"\\n\" + bind\n\t\t},\n\t}\n\n\ttemplateText := 
`\n--------------------------------------------------------------------------------\n\n# <a name=\"{{ .catalog.Name }}\"></a> ![]({{ .metadata.ImageUrl }}) {{ .metadata.DisplayName }}\n{{ .metadata.LongDescription }}\n\n * [Documentation]({{.metadata.DocumentationUrl }})\n * [Support]({{ .metadata.SupportUrl }})\n * Catalog Metadata ID: {{code .catalog.ID}}\n * Tags: {{ join .catalog.Tags \", \" }}\n * Service Name: {{ code .catalog.Name }}\n\n## Provisioning\n\n**Request Parameters**\n\n{{ if eq (len .provisionInputVars) 0 }}_No parameters supported._{{ end }}\n{{ range $i, $var := .provisionInputVars }} * {{ varNotes $var }}\n{{ end }}\n\n## Binding\n\n**Request Parameters**\n\n{{ if eq (len .bindIn) 0 }}_No parameters supported._{{ end }}\n{{ range $i, $var := .bindIn }} * {{ varNotes $var }}\n{{ end }}\n**Response Parameters**\n\n{{ range $i, $var := .bindOut }} * {{ varNotes $var }}\n{{ end }}\n## Plans\n\nThe following plans are built-in to the GCP Service Broker and may be overriden\nor disabled by the broker administrator.\n\n{{ if eq (len .catalog.Plans) 0 }}_No plans available_{{ end }}\n{{ range $i, $plan := .catalog.Plans }} * **{{ $plan.Name }}**: {{ $plan.Description }} Plan ID: {{code $plan.ID}}.\n{{ end }}\n\n## Examples\n\n{{ if eq (len .examples) 0 }}_No examples._{{ end }}\n\n{{ range $i, $example := .examples}}\n### {{ $example.Name }}\n\n\n{{ $example.Description }}\nUses plan: {{ code $example.PlanId }}.\n\n**Provision**\n\n{{ jsonCodeBlock $example.ProvisionParams }}\n\n**Bind**\n\n{{ jsonCodeBlock $example.BindParams }}\n\n**Cloud Foundry Example**\n\n<pre>\n{{exampleCommands $example}}\n</pre>\n\n{{ end }}\n`\n\n\ttmpl, err := template.New(\"titleTest\").Funcs(funcMap).Parse(templateText)\n\tif err != nil {\n\t\tlog.Fatalf(\"parsing: %s\", err)\n\t}\n\n\t// Run the template to verify the output.\n\tvar buf bytes.Buffer\n\terr = tmpl.Execute(&buf, vars)\n\tif err != nil {\n\t\tlog.Fatalf(\"execution: %s\", err)\n\t}\n\n\treturn 
buf.String()\n\n}", "func TestConsoleService(t *testing.T) {\n\tc := new(ConsoleService)\n\n\tc.Started()\n\tc.Stopped()\n\tc.Error(\"Error msg\")\n\tc.Init()\n\tc.Close()\n}", "func TestServiceSuite(t *testing.T) {\n\tsuite.Run(t, new(TestSuite))\n}", "func main() {\n\tservice := service.Service{}\n\tservice.Start(\"\")\n}", "func (suite *PouchAPIHelpSuite) TestExample(c *check.C) {\n}", "func TestCallToPublicService(t *testing.T) {\n\tt.Parallel()\n\n\tclients := Setup(t)\n\n\tt.Log(\"Creating a Service for the helloworld test app.\")\n\tnames := test.ResourceNames{\n\t\tService: test.ObjectNameForTest(t),\n\t\tImage: test.HelloWorld,\n\t}\n\n\ttest.EnsureTearDown(t, clients, &names)\n\n\tresources, err := v1test.CreateServiceReady(t, clients, &names)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create initial Service: %v: %v\", names.Service, err)\n\t}\n\n\tif resources.Route.Status.URL.Host == \"\" {\n\t\tt.Fatalf(\"Route is missing .Status.URL: %#v\", resources.Route.Status)\n\t}\n\tif resources.Route.Status.Address == nil {\n\t\tt.Fatalf(\"Route is missing .Status.Address: %#v\", resources.Route.Status)\n\t}\n\n\tgatewayTestCases := []struct {\n\t\tname string\n\t\turl *url.URL\n\t\taccessibleExternally bool\n\t}{\n\t\t{\"local_address\", resources.Route.Status.Address.URL.URL(), false},\n\t\t{\"external_address\", resources.Route.Status.URL.URL(), true},\n\t}\n\n\tfor _, tc := range gatewayTestCases {\n\t\ttc := tc\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\t\t\tif !test.ServingFlags.DisableLogStream {\n\t\t\t\tcancel := logstream.Start(t)\n\t\t\t\tdefer cancel()\n\t\t\t}\n\t\t\ttestProxyToHelloworld(t, clients, tc.url, false /*inject*/, tc.accessibleExternally)\n\t\t})\n\t}\n}", "func setupToDoListServiceTest(t *testing.T) func() {\n\tctrl := gomock.NewController(t)\n\tmockToDoListRepository = ports.NewMockToDoListRepository(ctrl)\n\tdefaultToDoListService = NewToDoListService(mockToDoListRepository)\n\treturn func() 
{\n\t\tdefaultToDoListService = nil\n\t\tdefer ctrl.Finish()\n\t}\n}", "func TestTaskService(t *testing.T, fn BackendComponentFactory) {\n\tsys, cancel := fn(t)\n\tdefer cancel()\n\tif sys.TaskServiceFunc == nil {\n\t\tsys.ts = task.PlatformAdapter(sys.S, sys.LR, sys.Sch)\n\t} else {\n\t\tsys.ts = sys.TaskServiceFunc()\n\t}\n\n\tt.Run(\"TaskService\", func(t *testing.T) {\n\t\t// We're running the subtests in parallel, but if we don't use this wrapper,\n\t\t// the defer cancel() call above would return before the parallel subtests completed.\n\t\t//\n\t\t// Running the subtests in parallel might make them slightly faster,\n\t\t// but more importantly, it should exercise concurrency to catch data races.\n\n\t\tt.Run(\"Task CRUD\", func(t *testing.T) {\n\t\t\tt.Parallel()\n\t\t\ttestTaskCRUD(t, sys)\n\t\t})\n\n\t\tt.Run(\"Task Runs\", func(t *testing.T) {\n\t\t\tt.Parallel()\n\t\t\ttestTaskRuns(t, sys)\n\t\t})\n\n\t\tt.Run(\"Task Concurrency\", func(t *testing.T) {\n\t\t\tif testing.Short() {\n\t\t\t\tt.Skip(\"skipping in short mode\")\n\t\t\t}\n\t\t\tt.Parallel()\n\t\t\ttestTaskConcurrency(t, sys)\n\t\t})\n\n\t\tt.Run(\"Task Meta Updates\", func(t *testing.T) {\n\t\t\tt.Parallel()\n\t\t\ttestMetaUpdate(t, sys)\n\t\t})\n\t})\n}", "func Test_IndexHandler(t *testing.T) {\n\tvar (\n\t\tversionMsg Service\n\t\tresp *http.Response\n\t)\n\n\tsvc := NewService()\n\n\tts := httptest.NewServer(svc.NewRouter(\"*\"))\n\tdefer ts.Close()\n\n\treq, _ := http.NewRequest(\"GET\", ts.URL+\"/\", nil)\n\n\toutputLog := helpers.CaptureOutput(func() {\n\t\tresp, _ = http.DefaultClient.Do(req)\n\t})\n\n\tif got, want := resp.StatusCode, 200; got != want {\n\t\tt.Fatalf(\"Invalid status code, got %d but want %d\", got, want)\n\t}\n\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tt.Fatalf(\"Got an error when reading body: %s\", err.Error())\n\t}\n\n\terr = json.Unmarshal(data, &versionMsg)\n\tif err != nil {\n\t\tt.Fatalf(\"Got an error when parsing json: %s\", 
err.Error())\n\t}\n\tif got, want := versionMsg.Version, svc.Version; got != want {\n\t\tt.Fatalf(\"Wrong version return, got %s but want %s\", got, want)\n\t}\n\tif got, want := versionMsg.Name, svc.Name; got != want {\n\t\tt.Fatalf(\"Wrong version return, got %s but want %s\", got, want)\n\t}\n\n\tmatched, err := regexp.MatchString(`uri=/ `, outputLog)\n\tif matched != true || err != nil {\n\t\tt.Fatalf(\"request is not logged :\\n%s\", outputLog)\n\t}\n}", "func (g *grpc) generateService(file *generator.FileDescriptor, service *pb.ServiceDescriptorProto, index int) {\n\tpath := fmt.Sprintf(\"6,%d\", index) // 6 means service.\n\n\torigServName := service.GetName()\n\tfullServName := origServName\n\tif pkg := file.GetPackage(); pkg != \"\" {\n\t\tfullServName = pkg + \".\" + fullServName\n\t}\n\tservName := generator.CamelCase(origServName)\n\tdeprecated := service.GetOptions().GetDeprecated()\n\n\tg.P()\n\tg.P(fmt.Sprintf(`// %sClient is the client API for %s service.\n//\n// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.`, servName, servName))\n\n\t// Client interface.\n\tif deprecated {\n\t\tg.P(\"//\")\n\t\tg.P(deprecationComment)\n\t}\n\tg.P(\"type \", servName, \"Client interface {\")\n\tfor i, method := range service.Method {\n\t\tg.gen.PrintComments(fmt.Sprintf(\"%s,2,%d\", path, i)) // 2 means method in a service.\n\t\tg.P(g.generateClientSignature(servName, method))\n\t}\n\tg.P(\"}\")\n\tg.P()\n\n\t// Client structure.\n\tg.P(\"type \", unexport(servName), \"Client struct {\")\n\tg.P(\"cc *\", grpcPkg, \".ClientConn\")\n\tg.P(\"}\")\n\tg.P()\n\n\t// NewClient factory.\n\tif deprecated {\n\t\tg.P(deprecationComment)\n\t}\n\tg.P(\"func New\", servName, \"Client (cc *\", grpcPkg, \".ClientConn) \", servName, \"Client {\")\n\tg.P(\"return &\", unexport(servName), \"Client{cc}\")\n\tg.P(\"}\")\n\tg.P()\n\n\tvar methodIndex, streamIndex int\n\tserviceDescVar := 
\"_\" + servName + \"_serviceDesc\"\n\t// Client method implementations.\n\tfor _, method := range service.Method {\n\t\tvar descExpr string\n\t\tif !method.GetServerStreaming() && !method.GetClientStreaming() {\n\t\t\t// Unary RPC method\n\t\t\tdescExpr = fmt.Sprintf(\"&%s.Methods[%d]\", serviceDescVar, methodIndex)\n\t\t\tmethodIndex++\n\t\t} else {\n\t\t\t// Streaming RPC method\n\t\t\tdescExpr = fmt.Sprintf(\"&%s.Streams[%d]\", serviceDescVar, streamIndex)\n\t\t\tstreamIndex++\n\t\t}\n\t\tg.generateClientMethod(servName, fullServName, serviceDescVar, method, descExpr)\n\t}\n\n\t// Server interface.\n\tserverType := servName + \"Server\"\n\tg.P(\"// \", serverType, \" is the server API for \", servName, \" service.\")\n\tif deprecated {\n\t\tg.P(\"//\")\n\t\tg.P(deprecationComment)\n\t}\n\tg.P(\"type \", serverType, \" interface {\")\n\tfor i, method := range service.Method {\n\t\tg.gen.PrintComments(fmt.Sprintf(\"%s,2,%d\", path, i)) // 2 means method in a service.\n\t\tg.P(g.generateServerSignature(servName, method))\n\t}\n\tg.P(\"}\")\n\tg.P()\n\n\t// Server Unimplemented struct for forward compatability.\n\tif deprecated {\n\t\tg.P(deprecationComment)\n\t}\n\tg.generateUnimplementedServer(servName, service)\n\n\t// Server registration.\n\tif deprecated {\n\t\tg.P(deprecationComment)\n\t}\n\tg.P(\"func Register\", servName, \"Server(s *\", grpcPkg, \".Server, srv \", serverType, \") {\")\n\tg.P(\"s.RegisterService(&\", serviceDescVar, `, srv)`)\n\tg.P(\"}\")\n\tg.P()\n\n\t// Server handler implementations.\n\tvar handlerNames []string\n\tfor _, method := range service.Method {\n\t\thname := g.generateServerMethod(servName, fullServName, method)\n\t\thandlerNames = append(handlerNames, hname)\n\t}\n\n\t// Service descriptor.\n\tg.P(\"var \", serviceDescVar, \" = \", grpcPkg, \".ServiceDesc {\")\n\tg.P(\"ServiceName: \", strconv.Quote(fullServName), \",\")\n\tg.P(\"HandlerType: (*\", serverType, \")(nil),\")\n\tg.P(\"Methods: []\", grpcPkg, 
\".MethodDesc{\")\n\tfor i, method := range service.Method {\n\t\tif method.GetServerStreaming() || method.GetClientStreaming() {\n\t\t\tcontinue\n\t\t}\n\t\tg.P(\"{\")\n\t\tg.P(\"MethodName: \", strconv.Quote(method.GetName()), \",\")\n\t\tg.P(\"Handler: \", handlerNames[i], \",\")\n\t\tg.P(\"},\")\n\t}\n\tg.P(\"},\")\n\tg.P(\"Streams: []\", grpcPkg, \".StreamDesc{\")\n\tfor i, method := range service.Method {\n\t\tif !method.GetServerStreaming() && !method.GetClientStreaming() {\n\t\t\tcontinue\n\t\t}\n\t\tg.P(\"{\")\n\t\tg.P(\"StreamName: \", strconv.Quote(method.GetName()), \",\")\n\t\tg.P(\"Handler: \", handlerNames[i], \",\")\n\t\tif method.GetServerStreaming() {\n\t\t\tg.P(\"ServerStreams: true,\")\n\t\t}\n\t\tif method.GetClientStreaming() {\n\t\t\tg.P(\"ClientStreams: true,\")\n\t\t}\n\t\tg.P(\"},\")\n\t}\n\tg.P(\"},\")\n\tg.P(\"Metadata: \\\"\", file.GetName(), \"\\\",\")\n\tg.P(\"}\")\n\tg.P()\n}", "func TestCreateService(t *testing.T) {\n\tsvc := &v12.Service{\n\t\tTypeMeta: v1.TypeMeta{},\n\t\tObjectMeta: v1.ObjectMeta{\n\t\t\tName: ResourceName,\n\t\t\tNamespace: Namespace,\n\t\t},\n\t\tSpec: v12.ServiceSpec{\n\t\t\tPorts: []v12.ServicePort{\n\t\t\t\t{\n\t\t\t\t\tPort: int32(ServicePorts),\n\t\t\t\t},\n\t\t\t},\n\t\t\tSelector: map[string]string{\n\t\t\t\t\"app\": ResourceName,\n\t\t\t},\n\t\t\tType: \"ClusterIP\",\n\t\t},\n\t}\n\t_, err := kube_cli.CreateService(Namespace, svc, v1.CreateOptions{})\n\tif err != nil{\n\t\tt.Errorf(\"failed to create service - %+v\", err)\n\t}\n\t// waiting till the service gets created\n\twa, err := kube_cli.WatchService(Namespace, v1.ListOptions{})\n\tif err != nil{\n\t\tt.Errorf(\"failed to setup watcher for service %+v\", err)\n\t}\n\tfor i := range wa.ResultChan(){\n\t\tsvc := i.Object.(*v12.Service)\n\t\tgot := svc.Spec.ClusterIP\n\t\tif got != \"\"{\n\t\t\twa.Stop()\n\t\t}\n\t}\n}", "func (p *printer) Service(service *descriptor.ServiceDescriptorProto, methodIndex int) 
{\n\tp.MaybeLeadingComments(service)\n\tdefer p.open(\"service %s\", service.GetName())()\n\n\tif methodIndex < 0 {\n\t\tfor i := range service.Method {\n\t\t\tp.Method(service.Method[i])\n\t\t}\n\t} else {\n\t\tp.Method(service.Method[methodIndex])\n\t\tif len(service.Method) > 1 {\n\t\t\tp.Printf(\"// other methods were omitted.\\n\")\n\t\t}\n\t}\n}", "func RunTodoListService(addr string) {\n\ttodolistService := NewTodoListService(addr)\n\ttodolistService.Run()\n}", "func TestServiceCreateListAndDelete(t *testing.T) {\n\tt.Parallel()\n\tclients := test.Setup(t)\n\n\tnames := test.ResourceNames{\n\t\tService: test.ObjectNameForTest(t),\n\t\tImage: test.PizzaPlanet1,\n\t}\n\n\t// Clean up on test failure or interrupt\n\ttest.EnsureTearDown(t, clients, &names)\n\n\t// Setup initial Service\n\tif _, err := v1test.CreateServiceReady(t, clients, &names); err != nil {\n\t\tt.Fatalf(\"Failed to create initial Service %v: %v\", names.Service, err)\n\t}\n\n\t// Validate State after Creation\n\tif err := validateControlPlane(t, clients, names, \"1\"); err != nil {\n\t\tt.Error(err)\n\t}\n\n\tlist, err := v1test.GetServices(clients)\n\tif err != nil {\n\t\tt.Fatal(\"Listing Services failed\")\n\t}\n\tif len(list.Items) < 1 {\n\t\tt.Fatal(\"Listing should return at least one Service\")\n\t}\n\tvar serviceFound = false\n\tfor _, service := range list.Items {\n\t\tt.Logf(\"Service Returned: %s\", service.Name)\n\t\tif service.Name == names.Service {\n\t\t\tserviceFound = true\n\t\t}\n\t}\n\tif !serviceFound {\n\t\tt.Fatal(\"The Service that was previously created was not found by listing all Services.\")\n\t}\n\tt.Logf(\"Deleting Service: %s\", names.Service)\n\tif err := v1test.DeleteService(clients, names.Service); err != nil {\n\t\tt.Fatal(\"Error deleting Service\")\n\t}\n\n}", "func TestGetSlidesApiInfo(t *testing.T) {\n e := initializeTest(\"GetSlidesApiInfo\", \"\", \"\")\n if e != nil {\n t.Errorf(\"Error: %v.\", e)\n return\n }\n c := getTestApiClient()\n r, _, e := 
c.DocumentApi.GetSlidesApiInfo()\n if e != nil {\n t.Errorf(\"Error: %v.\", e)\n return\n }\n if r.Code != 200 && r.Code != 201 {\n t.Errorf(\"Wrong response code: %d.\", r.Code)\n return\n }\n}", "func NewExampleService(config *config.Config, logger *logrus.Logger) *ExampleService {\n\n\tservice := &ExampleService{\n\t\tlogger: logger,\n\t\tconfig: config,\n\t}\n\n\treturn service\n}", "func New(svc service.Service, buf *bytes.Buffer) ServiceTest {\n\tvar svctest ServiceTest\n\t{\n\t\tsvctest = NewBasicServiceTest(svc)\n\t\tsvctest = LoggingMiddlewareTest(buf)(svctest)\n\t}\n\treturn svctest\n}", "func TestGetSlidesDocument(t *testing.T) {\n request := createGetSlidesDocumentRequest()\n e := initializeTest(\"GetSlidesDocument\", \"\", \"\")\n if e != nil {\n t.Errorf(\"Error: %v.\", e)\n return\n }\n c := getTestApiClient()\n r, _, e := c.DocumentApi.GetSlidesDocument(request)\n if e != nil {\n t.Errorf(\"Error: %v.\", e)\n return\n }\n if r.Code != 200 && r.Code != 201 {\n t.Errorf(\"Wrong response code: %d.\", r.Code)\n return\n }\n}", "func createPhpService() {\n\tserviceData := &apiv1.Service{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"test-server\",\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"name\": \"test-server\",\n\t\t\t},\n\t\t},\n\t\tSpec: apiv1.ServiceSpec{\n\t\t\tPorts: []apiv1.ServicePort{\n\t\t\t\t{\n\t\t\t\t\tPort: 9000,\n\t\t\t\t\tProtocol: apiv1.ProtocolTCP,\n\t\t\t\t\tTargetPort: intstr.IntOrString{\n\t\t\t\t\t\tIntVal: 9000,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tSelector: map[string]string{\n\t\t\t\t\"app\": \"test-server\",\n\t\t\t},\n\t\t},\n\t}\n\tcreateService(serviceData)\n}", "func TestController(t *testing.T) {\n\tfakeKubeClient, catalogClient, fakeBrokerCatalog, _, _, testController, _, stopCh := newTestController(t)\n\tdefer close(stopCh)\n\n\tt.Log(fakeKubeClient, catalogClient, fakeBrokerCatalog, testController, stopCh)\n\n\tfakeBrokerCatalog.RetCatalog = &brokerapi.Catalog{\n\t\tServices: 
[]*brokerapi.Service{\n\t\t\t{\n\t\t\t\tName: \"test-service\",\n\t\t\t\tID: \"12345\",\n\t\t\t\tDescription: \"a test service\",\n\t\t\t\tPlans: []brokerapi.ServicePlan{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"test-plan\",\n\t\t\t\t\t\tFree: true,\n\t\t\t\t\t\tID: \"34567\",\n\t\t\t\t\t\tDescription: \"a test plan\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tname := \"test-name\"\n\tbroker := &v1alpha1.Broker{\n\t\tObjectMeta: v1.ObjectMeta{Name: name},\n\t\tSpec: v1alpha1.BrokerSpec{\n\t\t\tURL: \"https://example.com\",\n\t\t},\n\t}\n\tbrokerClient := catalogClient.Servicecatalog().Brokers()\n\n\tbrokerServer, err := brokerClient.Create(broker)\n\tif nil != err {\n\t\tt.Fatalf(\"error creating the broker %q (%q)\", broker, err)\n\t}\n\n\tif err := wait.PollImmediate(500*time.Millisecond, wait.ForeverTestTimeout,\n\t\tfunc() (bool, error) {\n\t\t\tbrokerServer, err = brokerClient.Get(name)\n\t\t\tif nil != err {\n\t\t\t\treturn false,\n\t\t\t\t\tfmt.Errorf(\"error getting broker %s (%s)\",\n\t\t\t\t\t\tname, err)\n\t\t\t} else if len(brokerServer.Status.Conditions) > 0 {\n\t\t\t\tt.Log(brokerServer)\n\t\t\t\treturn true, nil\n\t\t\t} else {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t},\n\t); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// check\n\tserviceClassClient := catalogClient.Servicecatalog().ServiceClasses()\n\t_, err = serviceClassClient.Get(\"test-service\")\n\tif nil != err {\n\t\tt.Fatal(\"could not find the test service\", err)\n\t}\n\n\t// cleanup our broker\n\terr = brokerClient.Delete(name, &v1.DeleteOptions{})\n\tif nil != err {\n\t\tt.Fatalf(\"broker should be deleted (%s)\", err)\n\t}\n\n\t// uncomment if/when deleting a broker deletes the associated service\n\t// if class, err := serviceClassClient.Get(\"test-service\"); nil == err {\n\t// \tt.Fatal(\"found the test service that should have been deleted\", err, class)\n\t// }\n}", "func main() {\n\n\t// Prepare some dependencies:\n\tlogger := logrus.New()\n\tstorer := 
new(storageMocks.FakeStorer)\n\n\t// Program the storer mock to respond with _something_:\n\tstorer.CreateCruftReturns(\"12345\", nil)\n\tstorer.ReadCruftReturns(nil, storage.ErrNotFound)\n\n\t// Inject the dependencies into a new Handler:\n\thandler := serviceHandler.New(logger, storer)\n\n\t// Make a new GRPC Server (usually I would have this in a common / shared library, and pre-load it with middleware built from our logger / instrumenter / tracer interfaces):\n\tgrpcServer := grpc.NewServer()\n\n\t// Register our Handler and GRPC Server with our generated service-proto code:\n\tserviceProto.RegisterExampleServer(grpcServer, handler)\n\n\t// Listen for connections:\n\tlistener, err := net.Listen(\"tcp\", listenAddress)\n\tif err != nil {\n\t\tlogger.Fatalf(\"Unable to start GRPC server on TCP address %s\", listenAddress)\n\t}\n\n\t// Start the GRPC server:\n\tif err := grpcServer.Serve(listener); err != nil {\n\t\tlogger.Fatalf(\"Unable to start the GRPC server: %v\", err)\n\t}\n}", "func SetUpService(webServiceEndpoint string,healthCheckEndpoint string,subscriptionServiceUrl string,googleSubscriptionsUrl string,clientId string, clientSecret string, callbackUrl string, issuer string, sessionKey string, cloudCommerceProcurementUrl string, partnerId string, finishUrl string, finishUrlTitle string, testMode string) error {\n\thandler := GetSubscriptionFrontendHandler(subscriptionServiceUrl,googleSubscriptionsUrl,clientId, clientSecret, callbackUrl, issuer, sessionKey, cloudCommerceProcurementUrl, partnerId, finishUrl, finishUrlTitle)\n\n\thealthCheck := mux.NewRouter()\n\thealthCheck.Methods(http.MethodGet).Path(\"/healthz\").HandlerFunc(handler.Healthz)\n\tgo http.ListenAndServe(\":\"+healthCheckEndpoint, healthCheck)\n\n\twebService := mux.NewRouter()\n\tif testModeBool,err := strconv.ParseBool(testMode); err==nil && testModeBool 
{\n\t\twebService.Methods(http.MethodGet).Path(\"/resetsaas\").HandlerFunc(handler.ResetSaas)\n\t\twebService.Methods(http.MethodGet).Path(\"/signupsaastest\").HandlerFunc(handler.SignupSaasTest)\n\t}\n\twebService.Methods(http.MethodGet).Path(\"/signupprod/{accountId}\").HandlerFunc(handler.SignupProd)\n\twebService.Methods(http.MethodPost).Path(\"/signupsaas\").HandlerFunc(handler.SignupSaas)\n\twebService.Methods(http.MethodGet).Path(\"/login\").HandlerFunc(handler.Auth0Login)\n\twebService.Methods(http.MethodGet).Path(\"/callback\").HandlerFunc(handler.Auth0Callback)\n\twebService.Methods(http.MethodPost).Path(\"/finishSaas\").HandlerFunc(handler.FinishSaas)\n\twebService.Methods(http.MethodPost).Path(\"/finishProd\").HandlerFunc(handler.FinishProd)\n\n\twebService.Methods(http.MethodGet).Path(\"/healthz\").HandlerFunc(handler.Healthz)\n\n\twebService.HandleFunc(\"/\", func(w http.ResponseWriter, r *http.Request) {\n\t\thttp.Redirect(w, r, \"https://www.cloudbees.com\", http.StatusFound)\n\t})\n\n\treturn http.ListenAndServe(\":\"+webServiceEndpoint, webService)\n}", "func TestServiceInfo(t *testing.T) {\n\t// Set our expected environment variables.\n\tfor key, value := range exampleEnvVars {\n\t\tos.Setenv(key, value)\n\t}\n\n\t// Get the service info.\n\tinfo := GetMicroserviceInfo()\n\n\t// Check each expected env var.\n\tif info.ServiceName != os.Getenv(\"SERVICE_NAME\") {\n\t\tt.Errorf(\"Expected %v, got %v\", os.Getenv(\"SERVICE_NAME\"), info.ServiceName)\n\t}\n\n\tif info.ServiceType != os.Getenv(\"SERVICE_TYPE\") {\n\t\tt.Errorf(\"Expected %v, got %v\", os.Getenv(\"SERVICE_TYPE\"), info.ServiceType)\n\t}\n\n\tif info.ServiceScope != os.Getenv(\"SERVICE_SCOPE\") {\n\t\tt.Errorf(\"Expected %v, got %v\", os.Getenv(\"SERVICE_SCOPE\"), info.ServiceScope)\n\t}\n\n\tif info.ServiceVersion != os.Getenv(\"SERVICE_VERSION\") {\n\t\tt.Errorf(\"Expected %v, got %v\", os.Getenv(\"SERVICE_VERSION\"), info.ServiceVersion)\n\t}\n}", "func StartTestService(t *testing.T) 
{\n\tdb, err := sqorc.Open(\"sqlite3\", \":memory:\")\n\trequire.NoError(t, err)\n\tstartService(t, db)\n}", "func (suite *ServiceTestSuite) SetupTest() {\n\tsuite.mockCtrl = gomock.NewController(suite.T())\n\tsuite.resmgrClient = resource_mocks.NewMockResourceManagerServiceYARPCClient(suite.mockCtrl)\n\tsuite.hostMgrClient = host_mocks.NewMockInternalHostServiceYARPCClient(suite.mockCtrl)\n\tsuite.metrics = metrics.NewMetrics(tally.NoopScope)\n\tsuite.hostService = NewService(suite.hostMgrClient, suite.resmgrClient, suite.metrics)\n}", "func TestPostSlidesDocument(t *testing.T) {\n request := createPostSlidesDocumentRequest()\n e := initializeTest(\"PostSlidesDocument\", \"\", \"\")\n if e != nil {\n t.Errorf(\"Error: %v.\", e)\n return\n }\n c := getTestApiClient()\n r, _, e := c.DocumentApi.PostSlidesDocument(request)\n if e != nil {\n t.Errorf(\"Error: %v.\", e)\n return\n }\n if r.Code != 200 && r.Code != 201 {\n t.Errorf(\"Wrong response code: %d.\", r.Code)\n return\n }\n}", "func TestEventService(t *testing.T) {\n\tvar result EventService\n\terr := json.NewDecoder(strings.NewReader(eventServiceBody)).Decode(&result)\n\n\tif err != nil {\n\t\tt.Errorf(\"Error decoding JSON: %s\", err)\n\t}\n\n\tif result.ID != \"EventService\" {\n\t\tt.Errorf(\"Received invalid ID: %s\", result.ID)\n\t}\n\n\tif result.Name != \"Event Service\" {\n\t\tt.Errorf(\"Received invalid name: %s\", result.Name)\n\t}\n\n\tif result.DeliveryRetryAttempts != 4 {\n\t\tt.Errorf(\"Expected 4 retry attempts, got: %d\", result.DeliveryRetryAttempts)\n\t}\n\n\tif result.DeliveryRetryIntervalSeconds != 30 {\n\t\tt.Errorf(\"Expected 30 second retry interval, got: %d\", result.DeliveryRetryIntervalSeconds)\n\t}\n\n\tif result.SSEFilterPropertiesSupported.MetricReportDefinition {\n\t\tt.Error(\"MetricReportDefinition filter should be false\")\n\t}\n\n\tif !result.SSEFilterPropertiesSupported.MessageID {\n\t\tt.Error(\"Message ID filter should be true\")\n\t}\n\n\tif result.submitTestEventTarget 
!= \"/redfish/v1/EventService/Actions/EventService.SubmitTestEvent\" {\n\t\tt.Errorf(\"Invalid SubmitTestEvent target: %s\", result.submitTestEventTarget)\n\t}\n\n\tfor _, et := range result.EventTypesForSubscription {\n\t\tif !et.IsValidEventType() {\n\t\t\tt.Errorf(\"invalid event type: %s\", et)\n\t\t}\n\t}\n\n}", "func writeServiceDocFile(g *generateInfo) error {\n\treturn writeGoFile(filepath.Join(g.PackageDir, \"doc.go\"),\n\t\tcodeLayout,\n\t\tstrings.TrimSpace(g.API.ServicePackageDoc()),\n\t\tg.API.PackageName(),\n\t\t\"\",\n\t)\n}", "func TestServiceServiceServer(server TestServiceServer) catalog.ServiceServer {\n\treturn &testServiceServiceServer{\n\t\tserver: server,\n\t}\n}", "func TestServiceWithoutLogger(t *testing.T) {\n\ts := res.NewService(\"test\")\n\ts.SetLogger(nil)\n\ts.Handle(\"model\", res.GetResource(func(r res.GetRequest) { r.NotFound() }))\n\tsession := restest.NewSession(t, s, restest.WithKeepLogger)\n\tdefer session.Close()\n}", "func (t *ServiceTestFixture) BuildServiceSpec() *api.Service {\n\tservice := &api.Service{\n\t\tObjectMeta: api.ObjectMeta{\n\t\t\tName: t.ServiceName,\n\t\t\tNamespace: t.Namespace,\n\t\t},\n\t\tSpec: api.ServiceSpec{\n\t\t\tSelector: t.Labels,\n\t\t\tPorts: []api.ServicePort{{\n\t\t\t\tPort: 80,\n\t\t\t\tTargetPort: intstr.FromInt(80),\n\t\t\t}},\n\t\t},\n\t}\n\treturn service\n}", "func (ft *FacadeUnitTest) Test_GetEvaluatedServiceSimple(c *C) {\n\tserviceID := \"0\"\n\tserviceName := \"service0\"\n\tsvc := service.Service{\n\t\tID: serviceID,\n\t\tName: serviceName,\n\t\tActions: map[string]string{\"name\": \"{{.Name}}\", \"instanceID\": \"{{.InstanceID}}\"},\n\t}\n\tft.serviceStore.On(\"GetServiceDetails\", ft.ctx, serviceID).Return(&service.ServiceDetails{ID: serviceID}, nil)\n\tft.serviceStore.On(\"Get\", ft.ctx, serviceID).Return(&svc, nil)\n\tft.configStore.On(\"GetConfigFiles\", ft.ctx, serviceID, \"/\"+serviceID).Return([]*serviceconfigfile.SvcConfigFile{}, nil)\n\n\tinstanceID := 99\n\tresult, err := 
ft.Facade.GetEvaluatedService(ft.ctx, serviceID, instanceID)\n\n\tc.Assert(result, Not(IsNil))\n\tc.Assert(err, IsNil)\n\n\tc.Assert(result.Actions[\"name\"], Equals, serviceName)\n\tc.Assert(result.Actions[\"instanceID\"], Equals, fmt.Sprintf(\"%d\", instanceID))\n}", "func Service() {\n\ts, err := NewServerFromOptions()\n\tif err != nil {\n\t\tlog.Fatal(err, \"Error starting server\")\n\t}\n\tgo s.Serve()\n}", "func newServiceNoPatchTest(name string, options ...controllertesting.ServiceOption) TableRow {\n\ttest := newDispatcherBasicTest(\"Existing Dispatcher Service, \" + name + \", No Patch\")\n\ttest.Objects = append(test.Objects,\n\t\tcontrollertesting.NewKafkaChannelDispatcherService(options...),\n\t\tcontrollertesting.NewKafkaChannelDispatcherDeployment())\n\treturn test\n}", "func TestClient_CorporateClient(_ *testing.T) {\n\tvar _ CorporateAPI = CorporateClient{}\n}", "func (m *MockapprunnerDescriber) Service() (*apprunner.Service, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Service\")\n\tret0, _ := ret[0].(*apprunner.Service)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func LabelService(\n\tinit func(LabelFields, *testing.T) (platform.LabelService, string, func()),\n\tt *testing.T,\n) {\n\ttests := []struct {\n\t\tname string\n\t\tfn labelServiceF\n\t}{\n\t\t{\n\t\t\tname: \"CreateLabel\",\n\t\t\tfn: CreateLabel,\n\t\t},\n\t\t{\n\t\t\tname: \"FindLabels\",\n\t\t\tfn: FindLabels,\n\t\t},\n\t\t{\n\t\t\tname: \"UpdateLabel\",\n\t\t\tfn: UpdateLabel,\n\t\t},\n\t\t{\n\t\t\tname: \"DeleteLabel\",\n\t\t\tfn: DeleteLabel,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\ttt.fn(init, t)\n\t\t})\n\t}\n}", "func TestDogService(t *testing.T) {\n\n\t// skip test if docker is not available, very important that go test always work!\n\ttestcontainers.SkipIfProviderIsNotHealthy(t)\n\tctx := context.Background()\n\n\t// set up out testing container\n\tfsContainer, err := 
testx.CreateFirestoreContainer(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"createFirestoreContainer() err = %v; want nil\", err)\n\t}\n\n\t// run a clean up method to terminate the container when this test and all sub tests are completed\n\tt.Cleanup(func() {\n\t\terr := fsContainer.Terminate(ctx)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"fsContainer.Terminate() err = %v; want nil\", err)\n\t\t}\n\t})\n\n\t// cool way to dynamically allocate an endpoint for us to use, we could have multiple integration test running in parallel that would never have a port collision\n\tendpoint, err := fsContainer.Endpoint(ctx, \"\")\n\tif err != nil {\n\t\tt.Errorf(\"fsContainer.Endpoint() err = %v; want nil\", err)\n\t}\n\n\t// home made firestore testing client that has a util method for clearing all data\n\tfsClient := testx.NewFirestoreTestingClient(ctx, t, endpoint)\n\n\tservice := gotoproduction.NewDogService(fsClient.Client, logx.NewTesterLogger(t))\n\tt.Run(\"Create\", testDogService_CreateDog(service, fsClient))\n\tt.Run(\"Find By Type\", testDogService_FindDogByType(service, fsClient))\n\tt.Run(\"GetDog By ID\", testDogService_GetDogByID(service, fsClient))\n}", "func TestServiceCreateAndUpdate(t *testing.T) {\n\tt.Parallel()\n\tclients := test.Setup(t)\n\n\tnames := test.ResourceNames{\n\t\tService: test.ObjectNameForTest(t),\n\t\tImage: test.PizzaPlanet1,\n\t}\n\n\t// Clean up on test failure or interrupt\n\ttest.EnsureTearDown(t, clients, &names)\n\n\t// Setup initial Service\n\tobjects, err := v1test.CreateServiceReady(t, clients, &names)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create initial Service %v: %v\", names.Service, err)\n\t}\n\n\t// Validate State after Creation\n\n\tif err = validateControlPlane(t, clients, names, \"1\"); err != nil {\n\t\tt.Error(err)\n\t}\n\n\tif err = validateDataPlane(t, clients, names, test.PizzaPlanetText1); err != nil {\n\t\tt.Error(err)\n\t}\n\n\tif err = validateLabelsPropagation(t, *objects, names); err != nil 
{\n\t\tt.Error(err)\n\t}\n\n\tif err := validateAnnotations(objects); err != nil {\n\t\tt.Error(\"Service annotations are incorrect:\", err)\n\t}\n\n\t// We start a background prober to test if Route is always healthy even during Route update.\n\tprober := test.RunRouteProber(\n\t\tt.Logf,\n\t\tclients, names.URL,\n\t\ttest.AddRootCAtoTransport(context.Background(), t.Logf, clients, test.ServingFlags.HTTPS),\n\t\tspoof.WithHeader(test.ServingFlags.RequestHeader()))\n\tdefer test.AssertProberDefault(t, prober)\n\n\t// Update Container Image\n\tt.Log(\"Updating the Service to use a different image.\")\n\tnames.Image = test.PizzaPlanet2\n\timage2 := pkgtest.ImagePath(names.Image)\n\tif objects.Service, err = v1test.UpdateService(t, clients, names, rtesting.WithServiceImage(image2)); err != nil {\n\t\tt.Fatalf(\"Update for Service %s with new image %s failed: %v\", names.Service, image2, err)\n\t}\n\n\tt.Log(\"Service should reflect new revision created and ready in status.\")\n\tnames.Revision, err = v1test.WaitForServiceLatestRevision(clients, names)\n\tif err != nil {\n\t\tt.Fatal(\"New image not reflected in Service:\", err)\n\t}\n\n\tt.Log(\"Waiting for Service to transition to Ready.\")\n\tif err := v1test.WaitForServiceState(clients.ServingClient, names.Service, v1test.IsServiceReady, \"ServiceIsReady\"); err != nil {\n\t\tt.Fatal(\"Error waiting for the service to become ready for the latest revision:\", err)\n\t}\n\n\t// Validate State after Image Update\n\tif err = validateControlPlane(t, clients, names, \"2\"); err != nil {\n\t\tt.Error(err)\n\t}\n\tif err = validateDataPlane(t, clients, names, test.PizzaPlanetText2); err != nil {\n\t\tt.Error(err)\n\t}\n\n\t// Update Metadata (Labels)\n\tt.Logf(\"Updating labels of the RevisionTemplateSpec for service %s.\", names.Service)\n\tmetadata := metav1.ObjectMeta{\n\t\tLabels: map[string]string{\n\t\t\t\"label-x\": \"abc\",\n\t\t\t\"label-y\": \"def\",\n\t\t},\n\t}\n\tif objects.Service, err = 
v1test.UpdateService(t, clients, names, rtesting.WithServiceTemplateMeta(metadata)); err != nil {\n\t\tt.Fatalf(\"Service %s was not updated with labels in its RevisionTemplateSpec: %v\", names.Service, err)\n\t}\n\n\tt.Log(\"Waiting for the new revision to appear as LatestRevision.\")\n\tif names.Revision, err = v1test.WaitForServiceLatestRevision(clients, names); err != nil {\n\t\tt.Fatalf(\"The Service %s was not updated with new revision %s after updating labels in its RevisionTemplateSpec: %v\", names.Service, names.Revision, err)\n\t}\n\n\t// Update Metadata (Annotations)\n\tt.Log(\"Updating annotations of RevisionTemplateSpec for service\", names.Service)\n\tmetadata = metav1.ObjectMeta{\n\t\tAnnotations: map[string]string{\n\t\t\t\"annotation-a\": \"123\",\n\t\t\t\"annotation-b\": \"456\",\n\t\t},\n\t}\n\tif objects.Service, err = v1test.UpdateService(t, clients, names, rtesting.WithServiceTemplateMeta(metadata)); err != nil {\n\t\tt.Fatalf(\"Service %s was not updated with annotation in its RevisionTemplateSpec: %v\", names.Service, err)\n\t}\n\n\tt.Log(\"Waiting for the new revision to appear as LatestRevision.\")\n\tnames.Revision, err = v1test.WaitForServiceLatestRevision(clients, names)\n\tif err != nil {\n\t\tt.Fatal(\"The new revision has not become ready in Service:\", err)\n\t}\n\n\tt.Log(\"Waiting for Service to transition to Ready.\")\n\tif err := v1test.WaitForServiceState(clients.ServingClient, names.Service, v1test.IsServiceReady, \"ServiceIsReady\"); err != nil {\n\t\tt.Fatal(\"Error waiting for the service to become ready for the latest revision:\", err)\n\t}\n\n\t// Validate the Service shape.\n\tif err = validateControlPlane(t, clients, names, \"4\"); err != nil {\n\t\tt.Error(err)\n\t}\n\tif err = validateDataPlane(t, clients, names, test.PizzaPlanetText2); err != nil {\n\t\tt.Error(err)\n\t}\n}", "func main() {\n\tws := 
new(restful.WebService)\n\tws.Path(\"/goass\").Consumes(restful.MIME_JSON).Produces(restful.MIME_JSON)\n\n\tws.Route(ws.GET(\"/version\").Doc(\"get version number\").\n\t\tTo(func(r *restful.Request, w *restful.Response) { io.WriteString(w, version) }).\n\t\tOperation(\"version\").\n\t\tProduces(restful.MIME_OCTET))\n\n\tws.Route(\n\t\tws.GET(\"/plant/{name}\").Doc(\"get plant data\").\n\t\t\tTo(getplant).\n\t\t\tOperation(\"getplant\").\n\t\t\tParam(ws.PathParameter(\"name\", \"plant name\")).\n\t\t\tWrites(PlantDef{}))\n\n\tws.Route(\n\t\tws.GET(\"/plant/{name}/totalpower\").Doc(\"get plant total power\").\n\t\t\tTo(getplantpower).\n\t\t\tOperation(\"getplantpower\").\n\t\t\tParam(ws.PathParameter(\"name\", \"plant name\")).\n\t\t\tWrites(struct{ Power float32 }{}))\n\n\t// simple alarming mock implemented in file alarming.go\n\talarming := InitAlarming(ws)\n\talarming.Run()\n\n\trestful.Add(ws)\n\tswagger.RegisterSwaggerService(\n\t\tswagger.Config{\n\t\t\tWebServices: restful.DefaultContainer.RegisteredWebServices(), // you control what services are visible\n\t\t\tWebServicesUrl: \"/\",\n\t\t\tApiPath: \"/apidocs.json\",\n\t\t\tSwaggerPath: \"/apidocs/\",\n\t\t\tSwaggerFilePath: \"./swaggerui\"},\n\t\trestful.DefaultContainer)\n\terr := http.ListenAndServe(\":8123\", nil)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func TestServiceStatusHandler(t *testing.T) {\n\ttestServicesCmdHandler(statusCmd, t)\n}", "func RunTests(t *testing.T, svctest ServiceTest) {\n\tt.Run(\"NewSite\", func(t *testing.T) { clearEnvWrapper(t, svctest.TestNewSite) })\n\tt.Run(\"DeleteSite\", func(t *testing.T) { clearEnvWrapper(t, svctest.TestDeleteSite) })\n\tt.Run(\"WritePost\", func(t *testing.T) { clearEnvWrapper(t, svctest.TestWritePost) })\n\tt.Run(\"RemovePost\", func(t *testing.T) { clearEnvWrapper(t, svctest.TestRemovePost) })\n\tt.Run(\"ReadPost\", func(t *testing.T) { clearEnvWrapper(t, svctest.TestReadPost) })\n\tt.Run(\"WriteConfig\", func(t *testing.T) { 
clearEnvWrapper(t, svctest.TestWriteConfig) })\n\tt.Run(\"ReadConfig\", func(t *testing.T) { clearEnvWrapper(t, svctest.TestReadConfig) })\n\tt.Run(\"UpdateAbout\", func(t *testing.T) { clearEnvWrapper(t, svctest.TestUpdateAbout) })\n\tt.Run(\"ReadAbout\", func(t *testing.T) { clearEnvWrapper(t, svctest.TestReadAbout) })\n\tt.Run(\"ChangeDefaultConfig\", func(t *testing.T) { clearEnvWrapper(t, svctest.TestChangeDefaultConfig) })\n}", "func TestGetSlidesDocumentWithFormat(t *testing.T) {\n request := createGetSlidesDocumentWithFormatRequest()\n e := initializeTest(\"GetSlidesDocumentWithFormat\", \"\", \"\")\n if e != nil {\n t.Errorf(\"Error: %v.\", e)\n return\n }\n c := getTestApiClient()\n r, _, e := c.DocumentApi.GetSlidesDocumentWithFormat(request)\n if e != nil {\n t.Errorf(\"Error: %v.\", e)\n return\n }\n assertBinaryResponse(r, t)\n}", "func Test(t *testing.T) {\n\tsuite.Run(t, &DiscoverySuite{})\n}", "func main() {\n\t// New Service\n\tservice := micro.NewService(\n\t\tmicro.Name(conf.AppConf.SrvName),\n\t\tmicro.Version(\"1\"),\n\t)\n\n\t// Initialise service\n\tservice.Init()\n\n\t// Initialise models\n\tmodels.Init(false)\n\n\t// Register Handler,each goroutine..\n\thandler.Init( service.Server() )\n\n\t// Register Struct as Subscriber\n\t// micro.RegisterSubscriber(\"go.micro.srv.template\", service.Server(), new(subscriber.Example))\n\t// micro.RegisterSubscriber(\"go.micro.srv.template\", service.Server(), subscriber.Handler)\n\n\t// Run service\n\tif err := service.Run(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func TestServiceLookupSuite(t *testing.T) {\n\tsuite.Run(t, new(ServiceLookupTestSuite))\n}", "func BenchmarkService(b *testing.B) {\n\tfp := protofile\n\n\treader, _ := os.Open(fp)\n\tdefer reader.Close()\n\n\tparser := proto.NewParser(reader)\n\tdefinition, _ := parser.Parse()\n\n\tvar (\n\t\tcurrentService *proto.Service\n\t\tcurrentTemplateMap map[string]map[string]string\n\t)\n\n\thandlerService := proto.WithService(func(s 
*proto.Service) {\n\t\tclearRandoms()\n\t\tif rpc != \"\" && s.Name != rpc {\n\t\t\treturn\n\t\t}\n\t\tcurrentService = s\n\t\tif m, ok := Templates[s.Name]; ok {\n\t\t\tcurrentTemplateMap = m\n\t\t}\n\t})\n\n\thandlerRPC := proto.WithRPC(func(r *proto.RPC) {\n\t\tfor _, action := range []string{\"create\", \"update\", \"delete\"} {\n\t\t\tif m, ok := currentTemplateMap[action]; ok {\n\t\t\t\tif t, ok := m[r.Name]; ok {\n\n\t\t\t\t\tb.Run(fmt.Sprintf(\"%s\", r.Name), func(b *testing.B) {\n\t\t\t\t\t\t// if ovs { // skip unvalid service: useful when various services are defined in a single proto file\n\t\t\t\t\t\t// \ttokens := strings.Split(service, \".\")\n\t\t\t\t\t\t// \tsuffix := tokens[len(tokens)-1]\n\t\t\t\t\t\t// \ttcr := strings.ToLower(currentService)\n\t\t\t\t\t\t// \tif !strings.HasPrefix(tcr, suffix) {\n\t\t\t\t\t\t// \t\treturn\n\t\t\t\t\t\t// \t}\n\t\t\t\t\t\t// }\n\n\t\t\t\t\t\t// Gather error count\n\t\t\t\t\t\tvar gotError chan bool\n\t\t\t\t\t\tvar done chan int\n\t\t\t\t\t\tif logFailedCalls {\n\t\t\t\t\t\t\tgotError = make(chan bool)\n\t\t\t\t\t\t\tdone = make(chan int)\n\t\t\t\t\t\t\tgo func() {\n\t\t\t\t\t\t\t\terrNb := 0\n\t\t\t\t\t\t\t\tfor {\n\t\t\t\t\t\t\t\t\tselect {\n\t\t\t\t\t\t\t\t\tcase nb := <-done:\n\t\t\t\t\t\t\t\t\t\tresults := float32(errNb) / float32(nb) * 100\n\t\t\t\t\t\t\t\t\t\tfmt.Printf(\"Benchmarking %s - %.00f%% error rate\\n\", currentService.Name+\".\"+r.Name, results)\n\t\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\t\tcase <-gotError:\n\t\t\t\t\t\t\t\t\t\terrNb++\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}()\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t// Retrieve the test template\n\t\t\t\t\t\ttmpl, err := template.New(r.Name).Parse(t)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\t\t\t\tif r.StreamsRequest && !r.StreamsReturns {\n\t\t\t\t\t\t\t\terr := benchStreamClient(b, action, currentService, r, tmpl)\n\t\t\t\t\t\t\t\tif err != nil 
{\n\t\t\t\t\t\t\t\t\tif logFailedCalls {\n\t\t\t\t\t\t\t\t\t\tgotError <- true\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t} else if !r.StreamsRequest && r.StreamsReturns {\n\t\t\t\t\t\t\t\terr := benchStreamServer(b, action, currentService, r, tmpl)\n\t\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\t\tif logFailedCalls {\n\t\t\t\t\t\t\t\t\t\tgotError <- true\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\terr := benchCall(b, action, currentService, r, tmpl)\n\t\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\t\tif logFailedCalls {\n\t\t\t\t\t\t\t\t\t\tgotError <- true\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif logFailedCalls {\n\t\t\t\t\t\t\tdone <- b.N\n\t\t\t\t\t\t}\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n\n\tproto.Walk(definition, handlerService, handlerRPC)\n}", "func buildEchoService(name, namespace string, labels map[string]string) *corev1.Service {\n\treturn &corev1.Service{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t\tNamespace: namespace,\n\t\t},\n\t\tSpec: corev1.ServiceSpec{\n\t\t\tPorts: []corev1.ServicePort{\n\t\t\t\t{\n\t\t\t\t\tName: \"http\",\n\t\t\t\t\tPort: int32(80),\n\t\t\t\t\tProtocol: corev1.ProtocolTCP,\n\t\t\t\t\tTargetPort: intstr.FromInt(8080),\n\t\t\t\t},\n\t\t\t},\n\t\t\tSelector: labels,\n\t\t},\n\t}\n}", "func (s *Service) RunTest(ctx context.Context, req *conformance.Request) (*conformance.Response, error) {\n\tvar config test_gen.ServiceMesh\n\n\tconfig = linkerdConfig\n\tswitch req.Mesh.Type {\n\tcase smp.ServiceMesh_LINKERD:\n\t\tconfig = linkerdConfig\n\t\treq.Mesh.Annotations[\"linkerd.io/inject\"] = \"enabled\"\n\tcase smp.ServiceMesh_APP_MESH:\n\t\tconfig = linkerdConfig\n\t\treq.Mesh.Labels[\"appmesh.k8s.aws/sidecarInjectorWebhook\"] = \"enabled\"\n\tcase smp.ServiceMesh_MAESH:\n\t\tconfig = maeshConfig\n\tcase smp.ServiceMesh_ISTIO:\n\t\tconfig = istioConfig\n\t\treq.Mesh.Labels[\"istio-injection\"] = 
\"enabled\"\n\tcase smp.ServiceMesh_OPEN_SERVICE_MESH:\n\t\tconfig = osmConfig\n\t\treq.Mesh.Labels[\"openservicemesh.io/monitored-by\"] = \"osm\"\n\tcase smp.ServiceMesh_KUMA:\n\t\treq.Mesh.Annotations[\"kuma.io/sidecar-injection\"] = \"enabled\"\n\tcase smp.ServiceMesh_NGINX_SERVICE_MESH:\n\t\treq.Mesh.Annotations[\"njector.nsm.nginx.com/auto-inject\"] = \"true\"\n\n\t}\n\n\tresult := test_gen.RunTest(config, req.Mesh.Annotations, req.Mesh.Labels)\n\ttotalSteps := 24\n\ttotalFailures := 0\n\tstepsCount := map[string]int{\n\t\t\"traffic-access\": 7,\n\t\t\"traffic-split\": 11,\n\t\t\"traffic-spec\": 6,\n\t}\n\tspecVersion := map[string]string{\n\t\t\"traffic-access\": \"v0.6.0/v1alpha3\",\n\t\t\"traffic-split\": \"v0.6.0/v1alpha4\",\n\t\t\"traffic-spec\": \"v0.6.0/v1alpha4\",\n\t}\n\n\tdetails := make([]*conformance.Detail, 0)\n\tfor _, res := range result.Testsuite[0].Testcase {\n\t\td := &conformance.Detail{\n\t\t\tSmispec: res.Name,\n\t\t\tSpecversion: specVersion[res.Name],\n\t\t\tAssertion: strconv.Itoa(stepsCount[res.Name]),\n\t\t\tDuration: res.Time,\n\t\t\tCapability: conformance.Capability_FULL,\n\t\t\tStatus: conformance.ResultStatus_PASSED,\n\t\t\tResult: &conformance.Result{\n\t\t\t\tResult: &conformance.Result_Message{\n\t\t\t\t\tMessage: \"All test passed\",\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tif len(res.Failure.Text) > 2 {\n\t\t\td.Result = &conformance.Result{\n\t\t\t\tResult: &conformance.Result_Error{\n\t\t\t\t\tError: &service.CommonError{\n\t\t\t\t\t\tCode: \"\",\n\t\t\t\t\t\tSeverity: \"\",\n\t\t\t\t\t\tShortDescription: res.Failure.Text,\n\t\t\t\t\t\tLongDescription: res.Failure.Message,\n\t\t\t\t\t\tProbableCause: \"\",\n\t\t\t\t\t\tSuggestedRemediation: \"\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\t\t\td.Status = conformance.ResultStatus_FAILED\n\t\t\td.Capability = conformance.Capability_NONE\n\n\t\t\t// A hacky way to see the testStep Failed, since KUDO only provides it in Failure.Message\n\t\t\tre := regexp.MustCompile(`[0-9]+`)\n\t\t\tif 
res.Failure.Message != \"\" {\n\t\t\t\tstepFailed := re.FindAllString(res.Failure.Message, 1)\n\t\t\t\tif len(stepFailed) != 0 {\n\t\t\t\t\tpassed, _ := strconv.Atoi(stepFailed[0])\n\t\t\t\t\tpassed = passed - 1\n\t\t\t\t\tfailures := stepsCount[res.Name] - passed\n\t\t\t\t\ttotalFailures += failures\n\t\t\t\t\tif (passed) >= (stepsCount[res.Name] / 2) {\n\t\t\t\t\t\td.Capability = conformance.Capability_HALF\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tdetails = append(details, d)\n\t}\n\n\treturn &conformance.Response{\n\t\tCasespassed: strconv.Itoa(totalSteps - totalFailures),\n\t\tPasspercent: strconv.FormatFloat(float64(totalSteps-totalFailures)/float64(totalSteps)*100, 'f', 2, 64),\n\t\tMesh: req.Mesh,\n\t\tDetails: details,\n\t}, nil\n}", "func Test(t *testing.T) {\n}", "func TestExample(t *testing.T) {\n\tinstance := &RestExample{\n\t\tpost: make(map[string]string),\n\t\twatch: make(map[string]chan string),\n\t}\n\n\tinstance.HandleCreateHello(HelloArg{\n\t\tTo: \"rest\",\n\t\tPost: \"rest is powerful\",\n\t})\n\n\tresp, err := rest.SetTest(instance, map[string]string{\"to\": \"rest\"}, nil)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\targ := instance.HandleHello()\n\tif resp.Code != http.StatusOK {\n\t\tt.Error(\"should return ok\")\n\t}\n\tif arg.To != \"rest\" {\n\t\tt.Error(\"arg.To should be rest\")\n\t}\n\tif arg.Post != \"rest is powerful\" {\n\t\tt.Error(\"arg.Post should be 'rest is powerful'\")\n\t}\n\n\tresp, err = rest.SetTest(instance, map[string]string{\"to\": \"123\"}, nil)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\targ = instance.HandleHello()\n\tif resp.Code != http.StatusNotFound {\n\t\tt.Error(\"should return not found\")\n\t}\n}", "func (m *MockecsClient) Service(app, env, svc string) (*ecs.Service, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Service\", app, env, svc)\n\tret0, _ := ret[0].(*ecs.Service)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func runService(env *Env, build Build, lgi LogInterceptor) error 
{\n\tif IsPortInUse(env.Port()) {\n\t\treturn errors.Errorf(\"port %d in use; is keysd already running?\", env.Port())\n\t}\n\n\tcert, err := GenerateCertificate(env, true)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() { _ = DeleteCertificate(env) }()\n\n\tserveFn, closeFn, serveErr := NewServiceFn(env, build, cert, lgi)\n\tif serveErr != nil {\n\t\treturn serveErr\n\t}\n\tdefer closeFn()\n\treturn serveFn()\n}", "func TestServiceOrderID(t *testing.T) {\n\tConvey(\"TestServiceOrderID \", t, func() {\n\t\t// ios\n\t\tfor i := 0; i < 100; i++ {\n\t\t\tt.Logf(\"order no (%s)\", s.orderID())\n\t\t}\n\t})\n}", "func TestClusterServiceClassClient(t *testing.T) {\n\trootTestFunc := func(sType server.StorageType) func(t *testing.T) {\n\t\treturn func(t *testing.T) {\n\t\t\tconst name = \"test-serviceclass\"\n\t\t\tclient, _, shutdownServer := getFreshApiserverAndClient(t, sType.String(), func() runtime.Object {\n\t\t\t\treturn &servicecatalog.ClusterServiceClass{}\n\t\t\t})\n\t\t\tdefer shutdownServer()\n\n\t\t\tif err := testClusterServiceClassClient(sType, client, name); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t}\n\t}\n\t// TODO: Fix this for CRD.\n\t// https://github.com/kubernetes-incubator/service-catalog/issues/1256\n\t//\tfor _, sType := range storageTypes {\n\t//\t\tif !t.Run(sType.String(), rootTestFunc(sType)) {\n\t//\t\t\tt.Errorf(\"%q test failed\", sType)\n\t//\t\t}\n\t//\t}\n\t//\tfor _, sType := range storageTypes {\n\t//\t\tif !t.Run(sType.String(), rootTestFunc(sType)) {\n\t//\t\t\tt.Errorf(\"%q test failed\", sType)\n\t//\t\t}\n\t//\t}\n\tsType := server.StorageTypeEtcd\n\tif !t.Run(sType.String(), rootTestFunc(sType)) {\n\t\tt.Errorf(\"%q test failed\", sType)\n\t}\n}", "func Service(name, namespace string, so ...ServiceOption) *v1.Service {\n\ts := &v1.Service{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t\tNamespace: namespace,\n\t\t\tUID: \"cccccccc-cccc-cccc-cccc-cccccccccccc\",\n\t\t},\n\t}\n\tfor _, opt := range so 
{\n\t\topt(s)\n\t}\n\treturn s\n}", "func RunServiceName(t *testing.T, store SimpleStore) {\n\ttester := assert.New(t)\n\ttests := []struct {\n\t\twant posting.List\n\t\tname string\n\t\targ index.Field\n\t\twantErr bool\n\t}{\n\t\t{\n\t\t\tname: \"match gateway\",\n\t\t\targ: index.Field{\n\t\t\t\tKey: serviceName,\n\t\t\t\tTerm: []byte(\"gateway\"),\n\t\t\t},\n\t\t\twant: roaring.NewRange(0, 50),\n\t\t},\n\t\t{\n\t\t\tname: \"match webpage\",\n\t\t\targ: index.Field{\n\t\t\t\tKey: serviceName,\n\t\t\t\tTerm: []byte(\"webpage\"),\n\t\t\t},\n\t\t\twant: roaring.NewRange(50, 100),\n\t\t},\n\t\t{\n\t\t\tname: \"unknown field\",\n\t\t\twant: roaring.DummyPostingList,\n\t\t},\n\t\t{\n\t\t\tname: \"unknown term\",\n\t\t\targ: index.Field{\n\t\t\t\tKey: serviceName,\n\t\t\t\tTerm: []byte(\"unknown\"),\n\t\t\t},\n\t\t\twant: roaring.DummyPostingList,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tlist, err := store.MatchTerms(tt.arg)\n\t\t\tif tt.wantErr {\n\t\t\t\ttester.Error(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\ttester.NoError(err)\n\t\t\ttester.NotNil(list)\n\t\t\ttester.True(tt.want.Equal(list))\n\t\t})\n\t}\n}", "func NewService(t testing.TB) *Service {\n\tmock := &Service{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func (c *configuration) Service(clientSet ClientSet) *Service {\n\tif clientSet != nil {\n\t\treturn NewService(clientSet)\n\t}\n\treturn nil\n\n}", "func NewTodoListService(addr string) *Service {\n\ts := &Service{\n\t\th: http.DefaultServeMux,\n\t}\n\n\t// 使用默认设置初始化httpServer\n\ts.httpServer = &http.Server{\n\t\tAddr: addr,\n\t\tHandler: s.h,\n\t\tReadTimeout: 5 * time.Second,\n\t\tWriteTimeout: 5 * time.Second,\n\t\tMaxHeaderBytes: http.DefaultMaxHeaderBytes,\n\t}\n\n\t// 使用一个模拟的数据持久化服务\n\ts.storageService = NewMemStorageService()\n\n\t// 初始化接口配置\n\ts.initRouter()\n\treturn s\n}", "func recipeForServiceTest() model.Recipe {\n\treturn 
model.Recipe{\n\t\tType: model.BakingRecipe,\n\t\tTitle: \"Bake a Cake\",\n\t\tIngredients: \"100g Mehl\\n100g Zucker\\n50ml Wasser\",\n\t\tDescription: \"Einrühren.\\nBacken.\\nFertig!\",\n\t}\n}", "func TestServiceMethodNamesCamelCase(t *testing.T) {\n\ts := httptest.NewServer(NewHaberdasherV1Server(&HaberdasherService{}, nil))\n\tdefer s.Close()\n\n\tclient := NewHaberdasherV1ProtobufClient(s.URL, http.DefaultClient)\n\n\that, err := client.MakeHatV1(context.Background(), &MakeHatArgsV1_SizeV1{Inches: 1})\n\tif err != nil {\n\t\tt.Fatalf(\"go protobuf client err=%q\", err)\n\t}\n\tif hat.Size != 1 {\n\t\tt.Errorf(\"wrong hat size returned\")\n\t}\n}", "func NewService(c Config) *Service {\n \n s := &Service{}\n s.instance = c.Instance\n s.hostname = c.Hostname\n s.userAgent = c.UserAgent\n s.port = c.Endpoint\n s.router = mux.NewRouter()\n s.entityHandler = c.EntityHandler\n s.readTimeout = c.ReadTimeout\n s.writeTimeout = c.WriteTimeout\n s.idleTimeout = c.IdleTimeout\n \n if c.Name == \"\" {\n s.name = \"service\"\n }else{\n s.name = c.Name\n }\n \n if c.Debug || os.Getenv(\"GOREST_DEBUG\") == \"true\" {\n s.debug = true\n }\n \n if c.TraceRegexps != nil {\n if s.traceRequests == nil {\n s.traceRequests = make(map[string]*regexp.Regexp)\n }\n for _, e := range c.TraceRegexps {\n s.traceRequests[e.String()] = e\n }\n }\n if t := os.Getenv(\"GOREST_TRACE\"); t != \"\" {\n if s.traceRequests == nil {\n s.traceRequests = make(map[string]*regexp.Regexp)\n }\n for _, e := range strings.Split(t, \";\") {\n s.traceRequests[e] = regexp.MustCompile(e)\n }\n }\n if s.debug {\n for k, _ := range s.traceRequests {\n fmt.Println(\"rest: trace:\", k)\n }\n }\n \n s.suppress = make(map[string]struct{})\n if v := os.Getenv(\"GOREST_TRACE_SUPPRESS_HEADERS\"); v != \"\" {\n if !strings.EqualFold(v, \"none\") {\n for _, e := range strings.Split(v, \",\") {\n s.suppress[strings.ToLower(e)] = struct{}{}\n }\n }\n }else{\n s.suppress[\"authorization\"] = struct{}{}\n }\n \n return 
s\n}", "func TestCreate(t *testing.T) {\n\n}", "func Service(client client.Client) *ServiceContext {\n\treturn &ServiceContext{client: client}\n}", "func TestNewService(t *testing.T) {\n\tt.Parallel()\n\n\ttype args struct {\n\t\tpathSVC pathUsecase.Service\n\t}\n\n\ttestCases := []struct {\n\t\tname string\n\t\targs args\n\t\twant Service\n\t}{\n\t\t{\n\t\t\tname: \"Happy path\",\n\t\t\targs: args{\n\t\t\t\tpathSVC: nil,\n\t\t\t},\n\t\t\twant: &serviceImpl{},\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tif got := NewService(tc.args.pathSVC); !reflect.DeepEqual(got, tc.want) {\n\t\t\t\tt.Errorf(\"NewService(): %v, want: %v\", got, tc.want)\n\t\t\t}\n\t\t})\n\t}\n}", "func (c *ServiceCreate) Help() {\n\tlog.Println(\"Usage: create-service [service-name]\")\n}", "func TestMakePublicService(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\tsks *v1alpha1.ServerlessService\n\t\twant *corev1.Service\n\t}{{\n\t\tname: \"HTTP - serve\",\n\t\tsks: &v1alpha1.ServerlessService{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tNamespace: \"melon\",\n\t\t\t\tName: \"collie\",\n\t\t\t\tUID: \"1982\",\n\t\t\t\t// Those labels are propagated from the Revision->PA.\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tserving.RevisionLabelKey: \"collie\",\n\t\t\t\t\tserving.RevisionUID: \"1982\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tSpec: v1alpha1.ServerlessServiceSpec{\n\t\t\t\tProtocolType: networking.ProtocolHTTP1,\n\t\t\t\tMode: v1alpha1.SKSOperationModeServe,\n\t\t\t},\n\t\t},\n\t\twant: &corev1.Service{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tNamespace: \"melon\",\n\t\t\t\tName: \"collie\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t// Those should be propagated.\n\t\t\t\t\tserving.RevisionLabelKey: \"collie\",\n\t\t\t\t\tserving.RevisionUID: \"1982\",\n\t\t\t\t\tnetworking.SKSLabelKey: \"collie\",\n\t\t\t\t\tnetworking.ServiceTypeKey: \"Public\",\n\t\t\t\t},\n\t\t\t\tAnnotations: 
map[string]string{},\n\t\t\t\tOwnerReferences: []metav1.OwnerReference{{\n\t\t\t\t\tAPIVersion: v1alpha1.SchemeGroupVersion.String(),\n\t\t\t\t\tKind: \"ServerlessService\",\n\t\t\t\t\tName: \"collie\",\n\t\t\t\t\tUID: \"1982\",\n\t\t\t\t\tController: ptr.Bool(true),\n\t\t\t\t\tBlockOwnerDeletion: ptr.Bool(true),\n\t\t\t\t}},\n\t\t\t},\n\t\t\tSpec: corev1.ServiceSpec{\n\t\t\t\tPorts: []corev1.ServicePort{{\n\t\t\t\t\tName: networking.ServicePortNameHTTP1,\n\t\t\t\t\tProtocol: corev1.ProtocolTCP,\n\t\t\t\t\tPort: networking.ServiceHTTPPort,\n\t\t\t\t\tTargetPort: intstr.FromInt(networking.BackendHTTPPort),\n\t\t\t\t}},\n\t\t\t},\n\t\t},\n\t}, {\n\t\tname: \"HTTP - proxy\",\n\t\tsks: &v1alpha1.ServerlessService{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tNamespace: \"melon\",\n\t\t\t\tName: \"collie\",\n\t\t\t\tUID: \"1982\",\n\t\t\t\t// Those labels are propagated from the Revision->PA.\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tserving.RevisionLabelKey: \"collie\",\n\t\t\t\t\tserving.RevisionUID: \"1982\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tSpec: v1alpha1.ServerlessServiceSpec{\n\t\t\t\tMode: v1alpha1.SKSOperationModeProxy,\n\t\t\t\tProtocolType: networking.ProtocolHTTP1,\n\t\t\t},\n\t\t},\n\t\twant: &corev1.Service{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tNamespace: \"melon\",\n\t\t\t\tName: \"collie\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t// Those should be propagated.\n\t\t\t\t\tserving.RevisionLabelKey: \"collie\",\n\t\t\t\t\tserving.RevisionUID: \"1982\",\n\t\t\t\t\tnetworking.SKSLabelKey: \"collie\",\n\t\t\t\t\tnetworking.ServiceTypeKey: \"Public\",\n\t\t\t\t},\n\t\t\t\tAnnotations: map[string]string{},\n\t\t\t\tOwnerReferences: []metav1.OwnerReference{{\n\t\t\t\t\tAPIVersion: v1alpha1.SchemeGroupVersion.String(),\n\t\t\t\t\tKind: \"ServerlessService\",\n\t\t\t\t\tName: \"collie\",\n\t\t\t\t\tUID: \"1982\",\n\t\t\t\t\tController: ptr.Bool(true),\n\t\t\t\t\tBlockOwnerDeletion: ptr.Bool(true),\n\t\t\t\t}},\n\t\t\t},\n\t\t\tSpec: 
corev1.ServiceSpec{\n\t\t\t\tPorts: []corev1.ServicePort{{\n\t\t\t\t\tName: networking.ServicePortNameHTTP1,\n\t\t\t\t\tProtocol: corev1.ProtocolTCP,\n\t\t\t\t\tPort: networking.ServiceHTTPPort,\n\t\t\t\t\tTargetPort: intstr.FromInt(networking.BackendHTTPPort),\n\t\t\t\t}},\n\t\t\t},\n\t\t},\n\t}, {\n\t\tname: \"HTTP2 - serve\",\n\t\tsks: &v1alpha1.ServerlessService{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tNamespace: \"siamese\",\n\t\t\t\tName: \"dream\",\n\t\t\t\tUID: \"1988\",\n\t\t\t\t// Those labels are propagated from the Revision->PA.\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tserving.RevisionLabelKey: \"dream\",\n\t\t\t\t\tserving.RevisionUID: \"1988\",\n\t\t\t\t},\n\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\"cherub\": \"rock\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tSpec: v1alpha1.ServerlessServiceSpec{\n\t\t\t\tProtocolType: networking.ProtocolH2C,\n\t\t\t\tMode: v1alpha1.SKSOperationModeServe,\n\t\t\t},\n\t\t},\n\t\twant: &corev1.Service{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tNamespace: \"siamese\",\n\t\t\t\tName: \"dream\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t// Those should be propagated.\n\t\t\t\t\tserving.RevisionLabelKey: \"dream\",\n\t\t\t\t\tserving.RevisionUID: \"1988\",\n\t\t\t\t\tnetworking.SKSLabelKey: \"dream\",\n\t\t\t\t\tnetworking.ServiceTypeKey: \"Public\",\n\t\t\t\t},\n\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\"cherub\": \"rock\",\n\t\t\t\t},\n\t\t\t\tOwnerReferences: []metav1.OwnerReference{{\n\t\t\t\t\tAPIVersion: v1alpha1.SchemeGroupVersion.String(),\n\t\t\t\t\tKind: \"ServerlessService\",\n\t\t\t\t\tName: \"dream\",\n\t\t\t\t\tUID: \"1988\",\n\t\t\t\t\tController: ptr.Bool(true),\n\t\t\t\t\tBlockOwnerDeletion: ptr.Bool(true),\n\t\t\t\t}},\n\t\t\t},\n\t\t\tSpec: corev1.ServiceSpec{\n\t\t\t\tPorts: []corev1.ServicePort{{\n\t\t\t\t\tName: networking.ServicePortNameH2C,\n\t\t\t\t\tProtocol: corev1.ProtocolTCP,\n\t\t\t\t\tPort: networking.ServiceHTTP2Port,\n\t\t\t\t\tTargetPort: 
intstr.FromInt(networking.BackendHTTP2Port),\n\t\t\t\t}},\n\t\t\t},\n\t\t},\n\t}, {\n\t\tname: \"HTTP2 - serve - no backends\",\n\t\tsks: &v1alpha1.ServerlessService{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tNamespace: \"siamese\",\n\t\t\t\tName: \"dream\",\n\t\t\t\tUID: \"1988\",\n\t\t\t\t// Those labels are propagated from the Revision->PA.\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tserving.RevisionLabelKey: \"dream\",\n\t\t\t\t\tserving.RevisionUID: \"1988\",\n\t\t\t\t},\n\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\"cherub\": \"rock\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tSpec: v1alpha1.ServerlessServiceSpec{\n\t\t\t\tProtocolType: networking.ProtocolH2C,\n\t\t\t\tMode: v1alpha1.SKSOperationModeServe,\n\t\t\t},\n\t\t},\n\t\twant: &corev1.Service{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tNamespace: \"siamese\",\n\t\t\t\tName: \"dream\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t// Those should be propagated.\n\t\t\t\t\tserving.RevisionLabelKey: \"dream\",\n\t\t\t\t\tserving.RevisionUID: \"1988\",\n\t\t\t\t\tnetworking.SKSLabelKey: \"dream\",\n\t\t\t\t\tnetworking.ServiceTypeKey: \"Public\",\n\t\t\t\t},\n\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\"cherub\": \"rock\",\n\t\t\t\t},\n\t\t\t\tOwnerReferences: []metav1.OwnerReference{{\n\t\t\t\t\tAPIVersion: v1alpha1.SchemeGroupVersion.String(),\n\t\t\t\t\tKind: \"ServerlessService\",\n\t\t\t\t\tName: \"dream\",\n\t\t\t\t\tUID: \"1988\",\n\t\t\t\t\tController: ptr.Bool(true),\n\t\t\t\t\tBlockOwnerDeletion: ptr.Bool(true),\n\t\t\t\t}},\n\t\t\t},\n\t\t\tSpec: corev1.ServiceSpec{\n\t\t\t\tPorts: []corev1.ServicePort{{\n\t\t\t\t\tName: networking.ServicePortNameH2C,\n\t\t\t\t\tProtocol: corev1.ProtocolTCP,\n\t\t\t\t\tPort: networking.ServiceHTTP2Port,\n\t\t\t\t\tTargetPort: intstr.FromInt(networking.BackendHTTP2Port),\n\t\t\t\t}},\n\t\t\t},\n\t\t},\n\t}, {\n\t\tname: \"HTTP2 - proxy\",\n\t\tsks: &v1alpha1.ServerlessService{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tNamespace: 
\"siamese\",\n\t\t\t\tName: \"dream\",\n\t\t\t\tUID: \"1988\",\n\t\t\t\t// Those labels are propagated from the Revision->PA.\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tserving.RevisionLabelKey: \"dream\",\n\t\t\t\t\tserving.RevisionUID: \"1988\",\n\t\t\t\t},\n\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\"cherub\": \"rock\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tSpec: v1alpha1.ServerlessServiceSpec{\n\t\t\t\tProtocolType: networking.ProtocolH2C,\n\t\t\t\tMode: v1alpha1.SKSOperationModeProxy,\n\t\t\t},\n\t\t},\n\t\twant: &corev1.Service{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tNamespace: \"siamese\",\n\t\t\t\tName: \"dream\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t// Those should be propagated.\n\t\t\t\t\tserving.RevisionLabelKey: \"dream\",\n\t\t\t\t\tserving.RevisionUID: \"1988\",\n\t\t\t\t\tnetworking.SKSLabelKey: \"dream\",\n\t\t\t\t\tnetworking.ServiceTypeKey: \"Public\",\n\t\t\t\t},\n\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\"cherub\": \"rock\",\n\t\t\t\t},\n\t\t\t\tOwnerReferences: []metav1.OwnerReference{{\n\t\t\t\t\tAPIVersion: v1alpha1.SchemeGroupVersion.String(),\n\t\t\t\t\tKind: \"ServerlessService\",\n\t\t\t\t\tName: \"dream\",\n\t\t\t\t\tUID: \"1988\",\n\t\t\t\t\tController: ptr.Bool(true),\n\t\t\t\t\tBlockOwnerDeletion: ptr.Bool(true),\n\t\t\t\t}},\n\t\t\t},\n\t\t\tSpec: corev1.ServiceSpec{\n\t\t\t\tPorts: []corev1.ServicePort{{\n\t\t\t\t\tName: networking.ServicePortNameH2C,\n\t\t\t\t\tProtocol: corev1.ProtocolTCP,\n\t\t\t\t\tPort: networking.ServiceHTTP2Port,\n\t\t\t\t\tTargetPort: intstr.FromInt(networking.BackendHTTP2Port),\n\t\t\t\t}},\n\t\t\t},\n\t\t},\n\t}}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tgot := MakePublicService(test.sks)\n\t\t\tif diff := cmp.Diff(test.want, got); diff != \"\" {\n\t\t\t\tt.Errorf(\"Public K8s Service mismatch (-want, +got) = %v\", diff)\n\t\t\t}\n\t\t})\n\t}\n}", "func (t *ServiceTestFixture) CreateService(service *api.Service) (*api.Service, 
error) {\n\tresult, err := t.Client.Services(t.Namespace).Create(service)\n\tif err == nil {\n\t\tt.services[service.Name] = true\n\t}\n\treturn result, err\n}", "func Service(name string, start, stop func()) error {\n\tinService, err := svc.IsWindowsService()\n\tif err != nil || !inService {\n\t\treturn err\n\t}\n\tos.Chdir(filepath.Dir(os.Args[0]))\n\tif start != nil {\n\t\tstart()\n\t}\n\tserviceName = name\n\tstopFunc = stop\n\tgo runService(name)\n\treturn nil\n}", "func testResource(t *testing.T, s *Service) {\n\tp := &model.ArgRes{\n\t\tResID: 1233,\n\t}\n\tres, err := s.Resource(context.TODO(), p)\n\tif err != nil {\n\t\tt.Logf(\"testResource error(%v) \\n\", err)\n\t\treturn\n\t}\n\tt.Logf(\"testResource res: %+v \\n\", res)\n}", "func buildTestService(serviceDir string) (err error) {\n\twd, err := os.Getwd()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trelDir, err := filepath.Rel(wd, serviceDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbinDir := serviceDir + \"/bin\"\n\terr = os.MkdirAll(binDir, 0777)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tconst serverPath = \"cmd/test\"\n\terrChan := make(chan error)\n\tgo goBuild(\"test\", binDir, filepath.Join(relDir, serverPath), errChan)\n\n\terr = <-errChan\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (e Manager) Test() {\n\tfmt.Println(\"Manager\")\n}", "func setupFakeClient(url string) *Client {\n\treturn &Client{\n\t\tServiceEndpoint: ServiceEndpoint{\n\t\t\tRequestURL: url,\n\t\t\tDocsURL: \"some-docs-url\",\n\t\t},\n\t}\n}", "func TestGetUserService (t *testing.T){\n\tuser1, err := GetUserService(user_01.SocialNumber)\n\tassert.Equal(t, 200, err.HTTPStatus)\n\tassert.Equal(t, user_01, user1)\n\n\tuser2, err := GetUserService(user_01.SocialNumber)\n\tassert.Equal(t, 200, err.HTTPStatus)\n\tassert.Equal(t, user_01, user2)\n\n\tuser3, err := GetUserService(user_01.SocialNumber)\n\tassert.Equal(t, 200, err.HTTPStatus)\n\tassert.Equal(t, user_01, user3)\n\n\tuser4, err := 
GetUserService(user_01.SocialNumber)\n\tassert.Equal(t, 200, err.HTTPStatus)\n\tassert.Equal(t, user_01, user4)\n}", "func TestMain(m *testing.M) {\n\ttestServer = rpc.NewServer()\n\n\tts, e := rpcServer.NewTrackService(types.User{}, fakeTracker)\n\tif e != nil {\n\t\tlog.Fatal(\"NewTrackService error:\", e)\n\t}\n\ttestServer.RegisterName(\"TrackService\", ts)\n\n\tus, e := rpcServer.NewUserService(types.User{}, fakeUserer)\n\tif e != nil {\n\t\tlog.Fatal(\"NewUserService error:\", e)\n\t}\n\ttestServer.RegisterName(\"UserService\", us)\n\n\tlis, e = net.Listen(\"tcp\", \":0\")\n\tif e != nil {\n\t\tlog.Fatal(\"listen error:\", e)\n\t}\n\tlog.Println(\"listening on\", lis.Addr())\n\tgo http.Serve(lis, testServer)\n\n\t// dial client to it and create test client\n\trpcc, e := rpc.DialHTTP(\"tcp\", lis.Addr().String())\n\tif e != nil {\n\t\tlog.Fatal(\"rpc.DialHTTP:\", e)\n\t}\n\n\ttcClient, e := rpcClient.NewTracksClient(rpcc)\n\tif e != nil {\n\t\tlog.Fatal(\"rpcClient.NewTracksClient:\", e)\n\t}\n\n\ttuClient, e := rpcClient.NewUsersClient(rpcc)\n\tif e != nil {\n\t\tlog.Fatal(\"rpcClient.NewUsersClient:\", e)\n\t}\n\n\ttestClient = rpcClient.New(tcClient, tuClient)\n\n\tret := m.Run()\n\n\tif e := lis.Close(); e != nil {\n\t\tlog.Fatal(e)\n\t}\n\ttestServer = nil\n\n\tos.Exit(ret)\n}", "func HttpTest() {\n\tStartHttpServer()\n}", "func newService(namespace, name string) *v1.Service {\n\treturn &v1.Service{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t\tNamespace: namespace,\n\t\t\tLabels: labelMap(),\n\t\t},\n\t\tSpec: v1.ServiceSpec{\n\t\t\tSelector: labelMap(),\n\t\t\tPorts: []v1.ServicePort{\n\t\t\t\t{Name: \"port-1338\", Port: 1338, Protocol: \"TCP\", TargetPort: intstr.FromInt(1338)},\n\t\t\t\t{Name: \"port-1337\", Port: 1337, Protocol: \"TCP\", TargetPort: intstr.FromInt(1337)},\n\t\t\t},\n\t\t},\n\t}\n\n}", "func TestServiceGetMemoryReference(t *testing.T) {\n\tassert := assert.New(t)\n\tcollection := servicesCollection()\n\n\tvar service 
Service\n\tservice.Name = kong.String(\"my-service\")\n\tservice.ID = kong.String(\"first\")\n\terr := collection.Add(service)\n\tassert.Nil(err)\n\n\tse, err := collection.Get(\"first\")\n\tassert.Nil(err)\n\tassert.NotNil(se)\n\tse.Host = kong.String(\"example.com\")\n\n\tse, err = collection.Get(\"my-service\")\n\tassert.Nil(err)\n\tassert.NotNil(se)\n\tassert.Nil(se.Host)\n}", "func (s *systemtestSuite) TestServiceAddDeleteServiceVxlan(c *C) {\n\ts.testServiceAddDeleteService(c, \"vxlan\")\n}", "func (o UrlMapTestOutput) Service() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v UrlMapTest) *string { return v.Service }).(pulumi.StringPtrOutput)\n}", "func TestFleetctlRunService(t *testing.T) {\n\tserviceName := \"hello.service\"\n\n\tserviceFile, err := os.Create(path.Join(os.TempDir(), serviceName))\n\tif err != nil {\n\t\tt.Fatalf(\"Failed creating %v: %v\", serviceName, err)\n\t}\n\tdefer syscall.Unlink(serviceFile.Name())\n\n\tif _, err := io.WriteString(serviceFile, serviceData); err != nil {\n\t\tt.Fatalf(\"Failed writing %v: %v\", serviceFile.Name(), err)\n\t}\n\n\tdefer timeoutFleetctl(\"destroy\", serviceFile.Name())\n\n\tstdout, stderr, err := timeoutFleetctl(\"start\", serviceFile.Name())\n\tif err != nil {\n\t\tt.Fatalf(\"fleetctl start failed with error: %v\\nstdout: %s\\nstderr: %s\", err, stdout, stderr)\n\t}\n\n\tstdout, stderr, err = timeoutFleetctl(\"unload\", serviceName)\n\tif err != nil {\n\t\tt.Fatalf(\"fleetctl unload failed with error: %v\\nstdout: %s\\nstderr: %s\", err, stdout, stderr)\n\t}\n\n\tstdout, stderr, err = timeoutFleetctl(\"destroy\", serviceName)\n\tif err != nil {\n\t\tt.Fatalf(\"fleetctl destroy failed with error: %v\\nstdout: %s\\nstderr: %s\", err, stdout, stderr)\n\t}\n}", "func TestPutSlidesDocumentFromHtml(t *testing.T) {\n request := createPutSlidesDocumentFromHtmlRequest()\n e := initializeTest(\"PutSlidesDocumentFromHtml\", \"\", \"\")\n if e != nil {\n t.Errorf(\"Error: %v.\", e)\n return\n }\n c := 
getTestApiClient()\n r, _, e := c.DocumentApi.PutSlidesDocumentFromHtml(request)\n if e != nil {\n t.Errorf(\"Error: %v.\", e)\n return\n }\n if r.Code != 200 && r.Code != 201 {\n t.Errorf(\"Wrong response code: %d.\", r.Code)\n return\n }\n}", "func main() {\n\tlogs.InitLogs()\n\tdefer logs.FlushLogs()\n\n\tflag.Parse()\n\n\trestConfig, err := clientcmd.BuildConfigFromFlags(\"\", *kubeconfig)\n\tif err != nil {\n\t\tklog.Fatal(err)\n\t}\n\n\tc := service.New(restConfig)\n\tc.Run(genericapiserver.SetupSignalContext(), 1)\n}", "func Test_main(t *testing.T) {\n\tfmt.Println(\"addr: \", *addr)\n\tfmt.Println(\"network: \", *network)\n\n\t//go run main in another go routine\n\t/*\n\tconn, err := grpc.Dial(address, grpc.WithInsecure())\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\tdefer conn.Close()\n\n\tclient := pb.NewEchoServiceClient(conn)\n\tsimpleMessage, err := client.Echo(context.Background(), &pb.SimpleMessage{Id: \"4567\"})\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t} else {\n\t\tlog.Println(simpleMessage)\n\t}\n\t*/\n}", "func main() {\n\tws := new(restful.WebService)\n\tws.Route(ws.GET(\"/run\").To(hello))\n\trestful.Add(ws)\n\tlog.Fatal(http.ListenAndServe(\":8080\", nil))\n}", "func TestFleetctlRunService() error {\n\tserviceName := \"hello.service\"\n\n\tserviceFile, err := os.Create(path.Join(os.TempDir(), serviceName))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed creating %v: %v\", serviceName, err)\n\t}\n\tdefer syscall.Unlink(serviceFile.Name())\n\n\tif _, err := io.WriteString(serviceFile, serviceData); err != nil {\n\t\treturn fmt.Errorf(\"Failed writing %v: %v\", serviceFile.Name(), err)\n\t}\n\n\tmyid := MachineID()\n\n\tfleetChecker := func() error {\n\t\tstdout, stderr, err := Run(\"fleetctl\", \"list-machines\", \"-no-legend\", \"-l\", \"-fields\", \"machine\")\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"fleetctl list-machines failed with error: %v\\nstdout: %s\\nstderr: %s\", err, stdout, stderr)\n\t\t}\n\n\t\tif 
!strings.Contains(stdout, myid) {\n\t\t\treturn fmt.Errorf(\"fleetctl list-machines: machine ID %q missing from output\\nstdout: %v\\nstderr: %v\", myid, stdout, stderr)\n\t\t}\n\n\t\treturn nil\n\t}\n\n\tif err := util.Retry(5, 5*time.Second, fleetChecker); err != nil {\n\t\treturn err\n\t}\n\n\tstdout, stderr, err := Run(\"fleetctl\", \"start\", \"-block-attempts\", \"20\", serviceFile.Name())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"fleetctl start failed with error: %v\\nstdout: %s\\nstderr: %s\", err, stdout, stderr)\n\t}\n\n\tstdout, stderr, err = Run(\"fleetctl\", \"unload\", \"-block-attempts\", \"20\", serviceName)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"fleetctl unload failed with error: %v\\nstdout: %s\\nstderr: %s\", err, stdout, stderr)\n\t}\n\n\tstdout, stderr, err = Run(\"fleetctl\", \"destroy\", serviceName)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"fleetctl destroy failed with error: %v\\nstdout: %s\\nstderr: %s\", err, stdout, stderr)\n\t}\n\n\treturn nil\n}", "func NewService(bin Bin) *Service {\n\tbin.ShouldStillBeRunningAfterTest = true\n\treturn &Service{\n\t\tBin: bin,\n\t\tReadyForCleanup: make(chan struct{}),\n\t}\n}", "func TestGRPCService(t *testing.T) {\n\tt.Parallel()\n\t// Start a test gRPC server.\n\tendpoint := newEndpoint()\n\t_ = mock.NewBase64Plugin(t, endpoint.path)\n\n\tctx := testContext(t)\n\n\t// Create the gRPC client service.\n\tservice, err := NewGRPCService(ctx, endpoint.endpoint, 1*time.Second)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create envelope service, error: %v\", err)\n\t}\n\tdefer destroyService(service)\n\n\t// Call service to encrypt data.\n\tdata := []byte(\"test data\")\n\tcipher, err := service.Encrypt(data)\n\tif err != nil {\n\t\tt.Fatalf(\"failed when execute encrypt, error: %v\", err)\n\t}\n\n\t// Call service to decrypt data.\n\tresult, err := service.Decrypt(cipher)\n\tif err != nil {\n\t\tt.Fatalf(\"failed when execute decrypt, error: %v\", err)\n\t}\n\n\tif !reflect.DeepEqual(data, 
result) {\n\t\tt.Errorf(\"expect: %v, but: %v\", data, result)\n\t}\n}", "func (s *Service) Listen() {\n\n}", "func TestRealisClient_CreateService(t *testing.T) {\n\n\t// Create a single job\n\tjob := realis.NewJob().\n\t\tEnvironment(\"prod\").\n\t\tRole(\"vagrant\").\n\t\tName(\"create_service_test\").\n\t\tExecutorName(aurora.AURORA_EXECUTOR_NAME).\n\t\tExecutorData(string(thermosPayload)).\n\t\tCPU(.25).\n\t\tRAM(4).\n\t\tDisk(10).\n\t\tInstanceCount(3).\n\t\tIsService(true)\n\n\tsettings := realis.NewUpdateSettings()\n\tsettings.UpdateGroupSize = 2\n\tsettings.MinWaitInInstanceRunningMs = 5000\n\tjob.InstanceCount(3)\n\n\t_, result, err := r.CreateService(job, settings)\n\trequire.NoError(t, err)\n\tassert.NotNil(t, result)\n\n\t// Test asking the scheduler to backup a Snapshot\n\tt.Run(\"PauseJobUpdate\", func(t *testing.T) {\n\t\t_, err = r.PauseJobUpdate(result.GetKey(), \"\")\n\t\tassert.NoError(t, err)\n\t})\n\n\tt.Run(\"ResumeJobUpdate\", func(t *testing.T) {\n\t\t_, err = r.ResumeJobUpdate(result.GetKey(), \"\")\n\t\tassert.NoError(t, err)\n\t})\n\n\tvar ok bool\n\tvar mErr error\n\n\tif ok, mErr = monitor.JobUpdate(*result.GetKey(), 5, 240); !ok || mErr != nil {\n\t\t// Update may already be in a terminal state so don't check for error\n\t\t_, err := r.AbortJobUpdate(*result.GetKey(), \"Monitor timed out.\")\n\t\t_, err = r.KillJob(job.JobKey())\n\t\tassert.NoError(t, err)\n\t}\n\n\trequire.NoError(t, mErr)\n\tassert.True(t, ok)\n\n\t// Kill task test task after confirming it came up fine\n\t_, err = r.KillJob(job.JobKey())\n\tassert.NoError(t, err)\n\n\tsuccess, err := monitor.Instances(job.JobKey(), 0, 1, 50)\n\trequire.NoError(t, mErr)\n\tassert.True(t, success)\n\n\t// Create a client which will timeout and close the connection before receiving an answer\n\ttimeoutClient, err := realis.NewRealisClient(\n\t\trealis.SchedulerUrl(auroraURL),\n\t\trealis.BasicAuth(\"aurora\", \"secret\"),\n\t\trealis.TimeoutMS(5),\n\t)\n\trequire.NoError(t, 
err)\n\tdefer timeoutClient.Close()\n\n\t// Test case where http connection timeouts out.\n\tt.Run(\"TimeoutError\", func(t *testing.T) {\n\t\tjob.Name(\"createService_timeout\")\n\n\t\t// Make sure a timedout error was returned\n\t\t_, _, err = timeoutClient.CreateService(job, settings)\n\t\trequire.Error(t, err)\n\t\tassert.True(t, realis.IsTimeout(err))\n\n\t\tupdateReceivedQuery := aurora.JobUpdateQuery{\n\t\t\tRole: &job.JobKey().Role,\n\t\t\tJobKey: job.JobKey(),\n\t\t\tUpdateStatuses: aurora.ACTIVE_JOB_UPDATE_STATES,\n\t\t\tLimit: 1}\n\n\t\tupdateSummaries, err := monitor.JobUpdateQuery(updateReceivedQuery, time.Second*1, time.Second*50)\n\t\trequire.NoError(t, err)\n\n\t\trequire.Len(t, updateSummaries, 1)\n\n\t\tr.AbortJobUpdate(*updateSummaries[0].Key, \"Cleaning up\")\n\t\t_, err = r.KillJob(job.JobKey())\n\t\tassert.NoError(t, err)\n\n\t})\n\n\t// Test case where http connection timeouts out.\n\tt.Run(\"TimeoutError_BadPayload\", func(t *testing.T) {\n\t\t// Illegal payload\n\t\tjob.InstanceCount(-1)\n\t\tjob.Name(\"createService_timeout_bad_payload\")\n\n\t\t// Make sure a timedout error was returned\n\t\t_, _, err = timeoutClient.CreateService(job, settings)\n\t\trequire.Error(t, err)\n\t\tassert.True(t, realis.IsTimeout(err))\n\n\t\tsummary, err := r.GetJobUpdateSummaries(\n\t\t\t&aurora.JobUpdateQuery{\n\t\t\t\tRole: &job.JobKey().Role,\n\t\t\t\tJobKey: job.JobKey(),\n\t\t\t\tUpdateStatuses: aurora.ACTIVE_JOB_UPDATE_STATES})\n\t\tassert.NoError(t, err)\n\n\t\t// Payload should have been rejected, no update should exist\n\t\trequire.Len(t, summary.GetResult_().GetGetJobUpdateSummariesResult_().GetUpdateSummaries(), 0)\n\t})\n}", "func NewService(repository todo.Repository) Service {\n\treturn &service{repository}\n}" ]
[ "0.69882417", "0.6967048", "0.68721616", "0.68416625", "0.67049074", "0.66628826", "0.65935975", "0.65935975", "0.63858414", "0.6379433", "0.6224912", "0.6220467", "0.6087149", "0.60556716", "0.60545856", "0.60364485", "0.6027248", "0.6005909", "0.5961527", "0.59466285", "0.59282917", "0.5920667", "0.5902186", "0.58714706", "0.5868349", "0.58590364", "0.5814842", "0.5805941", "0.580039", "0.5790351", "0.57886374", "0.57884496", "0.57878524", "0.5775919", "0.5772469", "0.57676756", "0.5751763", "0.5712053", "0.5708329", "0.570713", "0.5706692", "0.5695174", "0.56948787", "0.5670456", "0.56676733", "0.565018", "0.5650172", "0.56263745", "0.56050366", "0.5603971", "0.5599072", "0.5593943", "0.5593703", "0.5588504", "0.55862635", "0.5586133", "0.55846816", "0.5584463", "0.55687475", "0.55663764", "0.555583", "0.55550635", "0.5545093", "0.5543743", "0.5535199", "0.5533457", "0.5531931", "0.553111", "0.55241436", "0.5521835", "0.5520307", "0.552", "0.5519142", "0.5515653", "0.5510568", "0.5491581", "0.5481208", "0.5468166", "0.5466575", "0.5460464", "0.5454465", "0.54523647", "0.5450122", "0.5449233", "0.5448226", "0.54464555", "0.54444927", "0.5441443", "0.5434489", "0.5432777", "0.54320383", "0.5422067", "0.54216975", "0.54205376", "0.54160535", "0.54122734", "0.5411877", "0.54106486", "0.5405187", "0.5399636" ]
0.6961546
2
Test that the service returns the correct protocol version
func TestServiceProtocolVersion(t *testing.T) { s := res.NewService("test") restest.AssertEqualJSON(t, "ProtocolVersion()", s.ProtocolVersion(), "1.2.2") }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestPeersService_Version(t *testing.T) {\n\tclient, mux, _, teardown := setupTest()\n\tdefer teardown()\n\n\tmux.HandleFunc(\"/peers/version\", func(writer http.ResponseWriter, request *http.Request) {\n\t\ttestMethod(t, request, \"GET\")\n\t\tfmt.Fprint(writer,\n\t\t\t`{\n\t\t\t \"version\": \"2.0.0\",\n\t\t\t \"success\": true\n\t\t\t}`)\n\t})\n\n\tresponseStruct, response, err := client.Peers.Version(context.Background())\n\ttestGeneralError(t, \"Peers.Version\", err)\n\ttestResponseUrl(t, \"Peers.Version\", response, \"/api/peers/version\")\n\ttestResponseStruct(t, \"Peers.Version\", responseStruct, &PeersVersion{\n\t\tSuccess: true,\n\t\tVersion: \"2.0.0\",\n\t})\n}", "func TestGetVersion(t *testing.T) {\n\tassert := assert.New(t)\n\n\tmockedTpmProvider := new(tpmprovider.MockedTpmProvider)\n\tmockedTpmProvider.On(\"Close\").Return(nil)\n\tmockedTpmProvider.On(\"NvIndexExists\", mock.Anything).Return(false, nil)\n\tmockedTpmProvider.On(\"NvRelease\", mock.Anything, mock.Anything).Return(nil)\n\tmockedTpmProvider.On(\"NvDefine\", mock.Anything, mock.Anything, mock.Anything).Return(nil)\n\tmockedTpmProvider.On(\"NvWrite\", mock.Anything, mock.Anything, mock.Anything).Return(nil)\n\n\tmockedTpmFactory := tpmprovider.MockedTpmFactory{TpmProvider: mockedTpmProvider}\n\n\ttrustAgentService, err := CreateTrustAgentService(CreateTestConfig(), mockedTpmFactory)\n\n\ttrustAgentService.router.HandleFunc(\"/version\", errorHandler(getVersion())).Methods(\"GET\")\n\n\t// test request\n\trequest, err := http.NewRequest(\"GET\", \"/version\", nil)\n\tassert.NoError(err)\n\n\trecorder := httptest.NewRecorder()\n\tresponse := recorder.Result()\n\ttrustAgentService.router.ServeHTTP(recorder, request)\n\tassert.Equal(http.StatusOK, response.StatusCode)\n\tfmt.Printf(\"Version: %s\\n\", recorder.Body.String())\n\tassert.NotEmpty(recorder.Body.String())\n}", "func TestGetVersion(t *testing.T) {\n\n\tversion, err := GetVersion()\n\n\tif err != 
nil{\n\t\tt.Error(err)\n\t}\n\n\tif version != \"v1\"{\n\t\tt.Errorf(\"app version not match: %s, expect: %s.\", version, \"v1\")\n\t}\n\n\tfmt.Println(version)\n}", "func (c *Client) ProtocolVersion() (*NumberResponse, error) {\n\trequest := c.newRequest(EthProtocolVersion)\n\n\tresponse := &NumberResponse{}\n\n\treturn response, c.send(request, response)\n}", "func TestClientVersion(t *testing.T) {\n\t// t.SkipNow()\n\tet := testutil.GetETH()\n\n\tclientVersion, err := et.ClientVersion()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tt.Log(\"clientVersion:\", clientVersion)\n}", "func TestGetVersion(t *testing.T) {\n\tv := \"0.0.0\"\n\tmaj, min, patch := getVersion(v)\n\n\tif maj != 0 && min != 0 && patch != 0 {\n\t\tt.Error(\"maj, min or patch are not set to 0\", maj, min, patch)\n\t}\n\n\tv = \"1.2.4\"\n\n\tmaj, min, patch = getVersion(v)\n\n\tif maj != 1 && min != 2 && patch != 4 {\n\t\tt.Error(\"maj, min or patch are not set to 1, 2, 4\", maj, min, patch)\n\t}\n}", "func (p OpenFlow10Protocol) GetVersion() uint8 {\n\treturn goloxi.VERSION_1_0\n}", "func protocolVersion(opts *ServeConfig) (int, Protocol, PluginSet) {\n\tprotoVersion := int(opts.ProtocolVersion)\n\tpluginSet := opts.Plugins\n\tprotoType := ProtocolNetRPC\n\t// Check if the client sent a list of acceptable versions\n\tvar clientVersions []int\n\tif vs := os.Getenv(\"PLUGIN_PROTOCOL_VERSIONS\"); vs != \"\" {\n\t\tfor _, s := range strings.Split(vs, \",\") {\n\t\t\tv, err := strconv.Atoi(s)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"server sent invalid plugin version %q\", s)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tclientVersions = append(clientVersions, v)\n\t\t}\n\t}\n\n\t// We want to iterate in reverse order, to ensure we match the newest\n\t// compatible plugin version.\n\tsort.Sort(sort.Reverse(sort.IntSlice(clientVersions)))\n\n\t// set the old un-versioned fields as if they were versioned plugins\n\tif opts.VersionedPlugins == nil {\n\t\topts.VersionedPlugins = 
make(map[int]PluginSet)\n\t}\n\n\tif pluginSet != nil {\n\t\topts.VersionedPlugins[protoVersion] = pluginSet\n\t}\n\n\t// Sort the version to make sure we match the latest first\n\tvar versions []int\n\tfor v := range opts.VersionedPlugins {\n\t\tversions = append(versions, v)\n\t}\n\n\tsort.Sort(sort.Reverse(sort.IntSlice(versions)))\n\n\t// See if we have multiple versions of Plugins to choose from\n\tfor _, version := range versions {\n\t\t// Record each version, since we guarantee that this returns valid\n\t\t// values even if they are not a protocol match.\n\t\tprotoVersion = version\n\t\tpluginSet = opts.VersionedPlugins[version]\n\n\t\t// If we have a configured gRPC server we should select a protocol\n\t\tif opts.GRPCServer != nil {\n\t\t\t// All plugins in a set must use the same transport, so check the first\n\t\t\t// for the protocol type\n\t\t\tfor _, p := range pluginSet {\n\t\t\t\tswitch p.(type) {\n\t\t\t\tcase GRPCPlugin:\n\t\t\t\t\tprotoType = ProtocolGRPC\n\t\t\t\tdefault:\n\t\t\t\t\tprotoType = ProtocolNetRPC\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tfor _, clientVersion := range clientVersions {\n\t\t\tif clientVersion == protoVersion {\n\t\t\t\treturn protoVersion, protoType, pluginSet\n\t\t\t}\n\t\t}\n\t}\n\n\t// Return the lowest version as the fallback.\n\t// Since we iterated over all the versions in reverse order above, these\n\t// values are from the lowest version number plugins (which may be from\n\t// a combination of the Handshake.ProtocolVersion and ServeConfig.Plugins\n\t// fields). 
This allows serving the oldest version of our plugins to a\n\t// legacy client that did not send a PLUGIN_PROTOCOL_VERSIONS list.\n\treturn protoVersion, protoType, pluginSet\n}", "func (rs *RunnerSuite) TestBadProtocolVersions(c *C) {\n\tplugin := &externalPlugin{\n\t\tpluginInstance: &PluginV1Instance{\n\t\t\tName: \"test\",\n\t\t},\n\t\tpluginCommand: &PluginV1Command{\n\t\t\tPrefix: ids.PluginID{\"test\", \"prefix\"},\n\t\t},\n\t\tlogger: log.WithField(\"\", \"\"),\n\t}\n\n\tctx := new(mocks.AgentContext)\n\tcfg := &config.Config{\n\t\tForceProtocolV2toV3: false,\n\t}\n\tctx.On(\"Config\").Return(cfg)\n\tctx.On(\"EntityKey\").Return(\"my-agent-id\")\n\tctx.On(\"HostnameResolver\").Return(newFixedHostnameResolver(\"foo.bar\", \"short\"))\n\tctx.On(\"IDLookup\").Return(newFixedIDLookup())\n\n\tplugin.Context = ctx\n\n\textraLabels := data.Map{}\n\tvar entityRewrite []data.EntityRewrite\n\tok, err := plugin.handleLine([]byte(`{}`), extraLabels, entityRewrite)\n\tc.Assert(err, NotNil)\n\tc.Assert(err.Error(), Equals, \"protocol_version is not defined\")\n\tc.Assert(ok, Equals, false)\n\tok, err = plugin.handleLine([]byte(`{\"protocol_version\": \"abc\"}`), extraLabels, entityRewrite)\n\tc.Assert(err, DeepEquals, errors.New(\"Protocol version 'abc' could not be parsed as an integer.\"))\n\tc.Assert(ok, Equals, false)\n\tok, err = plugin.handleLine([]byte(`{\"protocol_version\": \"abc\"}`), extraLabels, entityRewrite)\n\tc.Assert(err, DeepEquals, errors.New(\"Protocol version 'abc' could not be parsed as an integer.\"))\n\tc.Assert(ok, Equals, false)\n\tok, err = plugin.handleLine([]byte(`{\"protocol_version\": 1.5}`), extraLabels, entityRewrite)\n\tc.Assert(err, DeepEquals, errors.New(\"Protocol version 1.5 was a float, not an integer.\"))\n\tc.Assert(ok, Equals, false)\n\tok, err = plugin.handleLine([]byte(`{\"protocol_version\": \"1500\"}`), extraLabels, entityRewrite)\n\tc.Assert(err, DeepEquals, errors.New(\"unsupported protocol version: 1500. 
Please try updating the Agent to the newest version.\"))\n\tc.Assert(ok, Equals, false)\n}", "func Test_downloadURLVersion(t *testing.T) {\n\ttests := []struct {\n\t\tin string\n\t\twant string\n\t}{\n\t\t{\n\t\t\tin: \"1.17.0\",\n\t\t\twant: \"1.17.0\",\n\t\t},\n\t\t{\n\t\t\tin: \"1.18.0\",\n\t\t\twant: \"1.18.0\",\n\t\t},\n\t\t{\n\t\t\tin: \"2.0.0\",\n\t\t\twant: \"2.0.0\",\n\t\t},\n\t\t{\n\t\t\tin: \"v2.0.0\",\n\t\t\twant: \"2.0.0\",\n\t\t},\n\t\t{\n\t\t\tin: \"v1.12.13+hotfix.8\",\n\t\t\twant: \"v1.12.13+hotfix.8\",\n\t\t},\n\t\t{\n\t\t\tin: \"1.12.13+hotfix.8\",\n\t\t\twant: \"v1.12.13+hotfix.8\",\n\t\t},\n\t\t{\n\t\t\tin: \"v1.12.0\",\n\t\t\twant: \"v1.12.0\",\n\t\t},\n\t\t{\n\t\t\tin: \"1.12.0\",\n\t\t\twant: \"v1.12.0\",\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.in, func(t *testing.T) {\n\t\t\tif got := downloadURLVersion(tt.in); got != tt.want {\n\t\t\t\tt.Errorf(\"downloadURLVersion() = %v, want %v\", got, tt.want)\n\t\t\t}\n\t\t})\n\t}\n}", "func (a *DefaultApiService) VersionCheck(ctx _context.Context) (ServiceVersion, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue ServiceVersion\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/version\"\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept 
header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func Test_LatestVersion(t *testing.T) {\n\tmockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Content-Type\", \"text/plain\")\n\t\tw.WriteHeader(http.StatusOK)\n\t\tw.Write([]byte(\"0.6.1\\n\"))\n\t}))\n\tdefer mockServer.Close()\n\n\tversion, err := latestVersion(mockServer.URL)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\ttestVersion := semver.New(\"0.6.1\")\n\tif !version.Equal(*testVersion) {\n\t\tt.Error(\"Version equality check failed.\")\n\t}\n}", "func (e *Ethereum) ProtocolVersion() 
(uint64, error) {\n\tvar resProtocolVer string\n\terr := e.rpcClient.CallContext(e.ctx, &resProtocolVer, \"eth_protocolVersion\")\n\tif err != nil {\n\t\treturn 0, errors.Wrapf(err, \"fail to call rpc.CallContext(eth_protocolVersion) error: %s\", err)\n\t}\n\th, err := e.DecodeBig(resProtocolVer)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn h.Uint64(), err\n}", "func TestVersion(t *testing.T) {\n\t// Get Vault client\n\tvaultClientConfig := vault.DefaultConfig()\n\tvaultClientConfig.Address = vaultAddress\n\tv, err := vault.NewClient(vaultClientConfig)\n\tif err != nil {\n\t\tt.Fatalf(err.Error())\n\t}\n\tv.SetToken(\"root\")\n\tvl := v.Logical()\n\n\t// Get Pachyderm version from plugin\n\tsecret, err := vl.Read(\"/pachyderm/version\")\n\tif err != nil {\n\t\tt.Fatalf(err.Error())\n\t}\n\tif _, ok := secret.Data[\"client-version\"]; !ok {\n\t\tt.Fatalf(\"could not get client version from Pachyderm plugin\")\n\t}\n\tif _, ok := secret.Data[\"server-version\"]; !ok {\n\t\tt.Fatalf(\"could not get server version from Pachyderm plugin\")\n\t}\n\n\t// Test client-only endpoint\n\tsecret, err = vl.Read(\"/pachyderm/version/client-only\")\n\tif _, ok := secret.Data[\"client-version\"]; !ok {\n\t\tt.Fatalf(\"could not get client version from Pachyderm plugin (client-only)\")\n\t}\n\tif _, ok := secret.Data[\"server-version\"]; ok {\n\t\tt.Fatalf(\"got unexpected server version from Pachyderm plugin (client-only)\")\n\t}\n}", "func (_m *MockBackend) ProtocolVersion() string {\n\tret := _m.Called()\n\n\tvar r0 string\n\tif rf, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = rf()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\n\treturn r0\n}", "func TestHandleGetVersion(t *testing.T) {\n\tsv := ServerVersion{Version:\"v1\", IP:\"127.0.0.1\", Port:8080}\n\n\tmux := http.NewServeMux()\n\tmux.HandleFunc(\"/version\", sv.handGetVersion)\n\n\twriter := httptest.NewRecorder()\n\treq, _ := http.NewRequest(\"GET\", \"/version\", nil)\n\tmux.ServeHTTP(writer, 
req)\n\n\tfmt.Println(writer.Body.String())\n}", "func (api *PublicEthereumAPI) ProtocolVersion() hexutil.Uint {\n\tapi.logger.Debug(\"eth_protocolVersion\")\n\treturn hexutil.Uint(ethermint.ProtocolVersion)\n}", "func TestGetVersions4A(t *testing.T) {\n}", "func (o *Service) GetServiceProtocolOk() (string, bool) {\n\tif o == nil || o.ServiceProtocol == nil {\n\t\tvar ret string\n\t\treturn ret, false\n\t}\n\treturn *o.ServiceProtocol, true\n}", "func TestVersion(t *testing.T) {\n\t//fmt.Println(\"EliteProvision [\" + Version() + \"]\")\n}", "func ProtocolVersion(major int, minor int) int {\n\tif minor > 999 {\n\t\tminor = 999\n\t}\n\treturn major*1000 + minor\n}", "func (s *PublicHarmonyAPI) ProtocolVersion() hexutil.Uint {\n\treturn hexutil.Uint(proto.ProtocolVersion)\n}", "func (r *Ricochet) negotiateVersion(conn net.Conn, outbound bool) (*OpenConnection, error) {\n\tversions := []byte{0x49, 0x4D, 0x01, 0x01}\n\n\t// Outbound side of the connection sends a list of supported versions\n\tif outbound {\n\t\tif n, err := conn.Write(versions); err != nil || n < len(versions) {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tres := make([]byte, 1)\n\t\tif _, err := io.ReadAtLeast(conn, res, len(res)); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif res[0] != 0x01 {\n\t\t\treturn nil, errors.New(\"unsupported protocol version\")\n\t\t}\n\t} else {\n\t\t// Read version response header\n\t\theader := make([]byte, 3)\n\t\tif _, err := io.ReadAtLeast(conn, header, len(header)); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif header[0] != versions[0] || header[1] != versions[1] || header[2] < 1 {\n\t\t\treturn nil, errors.New(\"invalid protocol response\")\n\t\t}\n\n\t\t// Read list of supported versions (which is header[2] bytes long)\n\t\tversionList := make([]byte, header[2])\n\t\tif _, err := io.ReadAtLeast(conn, versionList, len(versionList)); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tselectedVersion := byte(0xff)\n\t\tfor _, v := range versionList {\n\t\t\tif v 
== 0x01 {\n\t\t\t\tselectedVersion = v\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif n, err := conn.Write([]byte{selectedVersion}); err != nil || n < 1 {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif selectedVersion == 0xff {\n\t\t\treturn nil, errors.New(\"no supported protocol version\")\n\t\t}\n\t}\n\n\toc := new(OpenConnection)\n\toc.Init(outbound, conn)\n\treturn oc, nil\n}", "func TestVersion(t *testing.T) {\n\tfor _, v := range versionTests {\n\t\tp, e := model.ParseVersion(v[0])\n\t\tassert.Nil(t, e, \"Should have parsed %s\", v)\n\t\tassert.Equal(t, p.String(), v[1], \"Should be equal %s==%s\", p.String(), v)\n\t}\n}", "func TestProtocolVersionMarshalUnMarshal(t *testing.T) {\n\tt.Parallel()\n\n\ttestVersions := [...]ProtocolVersion{\n\t\tProtocolVersionLegacy,\n\t\tProtocolVersionMultiLoopOut,\n\t\tProtocolVersionSegwitLoopIn,\n\t\tProtocolVersionPreimagePush,\n\t\tProtocolVersionUserExpiryLoopOut,\n\t}\n\n\tbogusVersion := []byte{0xFF, 0xFF, 0xFF, 0xFF}\n\tinvalidSlice := []byte{0xFF, 0xFF, 0xFF}\n\n\tfor i := 0; i < len(testVersions); i++ {\n\t\ttestVersion := testVersions[i]\n\n\t\t// Test that unmarshal(marshal(v)) == v.\n\t\tversion, err := UnmarshalProtocolVersion(\n\t\t\tMarshalProtocolVersion(testVersion),\n\t\t)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, testVersion, version)\n\n\t\t// Test that unmarshalling a nil slice returns the default\n\t\t// version along with no error.\n\t\tversion, err = UnmarshalProtocolVersion(nil)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, ProtocolVersionUnrecorded, version)\n\n\t\t// Test that unmarshalling an unknown version returns the\n\t\t// default version along with an error.\n\t\tversion, err = UnmarshalProtocolVersion(bogusVersion)\n\t\trequire.Error(t, err, \"expected invalid version\")\n\t\trequire.Equal(t, ProtocolVersionUnrecorded, version)\n\n\t\t// Test that unmarshalling an invalid slice returns the\n\t\t// default version along with an error.\n\t\tversion, err = 
UnmarshalProtocolVersion(invalidSlice)\n\t\trequire.Error(t, err, \"expected invalid size\")\n\t\trequire.Equal(t, ProtocolVersionUnrecorded, version)\n\t}\n}", "func TestVersion(t *testing.T) {\n\n\ttests := []struct {\n\t\tInput driver.Version\n\t\tMajor int\n\t\tMinor int\n\t\tSub string\n\t\tSubInt int\n\t\tSubIsInt bool\n\t}{\n\t\t{\"1.2.3\", 1, 2, \"3\", 3, true},\n\t\t{\"\", 0, 0, \"\", 0, false},\n\t\t{\"1.2.3a\", 1, 2, \"3a\", 0, false},\n\t\t{\"13.12\", 13, 12, \"\", 0, false},\n\t}\n\n\tfor _, test := range tests {\n\t\tif v := test.Input.Major(); v != test.Major {\n\t\t\tt.Errorf(\"Major failed for '%s', expected %d, got %d\", test.Input, test.Major, v)\n\t\t}\n\t\tif v := test.Input.Minor(); v != test.Minor {\n\t\t\tt.Errorf(\"Minor failed for '%s', expected %d, got %d\", test.Input, test.Minor, v)\n\t\t}\n\t\tif v := test.Input.Sub(); v != test.Sub {\n\t\t\tt.Errorf(\"Sub failed for '%s', expected '%s', got '%s'\", test.Input, test.Sub, v)\n\t\t}\n\t\tif v, vIsInt := test.Input.SubInt(); vIsInt != test.SubIsInt || v != test.SubInt {\n\t\t\tt.Errorf(\"SubInt failed for '%s', expected (%d,%v), got (%d,%v)\", test.Input, test.SubInt, test.SubIsInt, v, vIsInt)\n\t\t}\n\t}\n}", "func (m *MockServerStreamConnection) Protocol() api.Protocol {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Protocol\")\n\tret0, _ := ret[0].(api.Protocol)\n\treturn ret0\n}", "func TestVersion(t *testing.T) {\n\tassert := audit.NewTestingAssertion(t, true)\n\t// Setup the test server.\n\tmux := newMultiplexer(assert)\n\tts := restaudit.StartServer(mux, assert)\n\tdefer ts.Close()\n\terr := mux.Register(\"test\", \"json\", NewTestHandler(\"json\", assert))\n\tassert.Nil(err)\n\t// Perform test requests.\n\treq := restaudit.NewRequest(\"GET\", \"/base/test/json/4711?foo=0815\")\n\treq.AddHeader(restaudit.HeaderAccept, restaudit.ApplicationJSON)\n\tresp := ts.DoRequest(req)\n\tresp.AssertStatusEquals(200)\n\tresp.AssertHeaderEquals(\"Version\", \"1.0.0\")\n\n\treq = 
restaudit.NewRequest(\"GET\", \"/base/test/json/4711?foo=0815\")\n\treq.AddHeader(restaudit.HeaderAccept, restaudit.ApplicationJSON)\n\treq.AddHeader(\"Version\", \"2\")\n\tresp = ts.DoRequest(req)\n\tresp.AssertStatusEquals(200)\n\tresp.AssertHeaderEquals(\"Version\", \"2.0.0\")\n\n\treq = restaudit.NewRequest(\"GET\", \"/base/test/json/4711?foo=0815\")\n\treq.AddHeader(restaudit.HeaderAccept, restaudit.ApplicationJSON)\n\treq.AddHeader(\"Version\", \"3.0\")\n\tresp = ts.DoRequest(req)\n\tresp.AssertStatusEquals(200)\n\tresp.AssertHeaderEquals(\"Version\", \"4.0.0-alpha\")\n}", "func (m *MockClientStreamConnection) Protocol() api.Protocol {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Protocol\")\n\tret0, _ := ret[0].(api.Protocol)\n\treturn ret0\n}", "func (m *MockStreamConnection) Protocol() api.Protocol {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Protocol\")\n\tret0, _ := ret[0].(api.Protocol)\n\treturn ret0\n}", "func TestVersion(t *testing.T) {\n\tvers := Version()\n\tif len(vers) == 0 {\n\t\tt.Error(\"version string is not present\")\n\t}\n}", "func TestConnectRejectsVersions(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\tg := newTestingGateway(t)\n\tdefer g.Close()\n\t// Setup a listener that mocks Gateway.acceptConn, but sends the\n\t// version sent over mockVersionChan instead of build.Version.\n\tlistener, err := net.Listen(\"tcp\", \"localhost:0\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer listener.Close()\n\n\ttests := []struct {\n\t\tversion string\n\t\terrWant string\n\t\tlocalErrWant string\n\t\tinvalidVersion bool\n\t\tinsufficientVersion bool\n\t\tmsg string\n\t\t// version required for this test\n\t\tversionRequired string\n\t\t// 1.2.0 sessionHeader extension to handshake protocol\n\t\tgenesisID types.BlockID\n\t\tuniqueID gatewayID\n\t}{\n\t\t// Test that Connect fails when the remote peer's version is \"reject\".\n\t\t{\n\t\t\tversion: \"reject\",\n\t\t\terrWant: errPeerRejectedConn.Error(),\n\t\t\tmsg: 
\"Connect should fail when the remote peer rejects the connection\",\n\t\t},\n\t\t// Test that Connect fails when the remote peer's version is ascii gibberish.\n\t\t{\n\t\t\tversion: \"foobar\",\n\t\t\tinvalidVersion: true,\n\t\t\tmsg: \"Connect should fail when the remote peer's version is ascii gibberish\",\n\t\t},\n\t\t// Test that Connect fails when the remote peer's version is utf8 gibberish.\n\t\t{\n\t\t\tversion: \"世界\",\n\t\t\tinvalidVersion: true,\n\t\t\tmsg: \"Connect should fail when the remote peer's version is utf8 gibberish\",\n\t\t},\n\t\t// Test that Connect fails when the remote peer's version is < 0.4.0 (0).\n\t\t{\n\t\t\tversion: \"0\",\n\t\t\tinsufficientVersion: true,\n\t\t\tmsg: \"Connect should fail when the remote peer's version is 0\",\n\t\t},\n\t\t{\n\t\t\tversion: \"0.0.0\",\n\t\t\tinsufficientVersion: true,\n\t\t\tmsg: \"Connect should fail when the remote peer's version is 0.0.0\",\n\t\t},\n\t\t{\n\t\t\tversion: \"0000.0000.0000\",\n\t\t\tinsufficientVersion: true,\n\t\t\tmsg: \"Connect should fail when the remote peer's version is 0000.0000.0000\",\n\t\t},\n\t\t{\n\t\t\tversion: \"0.3.9\",\n\t\t\tinsufficientVersion: true,\n\t\t\tmsg: \"Connect should fail when the remote peer's version is 0.3.9\",\n\t\t},\n\t\t{\n\t\t\tversion: \"0.3.9999\",\n\t\t\tinsufficientVersion: true,\n\t\t\tmsg: \"Connect should fail when the remote peer's version is 0.3.9999\",\n\t\t},\n\t\t{\n\t\t\tversion: \"0.3.9.9.9\",\n\t\t\tinsufficientVersion: true,\n\t\t\tmsg: \"Connect should fail when the remote peer's version is 0.3.9.9.9\",\n\t\t},\n\t\t// Test that Connect succeeds when the remote peer's version is 0.4.0.\n\t\t{\n\t\t\tversion: \"0.4.0\",\n\t\t\tmsg: \"Connect should succeed when the remote peer's version is 0.4.0\",\n\t\t},\n\t\t// Test that Connect succeeds when the remote peer's version is > 0.4.0.\n\t\t{\n\t\t\tversion: \"0.9.0\",\n\t\t\tmsg: \"Connect should succeed when the remote peer's version is 0.9.0\",\n\t\t},\n\t\t// Test that Connect 
/could/ succeed when the remote peer's version is >= 1.3.0.\n\t\t{\n\t\t\tversion: minimumAcceptablePeerVersion,\n\t\t\tmsg: \"Connect should succeed when the remote peer's version is 1.3.0 and sessionHeader checks out\",\n\t\t\tuniqueID: func() (id gatewayID) { fastrand.Read(id[:]); return }(),\n\t\t\tgenesisID: types.GenesisID,\n\t\t\tversionRequired: minimumAcceptablePeerVersion,\n\t\t},\n\t\t{\n\t\t\tversion: minimumAcceptablePeerVersion,\n\t\t\tmsg: \"Connect should not succeed when peer is connecting to itself\",\n\t\t\tuniqueID: g.staticID,\n\t\t\tgenesisID: types.GenesisID,\n\t\t\terrWant: errOurAddress.Error(),\n\t\t\tlocalErrWant: errOurAddress.Error(),\n\t\t\tversionRequired: minimumAcceptablePeerVersion,\n\t\t},\n\t}\n\tfor testIndex, tt := range tests {\n\t\tif tt.versionRequired != \"\" && build.VersionCmp(build.Version, tt.versionRequired) < 0 {\n\t\t\tcontinue // skip, as we do not meet the required version\n\t\t}\n\n\t\t// create the listener\n\t\tdoneChan := make(chan struct{})\n\t\tgo func() {\n\t\t\tdefer close(doneChan)\n\t\t\tconn, err := listener.Accept()\n\t\t\tif err != nil {\n\t\t\t\tpanic(fmt.Sprintf(\"test #%d failed: %s\", testIndex, err))\n\t\t\t}\n\t\t\tremoteVersion, err := acceptVersionHandshake(conn, tt.version)\n\t\t\tif err != nil {\n\t\t\t\tpanic(fmt.Sprintf(\"test #%d failed: %s\", testIndex, err))\n\t\t\t}\n\t\t\tif remoteVersion != build.Version {\n\t\t\t\tpanic(fmt.Sprintf(\"test #%d failed: remoteVersion != build.Version\", testIndex))\n\t\t\t}\n\n\t\t\tif build.VersionCmp(tt.version, minimumAcceptablePeerVersion) >= 0 {\n\t\t\t\tourHeader := sessionHeader{\n\t\t\t\t\tGenesisID: tt.genesisID,\n\t\t\t\t\tUniqueID: tt.uniqueID,\n\t\t\t\t\tNetAddress: modules.NetAddress(conn.LocalAddr().String()),\n\t\t\t\t}\n\t\t\t\t_, err = exchangeRemoteHeader(conn, ourHeader)\n\t\t\t\texchangeOurHeader(conn, ourHeader)\n\t\t\t} else {\n\t\t\t\tvar dialbackPort string\n\t\t\t\terr = encoding.ReadObject(conn, &dialbackPort, 
13)\n\t\t\t}\n\t\t\tif (err == nil && tt.localErrWant != \"\") || (err != nil && !strings.Contains(err.Error(), tt.localErrWant)) {\n\t\t\t\tpanic(fmt.Sprintf(\"test #%d failed: %v != %v\", testIndex, tt.localErrWant, err))\n\t\t\t}\n\t\t}()\n\t\terr = g.Connect(modules.NetAddress(listener.Addr().String()))\n\t\tswitch {\n\t\tcase tt.invalidVersion:\n\t\t\t// Check that the error is the expected type.\n\t\t\tif _, ok := err.(invalidVersionError); !ok {\n\t\t\t\tt.Fatalf(\"expected Connect to error with invalidVersionError: %s\", tt.msg)\n\t\t\t}\n\t\tcase tt.insufficientVersion:\n\t\t\t// Check that the error is the expected type.\n\t\t\tif _, ok := err.(insufficientVersionError); !ok {\n\t\t\t\tt.Fatalf(\"expected Connect to error with insufficientVersionError: %s\", tt.msg)\n\t\t\t}\n\t\tdefault:\n\t\t\t// Check that the error is the expected error.\n\t\t\tif (err == nil && tt.errWant != \"\") || (err != nil && !strings.Contains(err.Error(), tt.errWant)) {\n\t\t\t\tt.Fatalf(\"expected Connect to error with '%v', but got '%v': %s\", tt.errWant, err, tt.msg)\n\t\t\t}\n\t\t}\n\t\t<-doneChan\n\t\tg.Disconnect(modules.NetAddress(listener.Addr().String()))\n\t}\n}", "func (t *grpcTransport) ProtocolVersion() centrifuge.ProtocolVersion {\n\treturn centrifuge.ProtocolVersion2\n}", "func (p *Peer) ProtocolVersion() uint32 {\n\tp.flagsMtx.Lock()\n\tprotocolVersion := p.protocolVersion\n\tp.flagsMtx.Unlock()\n\n\treturn protocolVersion\n}", "func (m *MockConnectionPool) Protocol() api.Protocol {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Protocol\")\n\tret0, _ := ret[0].(api.Protocol)\n\treturn ret0\n}", "func (v *Client) Protocol() string {\n\tif v.protocol == \"\" {\n\t\tif v.req.TLS != nil {\n\t\t\tv.protocol = \"https\"\n\t\t} else {\n\t\t\tv.protocol = \"http\"\n\t\t}\n\t}\n\treturn v.protocol\n}", "func (c ClientProperties) ProtocolVersion() uint8 {\n\treturn c[4]\n}", "func TestMessage(t *testing.T) {\n\t// Create the various types of messages to test.\n\n\t// 
MsgVersion.\n\taddrYou := &net.TCPAddr{IP: net.ParseIP(\"192.168.0.1\"), Port: 8333}\n\tyou, err := wire.NewNetAddress(addrYou, 1, wire.SFNodeNetwork)\n\tif err != nil {\n\t\tt.Errorf(\"NewNetAddress: %v\", err)\n\t}\n\tyou.Timestamp = time.Time{} // Version message has zero value timestamp.\n\taddrMe := &net.TCPAddr{IP: net.ParseIP(\"127.0.0.1\"), Port: 8333}\n\tme, err := wire.NewNetAddress(addrMe, 1, wire.SFNodeNetwork)\n\tif err != nil {\n\t\tt.Errorf(\"NewNetAddress: %v\", err)\n\t}\n\t// A version message that is decoded comes out a little different than\n\t// the original data structure, so we need to create a slightly different\n\t// message to test against.\n\tme.Timestamp = time.Time{} // Version message has zero value timestamp.\n\tyouExpected, err := wire.NewNetAddress(addrYou, 0, wire.SFNodeNetwork)\n\tif err != nil {\n\t\tt.Errorf(\"NewNetAddress: %v\", err)\n\t}\n\tyouExpected.Timestamp = time.Time{} // Version message has zero value timestamp.\n\tmeExpected, err := wire.NewNetAddress(addrMe, 0, wire.SFNodeNetwork)\n\tif err != nil {\n\t\tt.Errorf(\"NewNetAddress: %v\", err)\n\t}\n\tmeExpected.Timestamp = time.Time{} // Version message has zero value timestamp.\n\tmsgVersion := wire.NewMsgVersion(me, you, 123123, []uint32{1})\n\tmsgVersionExpected := wire.NewMsgVersion(meExpected, youExpected, 123123, []uint32{1})\n\n\tmsgVerack := wire.NewMsgVerAck()\n\tmsgPong := wire.NewMsgPong()\n\tmsgAddr := wire.NewMsgAddr()\n\tmsgInv := wire.NewMsgInv()\n\tmsgGetData := wire.NewMsgGetData()\n\n\t// ripe-based getpubkey message\n\tripeBytes := make([]byte, 20)\n\tripeBytes[0] = 1\n\tripe, err := wire.NewRipeHash(ripeBytes)\n\tif err != nil {\n\t\tt.Fatalf(\"could not make a ripe hash %s\", err)\n\t}\n\texpires := time.Unix(0x495fab29, 0) // 2009-01-03 12:15:05 -0600 CST)\n\tmsgGetPubKey := wire.NewMsgGetPubKey(123123, expires, 2, 1, ripe, nil)\n\n\tpub1Bytes, pub2Bytes := make([]byte, 64), make([]byte, 64)\n\tpub2Bytes[0] = 1\n\tpub1, err := 
wire.NewPubKey(pub1Bytes)\n\tif err != nil {\n\t\tt.Fatalf(\"could not create a pubkey %s\", err)\n\t}\n\tpub2, err := wire.NewPubKey(pub2Bytes)\n\tif err != nil {\n\t\tt.Fatalf(\"could not create a pubkey %s\", err)\n\t}\n\tmsgPubKey := wire.NewMsgPubKey(123123, expires, 2, 1, 0, pub1, pub2, 0, 0, nil, nil, nil)\n\n\tenc := make([]byte, 99)\n\tmsgMsg := wire.NewMsgMsg(123123, expires, 2, 1, enc, 0, 0, 0, nil, nil, 0, 0, nil, 0, nil, nil, nil)\n\n\tmsgBroadcast := wire.NewMsgBroadcast(123123, expires, 2, 1, nil, enc, 0, 0, 0, nil, nil, 0, 0, 0, nil, nil)\n\n\ttests := []struct {\n\t\tin wire.Message // Value to encode\n\t\tout wire.Message // Expected decoded value\n\t\tbmnet wire.BitmessageNet // Network to use for wire.encoding\n\t\tbytes int // Expected num bytes read/written\n\t}{\n\t\t{msgVersion, msgVersionExpected, wire.MainNet, 119},\n\t\t{msgVerack, msgVerack, wire.MainNet, 24},\n\t\t{msgPong, msgPong, wire.MainNet, 24},\n\t\t{msgAddr, msgAddr, wire.MainNet, 25},\n\t\t{msgInv, msgInv, wire.MainNet, 25},\n\t\t{msgGetData, msgGetData, wire.MainNet, 25},\n\t\t{msgGetPubKey, msgGetPubKey, wire.MainNet, 66},\n\t\t{msgPubKey, msgPubKey, wire.MainNet, 178},\n\t\t{msgMsg, msgMsg, wire.MainNet, 145},\n\t\t{msgBroadcast, msgBroadcast, wire.MainNet, 145},\n\t}\n\n\tt.Logf(\"Running %d tests\", len(tests))\n\tfor i, test := range tests {\n\t\t// Encode to wire.format.\n\t\tvar buf bytes.Buffer\n\t\tnw, err := wire.WriteMessageN(&buf, test.in, test.bmnet)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"WriteMessage #%d error %v\", i, err)\n\t\t\tcontinue\n\t\t}\n\n\t\t// Ensure the number of bytes written match the expected value.\n\t\tif nw != test.bytes {\n\t\t\tt.Errorf(\"WriteMessage #%d unexpected num bytes \"+\n\t\t\t\t\"written - got %d, want %d\", i, nw, test.bytes)\n\t\t}\n\n\t\t// Decode from wire.format.\n\t\trbuf := bytes.NewReader(buf.Bytes())\n\t\tnr, msg, _, err := wire.ReadMessageN(rbuf, test.bmnet)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"ReadMessage #%d error 
%v, msg %v\", i, err,\n\t\t\t\tspew.Sdump(msg))\n\t\t\tcontinue\n\t\t}\n\t\tif !reflect.DeepEqual(msg, test.out) {\n\t\t\tt.Errorf(\"ReadMessage #%d\\n got: %v want: %v\", i,\n\t\t\t\tspew.Sdump(msg), spew.Sdump(test.out))\n\t\t\tcontinue\n\t\t}\n\n\t\t// Ensure the number of bytes read match the expected value.\n\t\tif nr != test.bytes {\n\t\t\tt.Errorf(\"ReadMessage #%d unexpected num bytes read - \"+\n\t\t\t\t\"got %d, want %d\", i, nr, test.bytes)\n\t\t}\n\t}\n\n\t// Do the same thing for Read/WriteMessage, but ignore the bytes since\n\t// they don't return them.\n\tt.Logf(\"Running %d tests\", len(tests))\n\tfor i, test := range tests {\n\t\t// Encode to wire.format.\n\t\tvar buf bytes.Buffer\n\t\terr := wire.WriteMessage(&buf, test.in, test.bmnet)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"WriteMessage #%d error %v\", i, err)\n\t\t\tcontinue\n\t\t}\n\n\t\t// Decode from wire.format.\n\t\trbuf := bytes.NewReader(buf.Bytes())\n\t\tmsg, _, err := wire.ReadMessage(rbuf, test.bmnet)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"ReadMessage #%d error %v, msg %v\", i, err,\n\t\t\t\tspew.Sdump(msg))\n\t\t\tcontinue\n\t\t}\n\t\tif !reflect.DeepEqual(msg, test.out) {\n\t\t\tt.Errorf(\"ReadMessage #%d\\n got: %v want: %v\", i,\n\t\t\t\tspew.Sdump(msg), spew.Sdump(test.out))\n\t\t\tcontinue\n\t\t}\n\t}\n}", "func GetTLSVersion(tr *http.Transport) string {\n switch tr.TLSClientConfig.MinVersion {\n case tls.VersionTLS10:\n return \"TLS 1.0\"\n case tls.VersionTLS11:\n return \"TLS 1.1\"\n case tls.VersionTLS12:\n return \"TLS 1.2\"\n case tls.VersionTLS13:\n return \"TLS 1.3\"\n }\n\n return \"Unknown\"\n}", "func verifyConfigVersion(httpClient *http.Client, configVersion int) error {\n\treq, err := http.NewRequest(\"GET\", \"http://nginx-plus-api/configVersionCheck\", nil)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error creating request: %v\", err)\n\t}\n\treq.Header.Set(\"x-expected-config-version\", fmt.Sprintf(\"%v\", configVersion))\n\tresp, err := httpClient.Do(req)\n\tif 
err != nil {\n\t\treturn fmt.Errorf(\"error doing request: %v\", err)\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != 200 {\n\t\treturn fmt.Errorf(\"API returned non-success status: %v\", resp.StatusCode)\n\t}\n\treturn nil\n}", "func (m *MockPacketHandler) GetVersion() protocol.VersionNumber {\n\tret := m.ctrl.Call(m, \"GetVersion\")\n\tret0, _ := ret[0].(protocol.VersionNumber)\n\treturn ret0\n}", "func TestParsePayload_v4WithV2ToV3UpgradeReturnsNoError(t *testing.T) {\n\t_, protocolV, err := ParsePayload(integration.ProtocolV4.Payload, true)\n\tassert.NoError(t, err)\n\tassert.Equal(t, protocol.V4, protocolV)\n}", "func TestAcceptConnRejectsVersions(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\tt.Parallel()\n\tg := newTestingGateway(t)\n\tdefer g.Close()\n\n\ttests := []struct {\n\t\tremoteVersion string\n\t\tversionResponseWant string\n\t\terrWant error\n\t\tmsg string\n\t}{\n\t\t// Test that acceptConn fails when the remote peer's version is \"reject\".\n\t\t{\n\t\t\tremoteVersion: \"reject\",\n\t\t\tversionResponseWant: \"\",\n\t\t\terrWant: errPeerRejectedConn,\n\t\t\tmsg: \"acceptConn shouldn't accept a remote peer whose version is \\\"reject\\\"\",\n\t\t},\n\t\t// Test that acceptConn fails when the remote peer's version is ascii gibberish.\n\t\t{\n\t\t\tremoteVersion: \"foobar\",\n\t\t\tversionResponseWant: \"\",\n\t\t\terrWant: errPeerRejectedConn,\n\t\t\tmsg: \"acceptConn shouldn't accept a remote peer whose version is ascii gibberish\",\n\t\t},\n\t\t// Test that acceptConn fails when the remote peer's version is utf8 gibberish.\n\t\t{\n\t\t\tremoteVersion: \"世界\",\n\t\t\tversionResponseWant: \"\",\n\t\t\terrWant: errPeerRejectedConn,\n\t\t\tmsg: \"acceptConn shouldn't accept a remote peer whose version is utf8 gibberish\",\n\t\t},\n\t\t// Test that acceptConn fails when the remote peer's version is < 0.4.0 (0).\n\t\t{\n\t\t\tremoteVersion: \"0\",\n\t\t\tversionResponseWant: \"\",\n\t\t\terrWant: 
errPeerRejectedConn,\n\t\t\tmsg: \"acceptConn shouldn't accept a remote peer whose version is 0\",\n\t\t},\n\t\t{\n\t\t\tremoteVersion: \"0.0.0\",\n\t\t\tversionResponseWant: \"\",\n\t\t\terrWant: errPeerRejectedConn,\n\t\t\tmsg: \"acceptConn shouldn't accept a remote peer whose version is 0.0.0\",\n\t\t},\n\t\t{\n\t\t\tremoteVersion: \"0000.0000.0000\",\n\t\t\tversionResponseWant: \"\",\n\t\t\terrWant: errPeerRejectedConn,\n\t\t\tmsg: \"acceptConn shouldn't accept a remote peer whose version is 0000.000.000\",\n\t\t},\n\t\t{\n\t\t\tremoteVersion: \"0.3.9\",\n\t\t\tversionResponseWant: \"\",\n\t\t\terrWant: errPeerRejectedConn,\n\t\t\tmsg: \"acceptConn shouldn't accept a remote peer whose version is 0.3.9\",\n\t\t},\n\t\t{\n\t\t\tremoteVersion: \"0.3.9999\",\n\t\t\tversionResponseWant: \"\",\n\t\t\terrWant: errPeerRejectedConn,\n\t\t\tmsg: \"acceptConn shouldn't accept a remote peer whose version is 0.3.9999\",\n\t\t},\n\t\t{\n\t\t\tremoteVersion: \"0.3.9.9.9\",\n\t\t\tversionResponseWant: \"\",\n\t\t\terrWant: errPeerRejectedConn,\n\t\t\tmsg: \"acceptConn shouldn't accept a remote peer whose version is 0.3.9.9.9\",\n\t\t},\n\t\t// Test that acceptConn succeeds when the remote peer's version is\n\t\t// minAcceptableVersion\n\t\t{\n\t\t\tremoteVersion: minimumAcceptablePeerVersion,\n\t\t\tversionResponseWant: build.Version,\n\t\t\tmsg: \"acceptConn should accept a remote peer whose version is 0.4.0\",\n\t\t},\n\t\t// Test that acceptConn succeeds when the remote peer's version is\n\t\t// above minAcceptableVersion\n\t\t{\n\t\t\tremoteVersion: \"9\",\n\t\t\tversionResponseWant: build.Version,\n\t\t\tmsg: \"acceptConn should accept a remote peer whose version is 9\",\n\t\t},\n\t\t{\n\t\t\tremoteVersion: \"9.9.9\",\n\t\t\tversionResponseWant: build.Version,\n\t\t\tmsg: \"acceptConn should accept a remote peer whose version is 9.9.9\",\n\t\t},\n\t\t{\n\t\t\tremoteVersion: \"9999.9999.9999\",\n\t\t\tversionResponseWant: build.Version,\n\t\t\tmsg: \"acceptConn should 
accept a remote peer whose version is 9999.9999.9999\",\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tconn, err := net.DialTimeout(\"tcp\", string(g.Address()), dialTimeout)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tremoteVersion, err := connectVersionHandshake(conn, tt.remoteVersion)\n\t\tif err != tt.errWant {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif remoteVersion != tt.versionResponseWant {\n\t\t\tt.Fatal(tt.msg)\n\t\t}\n\t\tconn.Close()\n\t}\n}", "func (E_OpenconfigOfficeAp_System_SshServer_Config_ProtocolVersion) IsYANGGoEnum() {}", "func ValidateProtocol(spec *ProtocolValiditySpec) *ProtocolValidity {\n\tlog.Printf(\"Validating %s protocol for service with external set to %v\", spec.Protocol, spec.IsExternal)\n\n\tisValid := true\n\tif spec.Protocol == api.ProtocolUDP && spec.IsExternal {\n\t\tisValid = false\n\t}\n\n\tlog.Printf(\"Validation result for %s protocol is %v\", spec.Protocol, isValid)\n\treturn &ProtocolValidity{Valid: isValid}\n}", "func (bscfur BlobsStartCopyFromURLResponse) Version() string {\n\treturn bscfur.rawResponse.Header.Get(\"x-ms-version\")\n}", "func DecodeVersion(b []byte) (*Version, error) {\n\tlength := len(b)\n\tif length <= 80 {\n\t\treturn nil, fmt.Errorf(\"Invalid version message: %#v\", b)\n\t}\n\tvar addrRecvArr [26]byte\n\tvar addrFromArr [26]byte\n\tversionByte := binary.LittleEndian.Uint32(b[0:4])\n\tservices := binary.LittleEndian.Uint64(b[4:12])\n\ttimestamp := binary.LittleEndian.Uint64(b[12:20])\n\n\tcopy(addrRecvArr[:], b[20:46])\n\taddrRecv := common.DecodeNetAddr(addrRecvArr)\n\tfmt.Println(\"addrRecv ip: \", addrRecv.IP, addrRecv.Port)\n\n\tcopy(addrFromArr[:], b[46:72])\n\taddrFrom := common.DecodeNetAddr(addrFromArr)\n\tfmt.Println(\"addrFrom ip: \", addrFrom.IP, addrFrom.Port)\n\n\tnonce := binary.LittleEndian.Uint64(b[72:80])\n\n\t// userAgent の読み取り\n\tuserAgent, err := common.DecodeVarStr(b[80:])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvarstrLen := 
len(userAgent.Encode())\n\tfmt.Println(\"UserAgent: \", string(userAgent.Data))\n\n\tif length < 85+varstrLen {\n\t\treturn nil, fmt.Errorf(\"Invalid version message: %#v\", b)\n\t}\n\n\tstartHeight := binary.LittleEndian.Uint32(b[80+varstrLen : 84+varstrLen])\n\tvar relay bool\n\tif b[84+varstrLen] > 0x00 {\n\t\trelay = true\n\t} else {\n\t\trelay = false\n\t}\n\treturn &Version{\n\t\tVersion: versionByte,\n\t\tServices: services,\n\t\tTimestamp: timestamp,\n\t\tAddrRecv: addrRecv,\n\t\tAddrFrom: addrFrom,\n\t\tNonce: nonce,\n\t\tUserAgent: userAgent,\n\t\tStartHeight: startHeight,\n\t\tRelay: relay,\n\t}, nil\n}", "func TestIsProxyProtocolNeeded(t *testing.T) {\n\tvar (\n\t\tawsPlatform = configv1.PlatformStatus{\n\t\t\tType: configv1.AWSPlatformType,\n\t\t}\n\t\tazurePlatform = configv1.PlatformStatus{\n\t\t\tType: configv1.AzurePlatformType,\n\t\t}\n\t\tgcpPlatform = configv1.PlatformStatus{\n\t\t\tType: configv1.GCPPlatformType,\n\t\t}\n\t\tbareMetalPlatform = configv1.PlatformStatus{\n\t\t\tType: configv1.BareMetalPlatformType,\n\t\t}\n\n\t\thostNetworkStrategy = operatorv1.EndpointPublishingStrategy{\n\t\t\tType: operatorv1.HostNetworkStrategyType,\n\t\t}\n\t\thostNetworkStrategyWithDefault = operatorv1.EndpointPublishingStrategy{\n\t\t\tType: operatorv1.HostNetworkStrategyType,\n\t\t\tHostNetwork: &operatorv1.HostNetworkStrategy{\n\t\t\t\tProtocol: operatorv1.DefaultProtocol,\n\t\t\t},\n\t\t}\n\t\thostNetworkStrategyWithTCP = operatorv1.EndpointPublishingStrategy{\n\t\t\tType: operatorv1.HostNetworkStrategyType,\n\t\t\tHostNetwork: &operatorv1.HostNetworkStrategy{\n\t\t\t\tProtocol: operatorv1.TCPProtocol,\n\t\t\t},\n\t\t}\n\t\thostNetworkStrategyWithPROXY = operatorv1.EndpointPublishingStrategy{\n\t\t\tType: operatorv1.HostNetworkStrategyType,\n\t\t\tHostNetwork: &operatorv1.HostNetworkStrategy{\n\t\t\t\tProtocol: operatorv1.ProxyProtocol,\n\t\t\t},\n\t\t}\n\t\tloadBalancerStrategy = operatorv1.EndpointPublishingStrategy{\n\t\t\tType: 
operatorv1.LoadBalancerServiceStrategyType,\n\t\t}\n\t\tloadBalancerStrategyWithNLB = operatorv1.EndpointPublishingStrategy{\n\t\t\tType: operatorv1.LoadBalancerServiceStrategyType,\n\t\t\tLoadBalancer: &operatorv1.LoadBalancerStrategy{\n\t\t\t\tProviderParameters: &operatorv1.ProviderLoadBalancerParameters{\n\t\t\t\t\tType: operatorv1.AWSLoadBalancerProvider,\n\t\t\t\t\tAWS: &operatorv1.AWSLoadBalancerParameters{\n\t\t\t\t\t\tType: operatorv1.AWSNetworkLoadBalancer,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tnodePortStrategy = operatorv1.EndpointPublishingStrategy{\n\t\t\tType: operatorv1.NodePortServiceStrategyType,\n\t\t}\n\t\tnodePortStrategyWithDefault = operatorv1.EndpointPublishingStrategy{\n\t\t\tType: operatorv1.NodePortServiceStrategyType,\n\t\t\tNodePort: &operatorv1.NodePortStrategy{\n\t\t\t\tProtocol: operatorv1.DefaultProtocol,\n\t\t\t},\n\t\t}\n\t\tnodePortStrategyWithTCP = operatorv1.EndpointPublishingStrategy{\n\t\t\tType: operatorv1.NodePortServiceStrategyType,\n\t\t\tNodePort: &operatorv1.NodePortStrategy{\n\t\t\t\tProtocol: operatorv1.TCPProtocol,\n\t\t\t},\n\t\t}\n\t\tnodePortStrategyWithPROXY = operatorv1.EndpointPublishingStrategy{\n\t\t\tType: operatorv1.NodePortServiceStrategyType,\n\t\t\tNodePort: &operatorv1.NodePortStrategy{\n\t\t\t\tProtocol: operatorv1.ProxyProtocol,\n\t\t\t},\n\t\t}\n\t\tprivateStrategy = operatorv1.EndpointPublishingStrategy{\n\t\t\tType: operatorv1.PrivateStrategyType,\n\t\t}\n\t)\n\ttestCases := []struct {\n\t\tdescription string\n\t\tstrategy *operatorv1.EndpointPublishingStrategy\n\t\tplatform *configv1.PlatformStatus\n\t\texpect bool\n\t\texpectError bool\n\t}{\n\n\t\t{\n\t\t\tdescription: \"nil platformStatus should cause an error\",\n\t\t\tstrategy: &loadBalancerStrategy,\n\t\t\tplatform: nil,\n\t\t\texpectError: true,\n\t\t},\n\t\t{\n\t\t\tdescription: \"hostnetwork strategy shouldn't use PROXY\",\n\t\t\tstrategy: &hostNetworkStrategy,\n\t\t\tplatform: &bareMetalPlatform,\n\t\t\texpect: 
false,\n\t\t},\n\t\t{\n\t\t\tdescription: \"hostnetwork strategy specifying default shouldn't use PROXY\",\n\t\t\tstrategy: &hostNetworkStrategyWithDefault,\n\t\t\tplatform: &bareMetalPlatform,\n\t\t\texpect: false,\n\t\t},\n\t\t{\n\t\t\tdescription: \"hostnetwork strategy specifying TCP shouldn't use PROXY\",\n\t\t\tstrategy: &hostNetworkStrategyWithTCP,\n\t\t\tplatform: &bareMetalPlatform,\n\t\t\texpect: false,\n\t\t},\n\t\t{\n\t\t\tdescription: \"hostnetwork strategy specifying PROXY should use PROXY\",\n\t\t\tstrategy: &hostNetworkStrategyWithPROXY,\n\t\t\tplatform: &bareMetalPlatform,\n\t\t\texpect: true,\n\t\t},\n\t\t{\n\t\t\tdescription: \"loadbalancer strategy with ELB should use PROXY\",\n\t\t\tstrategy: &loadBalancerStrategy,\n\t\t\tplatform: &awsPlatform,\n\t\t\texpect: true,\n\t\t},\n\t\t{\n\t\t\tdescription: \"loadbalancer strategy with NLB shouldn't use PROXY\",\n\t\t\tstrategy: &loadBalancerStrategyWithNLB,\n\t\t\tplatform: &awsPlatform,\n\t\t\texpect: false,\n\t\t},\n\t\t{\n\t\t\tdescription: \"loadbalancer strategy shouldn't use PROXY on Azure\",\n\t\t\tstrategy: &loadBalancerStrategy,\n\t\t\tplatform: &azurePlatform,\n\t\t\texpect: false,\n\t\t},\n\t\t{\n\t\t\tdescription: \"loadbalancer strategy shouldn't use PROXY on GCP\",\n\t\t\tstrategy: &loadBalancerStrategy,\n\t\t\tplatform: &gcpPlatform,\n\t\t\texpect: false,\n\t\t},\n\t\t{\n\t\t\tdescription: \"empty nodeport strategy shouldn't use PROXY\",\n\t\t\tstrategy: &nodePortStrategy,\n\t\t\tplatform: &awsPlatform,\n\t\t\texpect: false,\n\t\t},\n\t\t{\n\t\t\tdescription: \"nodeport strategy specifying default shouldn't use PROXY\",\n\t\t\tstrategy: &nodePortStrategyWithDefault,\n\t\t\tplatform: &awsPlatform,\n\t\t\texpect: false,\n\t\t},\n\t\t{\n\t\t\tdescription: \"nodeport strategy specifying TCP shouldn't use PROXY\",\n\t\t\tstrategy: &nodePortStrategyWithTCP,\n\t\t\tplatform: &awsPlatform,\n\t\t\texpect: false,\n\t\t},\n\t\t{\n\t\t\tdescription: \"nodeport strategy specifying PROXY should use 
PROXY\",\n\t\t\tstrategy: &nodePortStrategyWithPROXY,\n\t\t\tplatform: &awsPlatform,\n\t\t\texpect: true,\n\t\t},\n\t\t{\n\t\t\tdescription: \"private strategy shouldn't use PROXY\",\n\t\t\tstrategy: &privateStrategy,\n\t\t\tplatform: &awsPlatform,\n\t\t\texpect: false,\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tic := &operatorv1.IngressController{\n\t\t\tStatus: operatorv1.IngressControllerStatus{\n\t\t\t\tEndpointPublishingStrategy: tc.strategy,\n\t\t\t},\n\t\t}\n\t\tswitch actual, err := IsProxyProtocolNeeded(ic, tc.platform); {\n\t\tcase tc.expectError && err == nil:\n\t\t\tt.Errorf(\"%q: expected error, got nil\", tc.description)\n\t\tcase !tc.expectError && err != nil:\n\t\t\tt.Errorf(\"%q: unexpected error: %v\", tc.description, err)\n\t\tcase tc.expect != actual:\n\t\t\tt.Errorf(\"%q: expected %t, got %t\", tc.description, tc.expect, actual)\n\t\t}\n\t}\n}", "func ping(transport http.RoundTripper, manager challenge.Manager, endpoint, versionHeader string) ([]auth.APIVersion, error) {\n\tclient := &http.Client{\n\t\tTransport: transport,\n\t}\n\n\tresp, err := client.Get(endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer func() {\n\t\t// TODO(dmage): log error?\n\t\t_ = resp.Body.Close()\n\t}()\n\n\tif err := manager.AddResponse(resp); err != nil {\n\t\treturn nil, err\n\t}\n\n\tversions := auth.APIVersions(resp, versionHeader)\n\tif len(versions) == 0 {\n\t\tok := resp.StatusCode >= 200 && resp.StatusCode < 300 ||\n\t\t\tresp.StatusCode == http.StatusUnauthorized ||\n\t\t\tresp.StatusCode == http.StatusForbidden\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"registry does not support v2 API: got %s from %s\", resp.Status, endpoint)\n\t\t}\n\t}\n\n\treturn versions, nil\n}", "func (o *APICheck) Version() int {\n\n\treturn 1\n}", "func GetVersion(val, network int) int {\n\tif network == Data.Mainnet.ID {\n\t\treturn 0x68000000 | val\n\t} else if network == Data.Testnet.ID {\n\t\treturn 0x98000000 | val\n\t}\n\treturn 0x60000000 | 
val\n}", "func (c *registryClient) findClosestProtocolCompatibleVersion(ctx context.Context, provider addrs.Provider, version Version) (Version, error) {\n\tvar match Version\n\tavailable, _, err := c.ProviderVersions(ctx, provider)\n\tif err != nil {\n\t\treturn UnspecifiedVersion, err\n\t}\n\n\t// extract the maps keys so we can make a sorted list of available versions.\n\tversionList := make(VersionList, 0, len(available))\n\tfor versionStr := range available {\n\t\tv, err := ParseVersion(versionStr)\n\t\tif err != nil {\n\t\t\treturn UnspecifiedVersion, ErrQueryFailed{\n\t\t\t\tProvider: provider,\n\t\t\t\tWrapped: fmt.Errorf(\"registry response includes invalid version string %q: %s\", versionStr, err),\n\t\t\t}\n\t\t}\n\t\tversionList = append(versionList, v)\n\t}\n\tversionList.Sort() // lowest precedence first, preserving order when equal precedence\n\n\tprotoVersions := MeetingConstraints(SupportedPluginProtocols)\nFindMatch:\n\t// put the versions in increasing order of precedence\n\tfor index := len(versionList) - 1; index >= 0; index-- { // walk backwards to consider newer versions first\n\t\tfor _, protoStr := range available[versionList[index].String()] {\n\t\t\tp, err := ParseVersion(protoStr)\n\t\t\tif err != nil {\n\t\t\t\treturn UnspecifiedVersion, ErrQueryFailed{\n\t\t\t\t\tProvider: provider,\n\t\t\t\t\tWrapped: fmt.Errorf(\"registry response includes invalid protocol string %q: %s\", protoStr, err),\n\t\t\t\t}\n\t\t\t}\n\t\t\tif protoVersions.Has(p) {\n\t\t\t\tmatch = versionList[index]\n\t\t\t\tbreak FindMatch\n\t\t\t}\n\t\t}\n\t}\n\treturn match, nil\n}", "func TestOcpp2Protocol(t *testing.T) {\n\tlogrus.SetLevel(logrus.PanicLevel)\n\tsuite.Run(t, new(OcppV2TestSuite))\n}", "func TestPongCrossProtocol(t *testing.T) {\n\tnonce, err := RandomUint64()\n\tif err != nil {\n\t\tt.Errorf(\"Error generating nonce: %v\", err)\n\t}\n\tmsg := NewMsgPong(nonce)\n\tif msg.Nonce != nonce {\n\t\tt.Errorf(\"Should get same nonce back out.\")\n\t}\n\n\t// 
Encode with latest protocol version.\n\tvar buf bytes.Buffer\n\terr = msg.BtcEncode(&buf, ProtocolVersion, BaseEncoding)\n\tif err != nil {\n\t\tt.Errorf(\"encode of MsgPong failed %v err <%v>\", msg, err)\n\t}\n\n\t// Decode with old protocol version.\n\treadmsg := NewMsgPong(0)\n\terr = readmsg.BtcDecode(&buf, BIP0031Version, BaseEncoding)\n\tif err == nil {\n\t\tt.Errorf(\"encode of MsgPong succeeded when it shouldn't have %v\",\n\t\t\tmsg)\n\t}\n\n\t// Since one of the protocol versions doesn't support the pong message,\n\t// make sure the nonce didn't get encoded and decoded back out.\n\tif msg.Nonce == readmsg.Nonce {\n\t\tt.Error(\"Should not get same nonce for cross protocol\")\n\t}\n}", "func probeVersion(cl *client.Client) {\n\t// If we request against a Kafka older than ApiVersions,\n\t// Kafka will close the connection. ErrConnDead is\n\t// retried automatically, so we must stop that.\n\tcl.AddOpt(kgo.RequestRetries(0))\n\tkresp, err := cl.Client().Request(context.Background(), apiVersionsRequest())\n\tif err != nil { // pre 0.10.0 had no api versions\n\t\tcl.RemakeWithOpts(kgo.MaxVersions(kversion.V0_9_0()))\n\t\t// 0.9.0 has list groups\n\t\tif _, err = cl.Client().SeedBrokers()[0].Request(context.Background(), new(kmsg.ListGroupsRequest)); err == nil {\n\t\t\tfmt.Println(\"Kafka 0.9.0\")\n\t\t\treturn\n\t\t}\n\t\tcl.RemakeWithOpts(kgo.MaxVersions(kversion.V0_8_2()))\n\t\t// 0.8.2 has find coordinator\n\t\tif _, err = cl.Client().SeedBrokers()[0].Request(context.Background(), new(kmsg.FindCoordinatorRequest)); err == nil {\n\t\t\tfmt.Println(\"Kafka 0.8.2\")\n\t\t\treturn\n\t\t}\n\t\tcl.RemakeWithOpts(kgo.MaxVersions(kversion.V0_8_1()))\n\t\t// 0.8.1 has offset fetch\n\t\tif _, err = cl.Client().SeedBrokers()[0].Request(context.Background(), new(kmsg.OffsetFetchRequest)); err == nil {\n\t\t\tfmt.Println(\"Kafka 0.8.1\")\n\t\t\treturn\n\t\t}\n\t\tfmt.Println(\"Kafka 0.8.0\")\n\t\treturn\n\t}\n\n\tresp := kresp.(*kmsg.ApiVersionsResponse)\n\n\tv := 
kversion.FromApiVersionsResponse(resp)\n\tfmt.Println(\"Kafka \" + v.VersionGuess())\n}", "func (sc *ServerConn) negotiateVersion() (string, error) {\n\treqMsg, err := prepareRequest(sc.nextID(), \"server.version\", positional{\"Electrum\", \"1.4\"})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treqMsg = append(reqMsg, newline)\n\n\tif err = sc.send(reqMsg); err != nil {\n\t\treturn \"\", err\n\t}\n\n\terr = sc.conn.SetReadDeadline(time.Now().Add(10 * time.Second))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treader := bufio.NewReader(io.LimitReader(sc.conn, 1<<18))\n\tmsg, err := reader.ReadBytes(newline)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvar jsonResp response\n\terr = json.Unmarshal(msg, &jsonResp)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvar vers []string // [server_software_version, protocol_version]\n\terr = json.Unmarshal(jsonResp.Result, &vers)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif len(vers) != 2 {\n\t\treturn \"\", fmt.Errorf(\"unexpected version response: %v\", vers)\n\t}\n\treturn vers[1], nil\n}", "func TestVersionSemVer(t *testing.T) {\n\tt.Logf(\"Testing version semantic (%s)\", Version)\n\tdetails := strings.Split(Version, \".\")\n\tif len(details) != 3 {\n\t\tt.Errorf(\"Version should provide major, minor and path informations: %s\", Version)\n\t}\n\tif _, err := strconv.ParseInt(details[0], 2, 0); err != nil {\n\t\tt.Errorf(err.Error())\n\t}\n\tif _, err := strconv.ParseInt(details[1], 2, 0); err != nil {\n\t\tt.Errorf(err.Error())\n\t}\n\n\tpatch := strings.Split(details[2], \"-\")\n\tif _, err := strconv.ParseInt(patch[0], 2, 0); err != nil {\n\t\tt.Errorf(err.Error())\n\t}\n\tif len(patch) > 2 {\n\t\tt.Error(\"last version part only provides patch number and pre-release info\")\n\n\t}\n}", "func TestAcceptConnRejects(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\tg := newTestingGateway(\"TestAcceptConnRejects1\", t)\n\tdefer g.Close()\n\tmg := 
mockGatewayWithVersion{\n\t\tGateway: newTestingGateway(\"TestAcceptConnRejects2\", t),\n\t\tversionACK: make(chan string),\n\t}\n\tdefer mg.Close()\n\n\ttests := []struct {\n\t\tremoteVersion string\n\t\tversionResponseWant string\n\t\tmsg string\n\t}{\n\t\t// Test that acceptConn fails when the remote peer's version is \"reject\".\n\t\t{\n\t\t\tremoteVersion: \"reject\",\n\t\t\tversionResponseWant: \"reject\",\n\t\t\tmsg: \"acceptConn shouldn't accept a remote peer whose version is \\\"reject\\\"\",\n\t\t},\n\t\t// Test that acceptConn fails when the remote peer's version is ascii gibberish.\n\t\t{\n\t\t\tremoteVersion: \"foobar\",\n\t\t\tversionResponseWant: \"reject\",\n\t\t\tmsg: \"acceptConn shouldn't accept a remote peer whose version is ascii giberish\",\n\t\t},\n\t\t// Test that acceptConn fails when the remote peer's version is utf8 gibberish.\n\t\t{\n\t\t\tremoteVersion: \"世界\",\n\t\t\tversionResponseWant: \"reject\",\n\t\t\tmsg: \"acceptConn shouldn't accept a remote peer whose version is utf8 giberish\",\n\t\t},\n\t\t// Test that acceptConn fails when the remote peer's version is < 0.4.0 (0).\n\t\t{\n\t\t\tremoteVersion: \"0\",\n\t\t\tversionResponseWant: \"reject\",\n\t\t\tmsg: \"acceptConn shouldn't accept a remote peer whose version is 0\",\n\t\t},\n\t\t{\n\t\t\tremoteVersion: \"0.0.0\",\n\t\t\tversionResponseWant: \"reject\",\n\t\t\tmsg: \"acceptConn shouldn't accept a remote peer whose version is 0.0.0\",\n\t\t},\n\t\t{\n\t\t\tremoteVersion: \"0000.0000.0000\",\n\t\t\tversionResponseWant: \"reject\",\n\t\t\tmsg: \"acceptConn shouldn't accept a remote peer whose version is 0000.000.000\",\n\t\t},\n\t\t{\n\t\t\tremoteVersion: \"0.3.9\",\n\t\t\tversionResponseWant: \"reject\",\n\t\t\tmsg: \"acceptConn shouldn't accept a remote peer whose version is 0.3.9\",\n\t\t},\n\t\t{\n\t\t\tremoteVersion: \"0.3.9999\",\n\t\t\tversionResponseWant: \"reject\",\n\t\t\tmsg: \"acceptConn shouldn't accept a remote peer whose version is 
0.3.9999\",\n\t\t},\n\t\t{\n\t\t\tremoteVersion: \"0.3.9.9.9\",\n\t\t\tversionResponseWant: \"reject\",\n\t\t\tmsg: \"acceptConn shouldn't accept a remote peer whose version is 0.3.9.9.9\",\n\t\t},\n\t\t// Test that acceptConn succeeds when the remote peer's version is 0.4.0.\n\t\t{\n\t\t\tremoteVersion: \"0.4.0\",\n\t\t\tversionResponseWant: build.Version,\n\t\t\tmsg: \"acceptConn should accept a remote peer whose version is 0.4.0\",\n\t\t},\n\t\t// Test that acceptConn succeeds when the remote peer's version is > 0.4.0.\n\t\t{\n\t\t\tremoteVersion: \"9\",\n\t\t\tversionResponseWant: build.Version,\n\t\t\tmsg: \"acceptConn should accept a remote peer whose version is 9\",\n\t\t},\n\t\t{\n\t\t\tremoteVersion: \"9.9.9\",\n\t\t\tversionResponseWant: build.Version,\n\t\t\tmsg: \"acceptConn should accept a remote peer whose version is 9.9.9\",\n\t\t},\n\t\t{\n\t\t\tremoteVersion: \"9999.9999.9999\",\n\t\t\tversionResponseWant: build.Version,\n\t\t\tmsg: \"acceptConn should accept a remote peer whose version is 9999.9999.9999\",\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tmg.version = tt.remoteVersion\n\t\tgo func() {\n\t\t\terr := mg.Connect(g.Address())\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t}()\n\t\tremoteVersion := <-mg.versionACK\n\t\tif remoteVersion != tt.versionResponseWant {\n\t\t\tt.Fatalf(tt.msg)\n\t\t}\n\t\tg.Disconnect(mg.Address())\n\t\tmg.Disconnect(g.Address())\n\t}\n}", "func (server) GetVersion(context.Context, *empty.Empty) (*pb.VersionResponse, error) {\n\treturn &pb.VersionResponse{Version: version}, nil\n}", "func TestVersion(t *testing.T) {\n\tt.Parallel()\n\n\ttree := writeTree(t, \"\")\n\n\t// There's not much we can robustly assert about the actual version.\n\twant := debug.Version() // e.g. 
\"master\"\n\n\t// basic\n\t{\n\t\tres := gopls(t, tree, \"version\")\n\t\tres.checkExit(true)\n\t\tres.checkStdout(want)\n\t}\n\n\t// -json flag\n\t{\n\t\tres := gopls(t, tree, \"version\", \"-json\")\n\t\tres.checkExit(true)\n\t\tvar v debug.ServerVersion\n\t\tif res.toJSON(&v) {\n\t\t\tif v.Version != want {\n\t\t\t\tt.Errorf(\"expected Version %q, got %q (%v)\", want, v.Version, res)\n\t\t\t}\n\t\t}\n\t}\n}", "func (c Initializer) VerifyServedVersion(client *kube.Client, expectedVersion string, result *verifier.Result) error {\n\tapiClient := client.ExtClient.ApiextensionsV1()\n\tif err := c.verifyServedVersion(apiClient, c.Operator.Name, expectedVersion, result); err != nil {\n\t\treturn err\n\t}\n\tif err := c.verifyServedVersion(apiClient, c.OperatorVersion.Name, expectedVersion, result); err != nil {\n\t\treturn err\n\t}\n\tif err := c.verifyServedVersion(apiClient, c.Instance.Name, expectedVersion, result); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (o TargetGroupOutput) ProtocolVersion() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v TargetGroup) *string { return v.ProtocolVersion }).(pulumi.StringPtrOutput)\n}", "func ServiceCheck(opts *core.TargetOptions) {\n\tprobePack := \"\\x10\\x11\\x00\\x04MQTT\\x04\\x02\\x00\\x1e\\x00\\x05MQTTS\"\n\tpattern := \"^\\x20\\x02\\x00.$\"\n\tutils.OutputInfoMessage(opts.Host, opts.Port, \"Check port protocol type...\")\n\n\tutils.OutputInfoMessage(opts.Host, opts.Port, \"Check if it is TCP protocol\")\n\ttcpErr, tcpBuf := core.ConnectWithSingleProbePack(opts.Host, opts.Port, probePack)\n\tif tcpErr == nil {\n\t\tisTCPMatch, matchErr := binaryregexp.Match(pattern, tcpBuf)\n\t\tif matchErr == nil && isTCPMatch {\n\t\t\topts.Protocol = \"tcp\"\n\t\t\tutils.OutputSuccessMessage(opts.Host, opts.Port, \"Port protocol type is MQTT/TCP\")\n\t\t\treturn\n\t\t}\n\t}\n\n\tutils.OutputInfoMessage(opts.Host, opts.Port, \"Check if it is SSL protocol\")\n\tsslErr, sslBuf := 
core.ConnectWithSingleProbePackTCPTLS(opts.Host, opts.Port, probePack)\n\tif sslErr == nil {\n\t\tisSSLMatch, matchErr := binaryregexp.Match(pattern, sslBuf)\n\t\tif matchErr == nil && isSSLMatch {\n\t\t\topts.Protocol = \"ssl\"\n\t\t\tutils.OutputSuccessMessage(opts.Host, opts.Port, \"Port protocol type is MQTT/SSL\")\n\t\t\treturn\n\t\t}\n\t}\n\n\tutils.OutputInfoMessage(opts.Host, opts.Port, \"Check if it is WS protocol\")\n\twsErr, wsBuf := core.ConnectWithSingleProbePack(opts.Host, opts.Port, constructWebsocketPacket(opts.Host, opts.Port))\n\tif wsErr == nil {\n\t\twsLowerBuf := strings.ToLower(string(wsBuf))\n\t\tif strings.Contains(wsLowerBuf, \"http/1.1 101 switching protocols\") && strings.Contains(wsLowerBuf, \"sec-websocket-protocol: mqtt\") {\n\t\t\topts.Protocol = \"ws\"\n\t\t\tutils.OutputSuccessMessage(opts.Host, opts.Port, \"Port protocol type is MQTT/WS\")\n\t\t\treturn\n\t\t}\n\t}\n\n\tutils.OutputInfoMessage(opts.Host, opts.Port, \"Check if it is WSS protocol\")\n\twssErr, wssBuf := core.ConnectWithSingleProbePackTCPTLS(opts.Host, opts.Port, constructWebsocketPacket(opts.Host, opts.Port))\n\tif wssErr == nil {\n\t\twssLowerBuf := strings.ToLower(string(wssBuf))\n\t\tif strings.Contains(wssLowerBuf, \"http/1.1 101 switching protocols\") && strings.Contains(wssLowerBuf, \"sec-websocket-protocol: mqtt\") {\n\t\t\topts.Protocol = \"wss\"\n\t\t\tutils.OutputSuccessMessage(opts.Host, opts.Port, \"Port protocol type is MQTT/WSS\")\n\t\t\treturn\n\t\t}\n\t}\n\tutils.OutputErrorMessage(opts.Host, opts.Port, \"Get MQTT protocol type failed, you can try use -protocol wss/ws/tcp/ssl in command to set protocol\")\n}", "func (E_OpenconfigSystem_System_SshServer_Config_ProtocolVersion) IsYANGGoEnum() {}", "func (t *websocketTransport) ProtocolVersion() ProtocolVersion {\n\treturn ProtocolVersion2\n}", "func TestDaemon_Version(t *testing.T) {\n\td, start, clean, _, _, _ := mockDaemon(t)\n\tstart()\n\tdefer clean()\n\n\tctx := context.Background()\n\tv, err := 
d.Version(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"Error: %s\", err.Error())\n\t}\n\tif v != testVersion {\n\t\tt.Fatalf(\"Expected %v but got %v\", testVersion, v)\n\t}\n}", "func getProtocol(proto string) robot.Protocol {\n\tproto = strings.ToLower(proto)\n\tswitch proto {\n\tcase \"slack\":\n\t\treturn robot.Slack\n\tcase \"term\", \"terminal\":\n\t\treturn robot.Terminal\n\tcase \"nullconn\":\n\t\treturn robot.Null\n\tcase \"rocket\":\n\t\treturn robot.Rocket\n\tdefault:\n\t\treturn robot.Test\n\t}\n}", "func (ds *Dsync) ProtocolVersion() (protocol.ID, error) {\n\treturn DsyncProtocolID, nil\n}", "func TestUnitAcceptableVersion(t *testing.T) {\n\tinvalidVersions := []string{\n\t\t// ascii gibberish\n\t\t\"foobar\",\n\t\t\"foobar.0\",\n\t\t\"foobar.9\",\n\t\t\"0.foobar\",\n\t\t\"9.foobar\",\n\t\t\"foobar.0.0\",\n\t\t\"foobar.9.9\",\n\t\t\"0.foobar.0\",\n\t\t\"9.foobar.9\",\n\t\t\"0.0.foobar\",\n\t\t\"9.9.foobar\",\n\t\t// utf-8 gibberish\n\t\t\"世界\",\n\t\t\"世界.0\",\n\t\t\"世界.9\",\n\t\t\"0.世界\",\n\t\t\"9.世界\",\n\t\t\"世界.0.0\",\n\t\t\"世界.9.9\",\n\t\t\"0.世界.0\",\n\t\t\"9.世界.9\",\n\t\t\"0.0.世界\",\n\t\t\"9.9.世界\",\n\t\t// missing numbers\n\t\t\".\",\n\t\t\"..\",\n\t\t\"...\",\n\t\t\"0.\",\n\t\t\".1\",\n\t\t\"2..\",\n\t\t\".3.\",\n\t\t\"..4\",\n\t\t\"5.6.\",\n\t\t\".7.8\",\n\t\t\".9.0.\",\n\t}\n\tfor _, v := range invalidVersions {\n\t\terr := acceptableVersion(v)\n\t\tif _, ok := err.(invalidVersionError); err == nil || !ok {\n\t\t\tt.Errorf(\"acceptableVersion returned %q for version %q, but expected invalidVersionError\", err, v)\n\t\t}\n\t}\n\tinsufficientVersions := []string{\n\t\t// random small versions\n\t\t\"0\",\n\t\t\"00\",\n\t\t\"0000000000\",\n\t\t\"0.0\",\n\t\t\"0000000000.0\",\n\t\t\"0.0000000000\",\n\t\t\"0.0.0.0.0.0.0.0\",\n\t\t/*\n\t\t\t\"0.0.9\",\n\t\t\t\"0.0.999\",\n\t\t\t\"0.0.99999999999\",\n\t\t\t\"0.1.2\",\n\t\t\t\"0.1.2.3.4.5.6.7.8.9\",\n\t\t\t// pre-hardfork 
versions\n\t\t\t\"0.3.3\",\n\t\t\t\"0.3.9.9.9.9.9.9.9.9.9.9\",\n\t\t\t\"0.3.9999999999\",\n\t\t\t\"1.3.0\",\n\t\t*/\n\t}\n\tfor _, v := range insufficientVersions {\n\t\terr := acceptableVersion(v)\n\t\tif _, ok := err.(insufficientVersionError); err == nil || !ok {\n\t\t\tt.Errorf(\"acceptableVersion returned %q for version %q, but expected insufficientVersionError\", err, v)\n\t\t}\n\t}\n\tvalidVersions := []string{\n\t\tminimumAcceptablePeerVersion,\n\t\t\"1.3.7\",\n\t\t\"1.4.0\",\n\t\t\"1.6.0\",\n\t\t\"1.6.1\",\n\t\t\"1.9\",\n\t\t\"1.999\",\n\t\t\"1.9999999999\",\n\t\t\"2\",\n\t\t\"2.0\",\n\t\t\"2.0.0\",\n\t\t\"9\",\n\t\t\"9.0\",\n\t\t\"9.0.0\",\n\t\t\"9.9.9\",\n\t}\n\tfor _, v := range validVersions {\n\t\terr := acceptableVersion(v)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"acceptableVersion returned %q for version %q, but expected nil\", err, v)\n\t\t}\n\t}\n}", "func isBadVersion(version int) bool{\n return false\n}", "func mutualVersion(theirMajor, theirMinor uint8) (major, minor uint8, ok bool) {\n\t// We don't deal with peers < TLS 1.0 (aka version 3.1).\n\tif theirMajor < 3 || theirMajor == 3 && theirMinor < 1 {\n\t\treturn 0, 0, false\n\t}\n\tmajor = 3;\n\tminor = 2;\n\tif theirMinor < minor {\n\t\tminor = theirMinor\n\t}\n\tok = true;\n\treturn;\n}", "func (bblr BlobsBreakLeaseResponse) Version() string {\n\treturn bblr.rawResponse.Header.Get(\"x-ms-version\")\n}", "func TestPongLatest(t *testing.T) {\n\tenc := BaseEncoding\n\tpver := ProtocolVersion\n\n\tnonce, err := RandomUint64()\n\tif err != nil {\n\t\tt.Errorf(\"RandomUint64: error generating nonce: %v\", err)\n\t}\n\tmsg := NewMsgPong(nonce)\n\tif msg.Nonce != nonce {\n\t\tt.Errorf(\"NewMsgPong: wrong nonce - got %v, want %v\",\n\t\t\tmsg.Nonce, nonce)\n\t}\n\n\t// Ensure the command is expected value.\n\twantCmd := \"pong\"\n\tif cmd := msg.Command(); cmd != wantCmd {\n\t\tt.Errorf(\"NewMsgPong: wrong command - got %v want %v\",\n\t\t\tcmd, wantCmd)\n\t}\n\n\t// Ensure max payload is expected 
value for latest protocol version.\n\twantPayload := uint32(8)\n\tmaxPayload := msg.MaxPayloadLength(pver)\n\tif maxPayload != wantPayload {\n\t\tt.Errorf(\"MaxPayloadLength: wrong max payload length for \"+\n\t\t\t\"protocol version %d - got %v, want %v\", pver,\n\t\t\tmaxPayload, wantPayload)\n\t}\n\n\t// Test encode with latest protocol version.\n\tvar buf bytes.Buffer\n\terr = msg.BtcEncode(&buf, pver, enc)\n\tif err != nil {\n\t\tt.Errorf(\"encode of MsgPong failed %v err <%v>\", msg, err)\n\t}\n\n\t// Test decode with latest protocol version.\n\treadmsg := NewMsgPong(0)\n\terr = readmsg.BtcDecode(&buf, pver, enc)\n\tif err != nil {\n\t\tt.Errorf(\"decode of MsgPong failed [%v] err <%v>\", buf, err)\n\t}\n\n\t// Ensure nonce is the same.\n\tif msg.Nonce != readmsg.Nonce {\n\t\tt.Errorf(\"Should get same nonce for protocol version %d\", pver)\n\t}\n}", "func (o *UcsdBackupInfoAllOf) GetProtocolOk() (*string, bool) {\n\tif o == nil || o.Protocol == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Protocol, true\n}", "func Test_IndexHandler(t *testing.T) {\n\tvar (\n\t\tversionMsg Service\n\t\tresp *http.Response\n\t)\n\n\tsvc := NewService()\n\n\tts := httptest.NewServer(svc.NewRouter(\"*\"))\n\tdefer ts.Close()\n\n\treq, _ := http.NewRequest(\"GET\", ts.URL+\"/\", nil)\n\n\toutputLog := helpers.CaptureOutput(func() {\n\t\tresp, _ = http.DefaultClient.Do(req)\n\t})\n\n\tif got, want := resp.StatusCode, 200; got != want {\n\t\tt.Fatalf(\"Invalid status code, got %d but want %d\", got, want)\n\t}\n\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tt.Fatalf(\"Got an error when reading body: %s\", err.Error())\n\t}\n\n\terr = json.Unmarshal(data, &versionMsg)\n\tif err != nil {\n\t\tt.Fatalf(\"Got an error when parsing json: %s\", err.Error())\n\t}\n\tif got, want := versionMsg.Version, svc.Version; got != want {\n\t\tt.Fatalf(\"Wrong version return, got %s but want %s\", got, want)\n\t}\n\tif got, want := versionMsg.Name, svc.Name; got != want 
{\n\t\tt.Fatalf(\"Wrong version return, got %s but want %s\", got, want)\n\t}\n\n\tmatched, err := regexp.MatchString(`uri=/ `, outputLog)\n\tif matched != true || err != nil {\n\t\tt.Fatalf(\"request is not logged :\\n%s\", outputLog)\n\t}\n}", "func validateTLS(details scan.LabsEndpointDetails) string {\n\t// Versions in increasing preference SSL v2, SSL v3, TLS v1.0, TLS v1.1, TLS v1.2, TLS v1.3 (future)\n\tvar versions string\n\tfor _, protocolType := range details.Protocols {\n\t\tvar vTLS = protocolType.Name + \":\" + protocolType.Version\n\t\tif !versionsTLS[vTLS] {\n\t\t\tversions += vTLS + \"\\n\"\n\t\t}\n\t}\n\tif versions != \"\" {\n\t\treturn (versions)\n\t}\n\treturn \"No Vulnerable versions supported!\"\n}", "func (net *NetAPI) Version() (string, error) {\n\treq := net.requestManager.newRequest(\"net_version\")\n\tresp, err := net.requestManager.send(req)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn resp.Get(\"result\").(string), nil\n}", "func TestGetSemverVersisonWithStandardVersion(t *testing.T) {\n\tversion.Map[\"version\"] = \"1.2.1\"\n\tresult, err := version.GetSemverVersion()\n\texpectedResult := semver.Version{Major: 1, Minor: 2, Patch: 1}\n\tassert.NoError(t, err, \"GetSemverVersion should exit without failure\")\n\tassert.Exactly(t, expectedResult, result)\n}", "func (a *Client) Version(params *VersionParams) (*VersionOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewVersionParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"Version\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/version\",\n\t\tProducesMediaTypes: []string{\"text/plain\"},\n\t\tConsumesMediaTypes: []string{\"text/plain\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &VersionReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := 
result.(*VersionOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\tunexpectedSuccess := result.(*VersionDefault)\n\treturn nil, runtime.NewAPIError(\"unexpected success response: content available as default response in error\", unexpectedSuccess, unexpectedSuccess.Code())\n}", "func (cblr ContainersBreakLeaseResponse) Version() string {\n\treturn cblr.rawResponse.Header.Get(\"x-ms-version\")\n}", "func (btc *ExchangeWallet) getVersion() (uint64, uint64, error) {\n\tr := &struct {\n\t\tVersion uint64 `json:\"version\"`\n\t\tProtocolVersion uint64 `json:\"protocolversion\"`\n\t}{}\n\terr := btc.wallet.call(methodGetNetworkInfo, nil, r)\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\treturn r.Version, r.ProtocolVersion, nil\n}", "func getVppVersion(ch api.Channel, name string) {\n\tlogInfo(\"Retrieving version of %s ..\\n\", name)\n\n\treq := &vpe.ShowVersion{}\n\treply := &vpe.ShowVersionReply{}\n\n\tif err := ch.SendRequest(req).ReceiveReply(reply); err != nil {\n\t\tlogError(err, \"retrieving version\")\n\t\treturn\n\t}\n\tlogInfo(\"Retrieved version is %q\\n\", reply.Version)\n\tfmt.Println()\n}", "func (dr downloadResponse) Version() string {\n\treturn dr.rawResponse.Header.Get(\"x-ms-version\")\n}", "func (s *suite) Test_QueryNextVersion_happy_path(c *C) {\n\tserver := NewMockServer().WithBody(`1.0`).Start(c)\n\tdefer server.Stop()\n\n\tunit := NewRemoteInventory(server.URL, \"token\", \"\", \"\", false)\n\tversion, err := unit.QueryNextVersion(\"query-project\", \"name\", \"1.@\")\n\tserver.ExpectCalled(c, true, queryNextVersionURL)\n\tc.Assert(err, IsNil)\n\tc.Assert(version, Equals, \"1.0\")\n}", "func (brlr BlobsRenewLeaseResponse) Version() string {\n\treturn brlr.rawResponse.Header.Get(\"x-ms-version\")\n}", "func TestSonobuoyVersion(t *testing.T) {\n\terr, stdout, stderr := runSonobuoyCommand(t, \"version\")\n\n\tif err != nil {\n\t\tt.Errorf(\"Sonobuoy exited with an error: %q\\n\", 
err)\n\t\tt.Log(stderr.String())\n\t\tt.FailNow()\n\t}\n\n\tlines := strings.Split(stdout.String(), \"\\n\")\n\tfor _, line := range lines {\n\t\tversionComponents := strings.Split(line, \":\")\n\t\t// If a Kubeconfig is not provided, a warning is included that the API version check is skipped.\n\t\t// Only check lines where a split on \":\" actually happened.\n\t\tif len(versionComponents) == 2 && strings.TrimSpace(versionComponents[1]) == \"\" {\n\t\t\tt.Errorf(\"expected value for %v to be set, but was empty\", versionComponents[0])\n\t\t}\n\t}\n}", "func (p *Plugin) Protocol() string {\n\tif p.PluginObj.Config.Interface.ProtocolScheme != \"\" {\n\t\treturn p.PluginObj.Config.Interface.ProtocolScheme\n\t}\n\treturn plugins.ProtocolSchemeHTTPV1\n}", "func (bacfur BlobsAbortCopyFromURLResponse) Version() string {\n\treturn bacfur.rawResponse.Header.Get(\"x-ms-version\")\n}", "func (h *Hub) Version() (result string, err error) {\n\treturn h.client.sendXPathRequest(mySagemcomBoxDeviceInfoProductClass)\n}", "func (u utilityEndpoints) versionCheck(c echo.Context) error {\n\tvf, err := u.version.VersionFormatter(version.FullVersion)\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"format version\")\n\t\treturn c.JSON(http.StatusInternalServerError, NewErrorResponse(err))\n\t}\n\n\tmsg, update, err := u.version.UpdateWarningVersion(vf)\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"update warning version\")\n\t\treturn c.JSON(http.StatusInternalServerError, NewErrorResponse(err))\n\t}\n\n\tresponse := VersionResponse{\n\t\tVersion: u.version.GetHumanVersion(),\n\t\tMsg: msg,\n\t\tUpdate: update,\n\t}\n\n\treturn c.JSON(http.StatusOK, response)\n}", "func (bclr BlobsChangeLeaseResponse) Version() string {\n\treturn bclr.rawResponse.Header.Get(\"x-ms-version\")\n}", "func (o *VirtualizationIweHost) GetVersionOk() (*string, bool) {\n\tif o == nil || o.Version == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Version, true\n}", "func TestAPIVersionKVM(t *testing.T) {\n\tc 
:= &Client{\n\t\tioctl: func(fd uintptr, request int, argp uintptr) (uintptr, error) {\n\t\t\tif request != kvmGetAPIVersion {\n\t\t\t\tt.Fatalf(\"unexpected ioctl request number: %d\", request)\n\t\t\t}\n\n\t\t\treturn uintptr(Version), nil\n\t\t},\n\t}\n\n\tv, err := c.APIVersion()\n\tif err != nil {\n\t\tt.Errorf(\"could not get API version: %q\", err.Error())\n\t}\n\n\tif want, got := v, Version; want != got {\n\t\tt.Fatalf(\"unexpected KVM API version: %d != %d\", want, got)\n\t}\n}", "func isValidModelVersion(model string, version string, dmsaUrl string) (isValid bool, err error) {\n\tif cachedIsValidVersion != nil {\n\t\tisValid = *cachedIsValidVersion\n\t\treturn\n\t}\n\n\tparts := strings.Split(version, \".\")\n\tif len(parts) != 3 {\n\t\terr = fmt.Errorf(\"Model version must look like X.Y.Z, not '%s'\", version)\n\t\treturn\n\t}\n\n\t// First, test the DMSA service URL itself\n\tvar response *http.Response\n\tresponse, err = http.Get(dmsaUrl)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Cannot access data-models-sqlalchemy web service at %s: %v\", dmsaUrl, err)\n\t\treturn\n\t}\n\tif response.StatusCode != 200 {\n\t\terr = fmt.Errorf(\"Data-models-sqlalchemy web service (%s) returned error response: %v\", dmsaUrl, http.StatusText(response.StatusCode))\n\t\treturn\n\t}\n\n\t// Now check the requested version\n\turl := joinUrlPath(dmsaUrl, fmt.Sprintf(\"/%s/%s/ddl/postgresql/tables/\", model, version))\n\tresponse, err = http.Get(url)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Cannot access data-models-sqlalchemy web service at %v: %v\", url, err)\n\t\treturn\n\t}\n\tif response.StatusCode != 200 {\n\t\treturn // Normal \"not valid\" return: isValid will be false and err will be nil\n\t}\n\t// Normal \"valid\" return\n\tcachedIsValidVersion = new(bool)\n\t*cachedIsValidVersion = true\n\tisValid = *cachedIsValidVersion\n\treturn\n}", "func (cclr ContainersChangeLeaseResponse) Version() string {\n\treturn cclr.rawResponse.Header.Get(\"x-ms-version\")\n}", "func 
(brlr BlobsReleaseLeaseResponse) Version() string {\n\treturn brlr.rawResponse.Header.Get(\"x-ms-version\")\n}", "func TestConnectRejects(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\tg := newTestingGateway(\"TestConnectRejects\", t)\n\t// Setup a listener that mocks Gateway.acceptConn, but sends the\n\t// version sent over mockVersionChan instead of build.Version.\n\tlistener, err := net.Listen(\"tcp\", \"localhost:0\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tmockVersionChan := make(chan string)\n\tgo func() {\n\t\tfor {\n\t\t\tmockVersion := <-mockVersionChan\n\t\t\tconn, err := listener.Accept()\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\t// Read remote peer version.\n\t\t\tvar remoteVersion string\n\t\t\tif err := encoding.ReadObject(conn, &remoteVersion, maxAddrLength); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\t// Write our mock version.\n\t\t\tif err := encoding.WriteObject(conn, mockVersion); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t}\n\t}()\n\n\ttests := []struct {\n\t\tversion string\n\t\terrWant error\n\t\tinsufficientVersion bool\n\t\tmsg string\n\t}{\n\t\t// Test that Connect fails when the remote peer's version is \"reject\".\n\t\t{\n\t\t\tversion: \"reject\",\n\t\t\terrWant: errPeerRejectedConn,\n\t\t\tmsg: \"Connect should fail when the remote peer rejects the connection\",\n\t\t},\n\t\t// Test that Connect fails when the remote peer's version is ascii gibberish.\n\t\t{\n\t\t\tversion: \"foobar\",\n\t\t\tinsufficientVersion: true,\n\t\t\tmsg: \"Connect should fail when the remote peer's version is ascii gibberish\",\n\t\t},\n\t\t// Test that Connect fails when the remote peer's version is utf8 gibberish.\n\t\t{\n\t\t\tversion: \"世界\",\n\t\t\tinsufficientVersion: true,\n\t\t\tmsg: \"Connect should fail when the remote peer's version is utf8 gibberish\",\n\t\t},\n\t\t// Test that Connect fails when the remote peer's version is < 0.4.0 (0).\n\t\t{\n\t\t\tversion: 
\"0\",\n\t\t\tinsufficientVersion: true,\n\t\t\tmsg: \"Connect should fail when the remote peer's version is 0\",\n\t\t},\n\t\t{\n\t\t\tversion: \"0.0.0\",\n\t\t\tinsufficientVersion: true,\n\t\t\tmsg: \"Connect should fail when the remote peer's version is 0.0.0\",\n\t\t},\n\t\t{\n\t\t\tversion: \"0000.0000.0000\",\n\t\t\tinsufficientVersion: true,\n\t\t\tmsg: \"Connect should fail when the remote peer's version is 0000.0000.0000\",\n\t\t},\n\t\t{\n\t\t\tversion: \"0.3.9\",\n\t\t\tinsufficientVersion: true,\n\t\t\tmsg: \"Connect should fail when the remote peer's version is 0.3.9\",\n\t\t},\n\t\t{\n\t\t\tversion: \"0.3.9999\",\n\t\t\tinsufficientVersion: true,\n\t\t\tmsg: \"Connect should fail when the remote peer's version is 0.3.9999\",\n\t\t},\n\t\t{\n\t\t\tversion: \"0.3.9.9.9\",\n\t\t\tinsufficientVersion: true,\n\t\t\tmsg: \"Connect should fail when the remote peer's version is 0.3.9.9.9\",\n\t\t},\n\t\t// Test that Connect succeeds when the remote peer's version is 0.4.0.\n\t\t{\n\t\t\tversion: \"0.4.0\",\n\t\t\tmsg: \"Connect should succeed when the remote peer's version is 0.4.0\",\n\t\t},\n\t\t// Test that Connect succeeds when the remote peer's version is > 0.4.0.\n\t\t{\n\t\t\tversion: \"9\",\n\t\t\tmsg: \"Connect should succeed when the remote peer's version is 9\",\n\t\t},\n\t\t{\n\t\t\tversion: \"9.9.9\",\n\t\t\tmsg: \"Connect should succeed when the remote peer's version is 9.9.9\",\n\t\t},\n\t\t{\n\t\t\tversion: \"9999.9999.9999\",\n\t\t\tmsg: \"Connect should succeed when the remote peer's version is 9999.9999.9999\",\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tmockVersionChan <- tt.version\n\t\terr = g.Connect(modules.NetAddress(listener.Addr().String()))\n\t\tif tt.insufficientVersion {\n\t\t\t// Check that the error is the expected type.\n\t\t\tif _, ok := err.(insufficientVersionError); !ok {\n\t\t\t\tt.Fatalf(\"expected Connect to error with insufficientVersionError: %s\", tt.msg)\n\t\t\t}\n\t\t} else {\n\t\t\t// Check that the error is 
the expected error.\n\t\t\tif err != tt.errWant {\n\t\t\t\tt.Fatalf(\"expected Connect to error with '%v', but got '%v': %s\", tt.errWant, err, tt.msg)\n\t\t\t}\n\t\t}\n\t\tg.Disconnect(modules.NetAddress(listener.Addr().String()))\n\t}\n\tlistener.Close()\n}", "func (*HeartbeatRequest) Version() int16 {\n\treturn 0\n}", "func (conn *Conn) Version(t string) { conn.Ctcp(t, VERSION) }", "func (o *ConnectorTypeAllOf) GetVersionOk() (*string, bool) {\n\tif o == nil || o.Version == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Version, true\n}" ]
[ "0.6705135", "0.6438302", "0.6386366", "0.63707674", "0.6304543", "0.6262574", "0.617563", "0.6168916", "0.6146892", "0.6113688", "0.60996807", "0.6088013", "0.6075797", "0.6062475", "0.60598236", "0.60437936", "0.6016582", "0.599666", "0.59772485", "0.5974628", "0.5953988", "0.59169483", "0.58924955", "0.5832243", "0.57881093", "0.57830054", "0.57754713", "0.577481", "0.5735572", "0.57337725", "0.573009", "0.5720846", "0.5716682", "0.5687962", "0.5672081", "0.5671637", "0.5645014", "0.5639568", "0.5636682", "0.56032175", "0.5590913", "0.55883634", "0.558361", "0.55766535", "0.5575448", "0.55752265", "0.5572494", "0.55569285", "0.5551643", "0.55441856", "0.5532862", "0.55231524", "0.5494414", "0.54817957", "0.5466581", "0.54615015", "0.54542154", "0.5449218", "0.54461145", "0.54436266", "0.5441187", "0.5439101", "0.5422987", "0.54207885", "0.54165614", "0.54138595", "0.5409998", "0.5406415", "0.54006094", "0.5396717", "0.5389635", "0.5378113", "0.53765386", "0.5375758", "0.537566", "0.5375129", "0.53748214", "0.5358428", "0.53569984", "0.5351963", "0.535159", "0.5343372", "0.53414935", "0.5329183", "0.5322117", "0.53211945", "0.5318338", "0.53173023", "0.5309958", "0.53094697", "0.5303371", "0.53033066", "0.53031814", "0.5298474", "0.52966475", "0.5296457", "0.5291863", "0.5284918", "0.52816385", "0.52798694" ]
0.7818675
0
Test that the service can be served without error
func TestServiceStart(t *testing.T) { runTest(t, func(s *res.Service) { s.Handle("model", res.GetResource(func(r res.GetRequest) { r.NotFound() })) }, nil) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Test_NotFound(t *testing.T) {\n\tvar (\n\t\tnotFoundMsg ErrorMessage\n\t\tresp *http.Response\n\t)\n\n\tsvc := NewService()\n\tts := httptest.NewServer(svc.NewRouter(\"*\"))\n\tdefer ts.Close()\n\n\treq, _ := http.NewRequest(\"GET\", ts.URL+\"/not_found\", nil)\n\n\toutputLog := helpers.CaptureOutput(func() {\n\t\tresp, _ = http.DefaultClient.Do(req)\n\t})\n\n\tif got, want := resp.StatusCode, http.StatusNotFound; got != want {\n\t\tt.Fatalf(\"Invalid status code, got %d but want %d\", got, want)\n\t}\n\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tt.Fatalf(\"Got an error when reading body: %s\", err.Error())\n\t}\n\terr = json.Unmarshal(data, &notFoundMsg)\n\tif err != nil {\n\t\tt.Fatalf(\"Got an error when parsing json: %s\", err.Error())\n\t}\n\tif got, want := notFoundMsg.Code, http.StatusNotFound; got != want {\n\t\tt.Fatalf(\"Wrong code return, got %d but want %d\", got, want)\n\t}\n\tif got, want := notFoundMsg.Message, \"Not Found\"; got != want {\n\t\tt.Fatalf(\"Wrong message return, got %s but want %s\", got, want)\n\t}\n\n\tmatched, err := regexp.MatchString(`uri=/not_found `, outputLog)\n\tif matched != true || err != nil {\n\t\tt.Fatalf(\"request is not logged :\\n%s\", outputLog)\n\t}\n}", "func TestGetUserServiceDoesntExist (t *testing.T){\n\t_, err := GetUserService(\"\")\n\tassert.Equal(t, 404, err.HTTPStatus)\n}", "func TestReturns200IfThereAreNoChecks(t *testing.T) {\n\trecorder := httptest.NewRecorder()\n\n\treq, err := http.NewRequest(\"GET\", \"https://fakeurl.com/debug/health\", nil)\n\tif err != nil {\n\t\tt.Errorf(\"Failed to create request.\")\n\t}\n\n\tStatusHandler(recorder, req)\n\n\tif recorder.Code != 200 {\n\t\tt.Errorf(\"Did not get a 200.\")\n\t}\n}", "func Test_IndexHandler(t *testing.T) {\n\tvar (\n\t\tversionMsg Service\n\t\tresp *http.Response\n\t)\n\n\tsvc := NewService()\n\n\tts := httptest.NewServer(svc.NewRouter(\"*\"))\n\tdefer ts.Close()\n\n\treq, _ := http.NewRequest(\"GET\", ts.URL+\"/\", 
nil)\n\n\toutputLog := helpers.CaptureOutput(func() {\n\t\tresp, _ = http.DefaultClient.Do(req)\n\t})\n\n\tif got, want := resp.StatusCode, 200; got != want {\n\t\tt.Fatalf(\"Invalid status code, got %d but want %d\", got, want)\n\t}\n\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tt.Fatalf(\"Got an error when reading body: %s\", err.Error())\n\t}\n\n\terr = json.Unmarshal(data, &versionMsg)\n\tif err != nil {\n\t\tt.Fatalf(\"Got an error when parsing json: %s\", err.Error())\n\t}\n\tif got, want := versionMsg.Version, svc.Version; got != want {\n\t\tt.Fatalf(\"Wrong version return, got %s but want %s\", got, want)\n\t}\n\tif got, want := versionMsg.Name, svc.Name; got != want {\n\t\tt.Fatalf(\"Wrong version return, got %s but want %s\", got, want)\n\t}\n\n\tmatched, err := regexp.MatchString(`uri=/ `, outputLog)\n\tif matched != true || err != nil {\n\t\tt.Fatalf(\"request is not logged :\\n%s\", outputLog)\n\t}\n}", "func TestFailedEndpoint0(t *testing.T) {\n\tisTesting = true\n\tvar request = Request{\n\t\tPath: \"/api/devices\",\n\t\tHTTPMethod: \"PUT\",\n\t}\n\tvar response, _ = Handler(request)\n\tif response.StatusCode != 404 {\n\t\tt.Errorf(\"response status code has to be 404 but is %d\", response.StatusCode)\n\t}\n\tif response.Body != `{\"message\":\"requested endpoint not found\"}` {\n\t\tt.Errorf(\"body is: %s\", response.Body)\n\t}\n}", "func TestCallToPublicService(t *testing.T) {\n\tt.Parallel()\n\n\tclients := Setup(t)\n\n\tt.Log(\"Creating a Service for the helloworld test app.\")\n\tnames := test.ResourceNames{\n\t\tService: test.ObjectNameForTest(t),\n\t\tImage: test.HelloWorld,\n\t}\n\n\ttest.EnsureTearDown(t, clients, &names)\n\n\tresources, err := v1test.CreateServiceReady(t, clients, &names)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create initial Service: %v: %v\", names.Service, err)\n\t}\n\n\tif resources.Route.Status.URL.Host == \"\" {\n\t\tt.Fatalf(\"Route is missing .Status.URL: %#v\", 
resources.Route.Status)\n\t}\n\tif resources.Route.Status.Address == nil {\n\t\tt.Fatalf(\"Route is missing .Status.Address: %#v\", resources.Route.Status)\n\t}\n\n\tgatewayTestCases := []struct {\n\t\tname string\n\t\turl *url.URL\n\t\taccessibleExternally bool\n\t}{\n\t\t{\"local_address\", resources.Route.Status.Address.URL.URL(), false},\n\t\t{\"external_address\", resources.Route.Status.URL.URL(), true},\n\t}\n\n\tfor _, tc := range gatewayTestCases {\n\t\ttc := tc\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\t\t\tif !test.ServingFlags.DisableLogStream {\n\t\t\t\tcancel := logstream.Start(t)\n\t\t\t\tdefer cancel()\n\t\t\t}\n\t\t\ttestProxyToHelloworld(t, clients, tc.url, false /*inject*/, tc.accessibleExternally)\n\t\t})\n\t}\n}", "func TestWebServerNotOK(t *testing.T) {\n\n\tctx, _ := context.WithCancel(context.Background())\n\n\t// create a new kuberhealthy\n\tkh := makeTestKuberhealthy(t)\n\n\t// add a fake check to it with a not ok return\n\tfc := NewFakeCheck()\n\tdesiredError := randomdata.SillyName()\n\tfc.Errors = []string{desiredError}\n\tfc.OK = false\n\tkh.AddCheck(fc)\n\n\t// run the checker for enough time to make and update CRD entries, then stop it\n\tgo kh.Start(ctx)\n\ttime.Sleep(time.Second * 5)\n\tkh.StopChecks()\n\n\t// now run our test against the web server handler\n\trecorder := httptest.NewRecorder()\n\treq, err := http.NewRequest(\"GET\", \"/\", bytes.NewBufferString(\"\"))\n\tif err != nil {\n\t\tt.Fatal(\"Error creating request\", err)\n\t}\n\terr = kh.healthCheckHandler(recorder, req)\n\tif err != nil {\n\t\tt.Fatal(\"Error from health check handler:\", err)\n\t}\n\n\t// check the http status from the server\n\tif recorder.Code != http.StatusOK {\n\t\tt.Fatal(\"Bad response from handler\", recorder.Code)\n\t}\n\n\t// output the response from the server\n\tb, err := ioutil.ReadAll(recorder.Body)\n\tif err != nil {\n\t\tt.Fatal(\"Error reading response body\", err)\n\t}\n\tt.Log(string(b))\n\n\t// decode the 
response body to validate the contents\n\tvar state health.State\n\tjson.Unmarshal(b, &state)\n\n\tif len(state.Errors) < 1 {\n\t\tt.Fatal(\"The expected error message was not set.\")\n\t}\n\tif state.Errors[0] != desiredError {\n\t\tt.Fatal(\"The expected error message was not set. Got\", state.Errors[0], \"wanted\", desiredError)\n\t}\n\n\t// check that OK is false\n\tif state.OK != false {\n\t\tt.Fatal(\"Did not observe status page failure when one was expected\")\n\t}\n\n}", "func (h Handler) TestEndpoint() error {\n\tr, err := http.Get(h.url)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif r.StatusCode != 200 {\n\t\treturn errors.New(\"Endpoint not replying typical 200 answer on ping\")\n\t}\n\n\treturn nil\n}", "func (p *Plex) Test() (bool, error) {\n\trequestInfo.headers.Token = p.token\n\n\tresp, respErr := requestInfo.get(p.URL)\n\n\tif respErr != nil {\n\t\treturn false, respErr\n\t}\n\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode == 401 {\n\t\treturn false, errors.New(\"You are not authorized to access this server\")\n\t} else if resp.StatusCode != 200 {\n\t\tstatusCode := strconv.Itoa(resp.StatusCode)\n\t\treturn false, errors.New(\"Server replied with \" + statusCode + \" status code\")\n\t}\n\n\treturn true, nil\n}", "func TestFailedEndpoint1(t *testing.T) {\n\tisTesting = true\n\tvar request = Request{\n\t\tPath: \"/api/device\",\n\t\tHTTPMethod: \"GET\",\n\t}\n\tvar response, _ = Handler(request)\n\tif response.StatusCode != 404 {\n\t\tt.Errorf(\"response status code has to be 404 but is %d\", response.StatusCode)\n\t}\n\tif response.Body != `{\"message\":\"requested endpoint not found\"}` {\n\t\tt.Errorf(\"body is: %s\", response.Body)\n\t}\n}", "func TestProxyValid(t *testing.T) {\n\n\treq, err := http.NewRequest(\"GET\", \"/\", nil)\n\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tw := httptest.NewRecorder()\n\tbeego.Debug(\"hola\")\n\t//ctx := context.NewContext()\n\t//beego.Debug(ctx)\n\t//ctx.Reset(w, req)\n\t//ctx.Input = 
context.NewInput()\n\n\t//ctx.Input.SetData(\"hola\", \"maria\")\n\t//beego.Debug(ctx.Input.GetData(\"hola\"))\n\n\t//date := beego.Date(time.Date(2016, 05, 18, 12, 37, 30, 0, gmt), time.UnixDate)\n\tbeego.BeeApp.Handlers.ServeHTTP(w, req)\n\n\tbeego.Trace(\"testing\", \"TestProxyValid\", \"Code[%d]\\n%s\", w.Code, w.Body.String())\n\n\tConvey(\"Subject: Test Station Endpoint\\n\", t, func() {\n\t\tConvey(\"Status Code Should Be 200\", func() {\n\t\t\tSo(w.Code, ShouldEqual, 200)\n\t\t})\n\t})\n\n}", "func TestEndpointURL(t *testing.T) {\n\t// these client calls should fail since we'll break the URL paths\n\tsimulator.Test(func(ctx context.Context, vc *vim25.Client) {\n\t\tlsim.BreakLookupServiceURLs()\n\n\t\t{\n\t\t\t_, err := ssoadmin.NewClient(ctx, vc)\n\t\t\tif err == nil {\n\t\t\t\tt.Error(\"expected error\")\n\t\t\t}\n\t\t\tif !strings.Contains(err.Error(), http.StatusText(404)) {\n\t\t\t\tt.Error(err)\n\t\t\t}\n\t\t}\n\n\t\t{\n\t\t\tc, err := sts.NewClient(ctx, vc)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\n\t\t\treq := sts.TokenRequest{\n\t\t\t\tUserinfo: url.UserPassword(\"[email protected]\", \"password\"),\n\t\t\t}\n\t\t\t_, err = c.Issue(ctx, req)\n\t\t\tif err == nil {\n\t\t\t\tt.Error(\"expected error\")\n\t\t\t}\n\t\t\tif !strings.Contains(err.Error(), http.StatusText(404)) {\n\t\t\t\tt.Error(err)\n\t\t\t}\n\t\t}\n\t})\n\n\t// these client calls should not fail\n\tsimulator.Test(func(ctx context.Context, vc *vim25.Client) {\n\t\t{\n\t\t\t// NewClient calls ServiceInstance methods\n\t\t\t_, err := ssoadmin.NewClient(ctx, vc)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t}\n\n\t\t{\n\t\t\tc, err := sts.NewClient(ctx, vc)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\n\t\t\treq := sts.TokenRequest{\n\t\t\t\tUserinfo: url.UserPassword(\"[email protected]\", \"password\"),\n\t\t\t}\n\n\t\t\t_, err = c.Issue(ctx, req)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t}\n\t})\n}", "func serveDummy(ctx 
context.Context, cfg *config.Config) error {\n\t// serve a http healthcheck endpoint\n\tgo func() {\n\t\terr := serveHTTPHealthcheck(ctx, cfg)\n\t\tif err != nil {\n\t\t\tlogger.Errorf(ctx, \"Unable to serve http\", cfg.GetGrpcHostAddress(), err)\n\t\t}\n\t}()\n\n\tgrpcServer := newGRPCDummyServer(ctx, cfg)\n\n\tgrpcListener, err := net.Listen(\"tcp\", cfg.GetGrpcHostAddress())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlogger.Infof(ctx, \"Serving DataCatalog Insecure on port %v\", cfg.GetGrpcHostAddress())\n\treturn grpcServer.Serve(grpcListener)\n}", "func HealthCheck(w http.ResponseWriter, r *http.Request) {}", "func CheckServiceUnavailable(w *httptest.ResponseRecorder) {\n\tCheckResponseBody(w, 503, \"{\\\"messages\\\":[\\\"There was an error, please try again later\\\"],\\\"errors\\\":{\\\"error\\\":[\\\"service unavailable\\\"]}}\")\n}", "func HttpTest() {\n\tStartHttpServer()\n}", "func TestVhosts(t *testing.T) {\n\tsrv := createAndStartServer(t, &httpConfig{Vhosts: []string{\"test\"}}, false, &wsConfig{})\n\tdefer srv.stop()\n\turl := \"http://\" + srv.listenAddr()\n\n\tresp := rpcRequest(t, url, \"host\", \"test\")\n\tassert.Equal(t, resp.StatusCode, http.StatusOK)\n\n\tresp2 := rpcRequest(t, url, \"host\", \"bad\")\n\tassert.Equal(t, resp2.StatusCode, http.StatusForbidden)\n}", "func serviceUnavailable(rw http.ResponseWriter, r *http.Request) {\n\n}", "func TestServer_ServeHTTP_Error(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\tpath string\n\t\treq []byte\n\t\twantResponseCode int\n\t\twantResponseBody string\n\t}{{\n\t\tname: \"bad path\",\n\t\tpath: \"/invalid\",\n\t\treq: json.RawMessage(`{}`),\n\t\twantResponseCode: 400,\n\t\twantResponseBody: \"path did not match any interceptors\",\n\t}, {\n\t\tname: \"invalid body\",\n\t\tpath: \"/cel\",\n\t\treq: json.RawMessage(`{}`),\n\t\twantResponseCode: 400,\n\t\twantResponseBody: \"failed to parse body as InterceptorRequest\",\n\t}}\n\tfor _, tc := range tests {\n\t\tt.Run(tc.name, func(t 
*testing.T) {\n\t\t\tlogger := zaptest.NewLogger(t)\n\t\t\tctx, _ := test.SetupFakeContext(t)\n\n\t\t\tserver, err := NewWithCoreInterceptors(interceptors.DefaultSecretGetter(fakekubeclient.Get(ctx).CoreV1()), logger.Sugar())\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"error initializing core interceptors: %v\", err)\n\t\t\t}\n\t\t\tbody, err := json.Marshal(tc.req)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"Failed to marshal errors \")\n\t\t\t}\n\t\t\treq := httptest.NewRequest(\"POST\", fmt.Sprintf(\"http://example.com%s\", tc.path), bytes.NewBuffer(body))\n\t\t\tw := httptest.NewRecorder()\n\t\t\tserver.ServeHTTP(w, req)\n\t\t\tresp := w.Result()\n\t\t\tif resp.StatusCode != tc.wantResponseCode {\n\t\t\t\tt.Fatalf(\"ServeHTTP() expected statusCode %d but got: %d\", tc.wantResponseCode, resp.StatusCode)\n\t\t\t}\n\n\t\t\trespBody, _ := io.ReadAll(resp.Body)\n\t\t\tdefer resp.Body.Close()\n\t\t\tif !strings.Contains(string(respBody), tc.wantResponseBody) {\n\t\t\t\tt.Fatalf(\"ServeHTTP() expected response to contain : %s \\n but got %s: \", tc.wantResponseBody, string(respBody))\n\t\t\t}\n\t\t})\n\t}\n}", "func TestHTTPSServer(t *testing.T) {\n\n\tcaCert := path.Join(assetsDir, \"rootCA.crt\")\n\tserverCert := path.Join(assetsDir, \"server.crt\")\n\tserverKey := path.Join(assetsDir, \"server.key\")\n\tclientCert := path.Join(assetsDir, \"client.crt\")\n\tclientKey := path.Join(assetsDir, \"client.key\")\n\tuntrustedClientCert := path.Join(assetsDir, \"other_client.crt\")\n\tuntrustedClientKey := path.Join(assetsDir, \"other_client.key\")\n\n\tauthServer := AuthServer{\n\t\tAddr: \":3457\",\n\t\tSpiffeID: \"spiffe://example.com/service\",\n\t\tCertFile: serverCert,\n\t\tKeyFile: serverKey,\n\t\tCaCert: caCert,\n\t\tCertValidator: validator.SvidValidator{},\n\t}\n\n\t// Run the HTTPS server\n\tgo func() {\n\t\tauthServer.Start()\n\t}()\n\n\t// Create a client with a trusted SpiffeID\n\tclient := createClient(clientCert, clientKey, caCert)\n\n\t// Perform a the 
authentication request\n\tres, err := client.Get(\"https://localhost:3457/auth\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer res.Body.Close()\n\n\tif res.StatusCode != http.StatusOK {\n\t\tt.Errorf(\"Response code was %v; want 200\", res.StatusCode)\n\t}\n\n\t// Create a client with an untrusted SpiffeID\n\tclient = createClient(untrustedClientCert, untrustedClientKey, caCert)\n\n\t// Perform a the authentication request\n\tres2, err := client.Get(\"https://localhost:3457/auth\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer res2.Body.Close()\n\n\tif res2.StatusCode != 401 {\n\t\tt.Errorf(\"Response code was %v; want 401\", res.StatusCode)\n\t}\n\n\t// Send signal to shutdown the server\n\tshutdown <- 1\n}", "func TestWebServer(t *testing.T) {\n\n\tctx, _ := context.WithCancel(context.Background())\n\n\tif testing.Short() {\n\t\tt.Skip()\n\t}\n\n\t// create a new kuberhealthy\n\tt.Log(\"Making fake check\")\n\tkh := makeTestKuberhealthy(t)\n\n\t// add a fake check to it\n\tfc := NewFakeCheck()\n\tt.Log(\"Adding fake check\")\n\tkh.AddCheck(fc)\n\n\tt.Log(\"Starting Kuberhealthy checks\")\n\tgo kh.Start(ctx)\n\t// give the checker time to make CRDs\n\tt.Log(\"Waiting for checks to run\")\n\ttime.Sleep(time.Second * 2)\n\tt.Log(\"Stopping Kuberhealthy checks\")\n\tkh.StopChecks()\n\n\t// now run our test against the web server handler\n\tt.Log(\"Simulating web request\")\n\trecorder := httptest.NewRecorder()\n\treq, err := http.NewRequest(\"GET\", \"/\", bytes.NewBufferString(\"\"))\n\tif err != nil {\n\t\tt.Fatal(\"Error creating request\", err)\n\t}\n\terr = kh.healthCheckHandler(recorder, req)\n\tif err != nil {\n\t\tt.Fatal(\"Error from health check handler:\", err)\n\t}\n\n\t// check the http status from the server\n\tt.Log(\"Checking status code\")\n\tif recorder.Code != http.StatusOK {\n\t\tt.Fatal(\"Bad response from handler\", recorder.Code)\n\t}\n\n\t// output the response from the server\n\tt.Log(\"Reading reponse body\")\n\tb, err := 
ioutil.ReadAll(recorder.Body)\n\tif err != nil {\n\t\tt.Fatal(\"Error reading response body\", err)\n\t}\n\n\tt.Log(string(b))\n\n}", "func TestService(t *testing.T) {\n\t// Create service to test\n\ts := res.NewService(\"foo\")\n\ts.Handle(\"bar.$id\",\n\t\tres.Access(res.AccessGranted),\n\t\tres.GetModel(func(r res.ModelRequest) {\n\t\t\tr.Model(struct {\n\t\t\t\tMessage string `json:\"msg\"`\n\t\t\t}{r.PathParam(\"id\")})\n\t\t}),\n\t)\n\n\t// Create test session\n\tc := restest.NewSession(t, s)\n\tdefer c.Close()\n\n\t// Test sending get request and validate response\n\tc.Get(\"foo.bar.42\").\n\t\tResponse().\n\t\tAssertModel(map[string]string{\"msg\": \"42\"})\n}", "func TestPostNonRetriable(t *testing.T) {\n\tstatus := http.StatusBadRequest\n\ttries := 0\n\tts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.WriteHeader(status)\n\t\tif tries++; tries > 1 {\n\t\t\tt.Errorf(\"expected client to not retry after receiving status code %d\", status)\n\t\t}\n\t}))\n\n\tdefer ts.Close()\n\n\tc := &APIClient{\n\t\tBaseURL: ts.URL,\n\t\tClient: ts.Client(),\n\t}\n\n\terr := c.PingSuccess(TestUUID, nil)\n\tif err == nil {\n\t\tt.Errorf(\"expected PingSuccess to return non-nil error after non-retriable API response\")\n\t}\n}", "func TestHandler_Root_Unauthorized(t *testing.T) {\n\th := NewTestHandler()\n\tdefer h.Close()\n\n\tresp, _ := http.Get(h.Server.URL)\n\tresp.Body.Close()\n\tequals(t, 200, resp.StatusCode)\n}", "func Run(res http.ResponseWriter, req *http.Request) {\n\tif val, ok := serviceMap[strings.Split(req.Host, \":\")[0]]; ok {\n\t\tif val.runAction(res, req) {\n\t\t\treturn\n\t\t}\n\t}\n\n\tif val, ok := serviceMap[\"*\"]; ok {\n\t\tif val.runAction(res, req) {\n\t\t\treturn\n\t\t}\n\t}\n\n\tres.WriteHeader(http.StatusBadGateway)\n\tfmt.Fprint(res, \"BAD GATEWAY\")\n}", "func doNotFoundTest(t *testing.T, method string, uri string) {\n\tclient := testHttpClient()\n\treq, err := http.NewRequest(method, 
testServer.URL+uri, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tres, err := client.Do(req)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif res.StatusCode != 404 {\n\t\tt.Errorf(\"%s %s : Expected HTTP Status Code 404, got %d\\n\", method, uri, res.StatusCode)\n\t}\n}", "func TestHashHandlerReturns404OnGet(t *testing.T) {\n req, err := http.NewRequest(\"GET\", \"/hash\", nil) \n if err != nil {\n t.Fatal(err)\n }\n\n rr := httptest.NewRecorder()\n\n context := makeServerContext()\n\n handler := hashHandler{sc:&context}\n handler.ServeHTTP(rr, req)\n\n // Check the status code is what we expect.\n if status := rr.Code; status != http.StatusNotFound {\n t.Errorf(\"hash handler returned wrong status code: got %v want %v\",\n status, http.StatusNotFound)\n }\n}", "func TestServeAPI(t *testing.T) {\n\tlal := memnet.Listen(\"local-tailscaled.sock:80\")\n\tdefer lal.Close()\n\t// Serve dummy localapi. Just returns \"success\".\n\tlocalapi := &http.Server{Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprintf(w, \"success\")\n\t})}\n\tdefer localapi.Close()\n\n\tgo localapi.Serve(lal)\n\ts := &Server{lc: &tailscale.LocalClient{Dial: lal.Dial}}\n\n\ttests := []struct {\n\t\tname string\n\t\treqPath string\n\t\twantResp string\n\t\twantStatus int\n\t}{{\n\t\tname: \"invalid_endpoint\",\n\t\treqPath: \"/not-an-endpoint\",\n\t\twantResp: \"invalid endpoint\",\n\t\twantStatus: http.StatusNotFound,\n\t}, {\n\t\tname: \"not_in_localapi_allowlist\",\n\t\treqPath: \"/local/v0/not-allowlisted\",\n\t\twantResp: \"/v0/not-allowlisted not allowed from localapi proxy\",\n\t\twantStatus: http.StatusForbidden,\n\t}, {\n\t\tname: \"in_localapi_allowlist\",\n\t\treqPath: \"/local/v0/logout\",\n\t\twantResp: \"success\", // Successfully allowed to hit localapi.\n\t\twantStatus: http.StatusOK,\n\t}}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tr := httptest.NewRequest(\"POST\", \"/api\"+tt.reqPath, nil)\n\t\t\tw := 
httptest.NewRecorder()\n\n\t\t\ts.serveAPI(w, r)\n\t\t\tres := w.Result()\n\t\t\tdefer res.Body.Close()\n\t\t\tif gotStatus := res.StatusCode; tt.wantStatus != gotStatus {\n\t\t\t\tt.Errorf(\"wrong status; want=%q, got=%q\", tt.wantStatus, gotStatus)\n\t\t\t}\n\t\t\tbody, err := io.ReadAll(res.Body)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tgotResp := strings.TrimSuffix(string(body), \"\\n\") // trim trailing newline\n\t\t\tif tt.wantResp != gotResp {\n\t\t\t\tt.Errorf(\"wrong response; want=%q, got=%q\", tt.wantResp, gotResp)\n\t\t\t}\n\t\t})\n\t}\n}", "func PingService(url string, expectedStatus int) (*http.Response, error) {\n\tresponse, err := http.Get(url)\n\n\tif err != nil {\n\t\tlog.Println(\"Failure to get make request\")\n\t\treturn nil, err\n\t}\n\n\tif response.StatusCode != expectedStatus {\n\t\terr := fmt.Errorf(\"Expected Status %d Actual Status %d\", expectedStatus, response.StatusCode)\n\t\treturn nil, err\n\t}\n\n\treturn response, nil\n}", "func TestServices(t *testing.T) { check.TestingT(t) }", "func CheckIfEndpointIsResponding(t *testing.T, endpoint string) bool {\r\n\t// we ignore certificates at this point\r\n\ttlsConfig := tls.Config{}\r\n\ttlsConfig.InsecureSkipVerify = true\r\n\r\n\terr := http_helper.HttpGetWithRetryWithCustomValidationE(\r\n\t\tt,\r\n\t\tfmt.Sprintf(\"https://%s\", endpoint),\r\n\t\t&tlsConfig,\r\n\t\t1,\r\n\t\t10*time.Second,\r\n\t\tfunc(statusCode int, body string) bool {\r\n\t\t\tif statusCode == 200 {\r\n\t\t\t\treturn true\r\n\t\t\t}\r\n\t\t\tif statusCode == 404 {\r\n\t\t\t\tt.Log(\"Warning: 404 response from endpoint. 
Test will still PASS.\")\r\n\t\t\t\treturn true\r\n\t\t\t}\r\n\t\t\treturn false\r\n\t\t},\r\n\t)\r\n\treturn err == nil\r\n}", "func TestHealth(t *testing.T) {\n\tSetupLogger()\n\tgaz := &Gaz{Router: mux.NewRouter(), isReady: new(int32)}\n\tgaz.InitHealthcheck()\n\n\tport, shutdown := setupServerHTTP(gaz.Router)\n\tdefer shutdown()\n\n\tbaseURL := fmt.Sprintf(\"http://localhost:%d\", port)\n\n\tokStatus := http.StatusOK\n\tkoStatus := http.StatusServiceUnavailable\n\n\tcheck(t, \"ready is unset\", baseURL+\"/ready\", koStatus)\n\tcheck(t, \"live is set\", baseURL+\"/live\", okStatus)\n\n\tgaz.SetReady(true)\n\tcheck(t, \"ready is set\", baseURL+\"/ready\", okStatus)\n\n\tgaz.SetReady(false)\n\tcheck(t, \"ready is set to false\", baseURL+\"/ready\", koStatus)\n}", "func verifyServeHostnameServiceUp(c *client.Client, ns, host string, expectedPods []string, serviceIP string, servicePort int) error {\n\texecPodName := createExecPodOrFail(c, ns, \"execpod-\")\n\tdefer func() {\n\t\tdeletePodOrFail(c, ns, execPodName)\n\t}()\n\n\t// Loop a bunch of times - the proxy is randomized, so we want a good\n\t// chance of hitting each backend at least once.\n\tbuildCommand := func(wget string) string {\n\t\treturn fmt.Sprintf(\"for i in $(seq 1 %d); do %s http://%s:%d 2>&1 || true; echo; done\",\n\t\t\t50*len(expectedPods), wget, serviceIP, servicePort)\n\t}\n\tcommands := []func() string{\n\t\t// verify service from node\n\t\tfunc() string {\n\t\t\tcmd := \"set -e; \" + buildCommand(\"wget -q --timeout=0.2 --tries=1 -O -\")\n\t\t\tframework.Logf(\"Executing cmd %q on host %v\", cmd, host)\n\t\t\tresult, err := framework.SSH(cmd, host, framework.TestContext.Provider)\n\t\t\tif err != nil || result.Code != 0 {\n\t\t\t\tframework.LogSSHResult(result)\n\t\t\t\tframework.Logf(\"error while SSH-ing to node: %v\", err)\n\t\t\t}\n\t\t\treturn result.Stdout\n\t\t},\n\t\t// verify service from pod\n\t\tfunc() string {\n\t\t\tcmd := buildCommand(\"wget -q -T 1 -O 
-\")\n\t\t\tframework.Logf(\"Executing cmd %q in pod %v/%v\", cmd, ns, execPodName)\n\t\t\t// TODO: Use exec-over-http via the netexec pod instead of kubectl exec.\n\t\t\toutput, err := framework.RunHostCmd(ns, execPodName, cmd)\n\t\t\tif err != nil {\n\t\t\t\tframework.Logf(\"error while kubectl execing %q in pod %v/%v: %v\\nOutput: %v\", cmd, ns, execPodName, err, output)\n\t\t\t}\n\t\t\treturn output\n\t\t},\n\t}\n\n\texpectedEndpoints := sets.NewString(expectedPods...)\n\tBy(fmt.Sprintf(\"verifying service has %d reachable backends\", len(expectedPods)))\n\tfor _, cmdFunc := range commands {\n\t\tpassed := false\n\t\tgotEndpoints := sets.NewString()\n\n\t\t// Retry cmdFunc for a while\n\t\tfor start := time.Now(); time.Since(start) < kubeProxyLagTimeout; time.Sleep(5 * time.Second) {\n\t\t\tfor _, endpoint := range strings.Split(cmdFunc(), \"\\n\") {\n\t\t\t\ttrimmedEp := strings.TrimSpace(endpoint)\n\t\t\t\tif trimmedEp != \"\" {\n\t\t\t\t\tgotEndpoints.Insert(trimmedEp)\n\t\t\t\t}\n\t\t\t}\n\t\t\t// TODO: simply checking that the retrieved endpoints is a superset\n\t\t\t// of the expected allows us to ignore intermitten network flakes that\n\t\t\t// result in output like \"wget timed out\", but these should be rare\n\t\t\t// and we need a better way to track how often it occurs.\n\t\t\tif gotEndpoints.IsSuperset(expectedEndpoints) {\n\t\t\t\tif !gotEndpoints.Equal(expectedEndpoints) {\n\t\t\t\t\tframework.Logf(\"Ignoring unexpected output wgetting endpoints of service %s: %v\", serviceIP, gotEndpoints.Difference(expectedEndpoints))\n\t\t\t\t}\n\t\t\t\tpassed = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tframework.Logf(\"Unable to reach the following endpoints of service %s: %v\", serviceIP, expectedEndpoints.Difference(gotEndpoints))\n\t\t}\n\t\tif !passed {\n\t\t\t// Sort the lists so they're easier to visually diff.\n\t\t\texp := expectedEndpoints.List()\n\t\t\tgot := 
gotEndpoints.List()\n\t\t\tsort.StringSlice(exp).Sort()\n\t\t\tsort.StringSlice(got).Sort()\n\t\t\treturn fmt.Errorf(\"service verification failed for: %s\\nexpected %v\\nreceived %v\", serviceIP, exp, got)\n\t\t}\n\t}\n\treturn nil\n}", "func testGatePipelineGetMissing() *httptest.Server {\n\treturn httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\thttp.NotFound(w, r)\n\t}))\n}", "func ServiceAvailable(ctx *Context, url string, timeout time.Duration) bool {\n\n\tLog(INFO, ctx, \"ServiceAvailable\", \"url\", url)\n\n\tclient := &http.Client{Timeout: timeout}\n\tresp, err := client.Get(url)\n\tif err != nil {\n\t\tLog(INFO, ctx, \"ServiceAvailable\", \"url\", url, \"error\", err, \"available\", false)\n\t\tLog(ERROR, ctx, \"ServiceAvailable\", \"url\", url, \"error\", err, \"available\", false)\n\t\treturn false\n\t}\n\n\tif resp.StatusCode != 200 {\n\t\tLog(INFO, ctx, \"ServiceAvailable\", \"url\", url, \"code\", resp.StatusCode, \"available\", false)\n\t\treturn false\n\t}\n\n\tLog(INFO, ctx, \"ServiceAvailable\", \"url\", url, \"available\", true)\n\treturn true\n}", "func testResponse(t *testing.T, e error, expectedCode int) {\n\ttestServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\ttypes.HandleServerError(w, e)\n\t}))\n\tdefer testServer.Close()\n\n\tres, err := http.Get(testServer.URL)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif res.StatusCode != expectedCode {\n\t\tt.Errorf(\"Expected status code %v but got %v\", expectedCode, res.StatusCode)\n\t}\n}", "func (s *HTTPTestSuite) TestMakeRequestFailureResponse() {\n\tcheck := assert.New(s.T())\n\n\t// mock http request\n\thttpmock.RegisterResponder(http.MethodGet, s.url,\n\t\thttpmock.NewStringResponder(http.StatusBadGateway, ``))\n\n\t// make http request\n\tstatusCode, response, _, err := s.requestHandler.MakeRequest(s.requestSpecifications)\n\tif err == nil {\n\t\tcheck.Equal(statusCode, 
http.StatusBadGateway)\n\t\tcheck.Equal(string(response), ``)\n\t}\n\n\t// get the amount of calls for the registered responder\n\tinfo := httpmock.GetCallCountInfo()\n\tcheck.Equal(1, info[http.MethodGet+\" \"+s.url])\n}", "func TestGetFeaturesError(t *testing.T) {\n\tts := httptest.NewServer(\n\t\thttp.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\thttp.Error(w, featureRespError, http.StatusNotFound)\n\t\t}),\n\t)\n\tdefer ts.Close()\n\n\tregistry := HttpRegistry{URL: ts.URL}\n\t_, err := registry.getFeatures(ts.URL)\n\tif err == nil {\n\t\tt.Errorf(\"should fail\")\n\t}\n\n\tmsg := \"Feature was not found\"\n\n\tif err.Error() != msg {\n\t\tt.Errorf(\"expected %s, got %s\", msg, err.Error())\n\t}\n}", "func (s) TestServeSuccess(t *testing.T) {\n\tfs, clientCh, cleanup := setupOverrides(t)\n\tdefer cleanup()\n\n\tserver := NewGRPCServer()\n\tdefer server.Stop()\n\n\tlocalAddr, err := xdstestutils.AvailableHostPort()\n\tif err != nil {\n\t\tt.Fatalf(\"testutils.AvailableHostPort() failed: %v\", err)\n\t}\n\n\t// Call Serve() in a goroutine, and push on a channel when Serve returns.\n\tserveDone := testutils.NewChannel()\n\tgo func() {\n\t\tif err := server.Serve(ServeOptions{Address: localAddr}); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t\tserveDone.Send(nil)\n\t}()\n\n\t// Wait for an xdsClient to be created.\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\tc, err := clientCh.Receive(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"error when waiting for new xdsClient to be created: %v\", err)\n\t}\n\tclient := c.(*fakeclient.Client)\n\n\t// Wait for a listener watch to be registered on the xdsClient.\n\tname, err := client.WaitForWatchListener(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"error when waiting for a ListenerWatch: %v\", err)\n\t}\n\twantName := fmt.Sprintf(\"grpc/server?udpa.resource.listening_address=%s\", localAddr)\n\tif name != wantName {\n\t\tt.Fatalf(\"LDS watch registered for name %q, 
want %q\", name, wantName)\n\t}\n\n\t// Push an error to the registered listener watch callback and make sure\n\t// that Serve does not return.\n\tclient.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{}, errors.New(\"LDS error\"))\n\tsCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout)\n\tdefer sCancel()\n\tif _, err := serveDone.Receive(sCtx); err != context.DeadlineExceeded {\n\t\tt.Fatal(\"Serve() returned after a bad LDS response\")\n\t}\n\n\t// Push a good LDS response, and wait for Serve() to be invoked on the\n\t// underlying grpc.Server.\n\tclient.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: \"routeconfig\"}, nil)\n\tif _, err := fs.serveCh.Receive(ctx); err != nil {\n\t\tt.Fatalf(\"error when waiting for Serve() to be invoked on the grpc.Server\")\n\t}\n}", "func checkNginxMainPage(c internalapi.RuntimeService, podID string, hostPort int32) {\n\tBy(\"get the IP:port needed to be checked\")\n\tvar err error\n\tvar resp *http.Response\n\n\turl := \"http://\"\n\tif hostPort != 0 {\n\t\turl += \"127.0.0.1:\" + strconv.Itoa(int(hostPort))\n\t} else {\n\t\tstatus := getPodSandboxStatus(c, podID)\n\t\tExpect(status.GetNetwork()).NotTo(BeNil(), \"The network in status should not be nil.\")\n\t\tExpect(status.GetNetwork().Ip).NotTo(BeNil(), \"The IP should not be nil.\")\n\t\turl += status.GetNetwork().Ip + \":\" + strconv.Itoa(int(nginxContainerPort))\n\t}\n\tframework.Logf(\"the IP:port is \" + url)\n\n\tBy(\"check the content of \" + url)\n\n\tEventually(func() error {\n\t\tresp, err = http.Get(url)\n\t\treturn err\n\t}, time.Minute, time.Second).Should(BeNil())\n\n\tExpect(resp.StatusCode).To(Equal(200), \"The status code of response should be 200.\")\n\tframework.Logf(\"check port mapping succeed\")\n}", "func TestRespondsWithLove(t *testing.T) {\n\n\tpool, err := dockertest.NewPool(\"\")\n\trequire.NoError(t, err, \"could not connect to Docker\")\n\n\tresource, err := pool.Run(\"docker-gs-ping\", 
\"latest\", []string{})\n\trequire.NoError(t, err, \"could not start container\")\n\n\tt.Cleanup(func() {\n\t\trequire.NoError(t, pool.Purge(resource), \"failed to remove container\")\n\t})\n\n\tvar resp *http.Response\n\n\terr = pool.Retry(func() error {\n\t\tresp, err = http.Get(fmt.Sprint(\"http://localhost:\", resource.GetPort(\"8080/tcp\"), \"/\"))\n\t\tif err != nil {\n\t\t\tt.Log(\"container not ready, waiting...\")\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n\trequire.NoError(t, err, \"HTTP error\")\n\tdefer resp.Body.Close()\n\n\trequire.Equal(t, http.StatusOK, resp.StatusCode, \"HTTP status code\")\n\n\tbody, err := io.ReadAll(resp.Body)\n\trequire.NoError(t, err, \"failed to read HTTP body\")\n\n\t// Finally, test the business requirement!\n\trequire.Contains(t, string(body), \"<3\", \"does not respond with love?\")\n}", "func TestSite(url string) {\n\n\tres, err := http.Get(url)\n\n\tif err != nil {\n\t\tfmt.Println(\"Error while calling url\")\n\t\tfmt.Println(err)\n\t\tos.Exit(-1)\n\t}\n\n\tif res.StatusCode == 200 {\n\t\tfmt.Println(\"site\", url, \"carregado com sucesso!\")\n\t\tWriteLog(url, true)\n\t} else {\n\t\tfmt.Println(\"site\", url, \"com problemas. 
Status Code:\", res.StatusCode)\n\t\tWriteLog(url, false)\n\t}\n}", "func TestStatsHandlerReturns404OnPost(t *testing.T) {\n req, err := http.NewRequest(\"POST\", \"/stats\", nil) \n if err != nil {\n t.Fatal(err)\n }\n\n rr := httptest.NewRecorder()\n\n context := makeServerContext()\n\n handler := statsHandler{sc:&context}\n handler.ServeHTTP(rr, req)\n\n // Check the status code is what we expect.\n if status := rr.Code; status != http.StatusNotFound {\n t.Errorf(\"stats handler returned wrong status code: got %v want %v\",\n status, http.StatusNotFound)\n }\n}", "func testHTTPRequest(t *testing.T, url string, shouldPass bool) {\n\tctx := TestContext(\"TestHTTPRequest\")\n\n\treq := NewHTTPRequest(ctx, \"GET\", url, \"\")\n\tres, _ := req.Do(ctx)\n\tif shouldPass {\n\t\tif res.Error != \"\" {\n\t\t\tt.Fatal(res.Error)\n\t\t}\n\t} else {\n\t\tif res.Error == \"\" {\n\t\t\tt.Fatal(\"should have failed\")\n\t\t}\n\t}\n}", "func testHTTPResponse(\n\tt *testing.T,\n\tr *gin.Engine,\n\treq *http.Request,\n\tf func(w *httptest.ResponseRecorder) bool,\n) {\n\t// Create a response recorder\n\tw := httptest.NewRecorder()\n\n\t// Create the service and process the passed request\n\tr.ServeHTTP(w, req)\n\n\tif !f(w) { // check if test was successful\n\t\tt.Fail()\n\t}\n}", "func checkGet(t *testing.T, ts *httptest.Server, path string) (*http.Response, []byte) {\n\treturn checkRequest(t, ts, \"GET\", path, nil)\n}", "func TestServerNotExist(t *testing.T) {\n\t_, err := NewClient([]string{\"http://localhost:1234\"})\n\n\t// make sure it is network error\n\t_, ok := err.(net.Error)\n\trequire.True(t, ok)\n}", "func TestProbeHTTPSHTTP(t *testing.T) {\n\tserver := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprintln(w, \"Hello world\")\n\t}))\n\tserver.Start()\n\tdefer server.Close()\n\n\tregistry := prometheus.NewRegistry()\n\n\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\tdefer 
cancel()\n\n\tif err := ProbeHTTPS(ctx, newTestLogger(), server.URL, config.Module{}, registry); err == nil {\n\t\tt.Fatalf(\"expected error, but err was nil\")\n\t}\n}", "func okHealthCheck(proxy *Proxy) error {\n\treturn nil\n}", "func TestFakeServer(t *testing.T) {\n\tstartFakeBookingApp()\n\n\tresp := httptest.NewRecorder()\n\n\t// First, test that the expected responses are actually generated\n\thandle(resp, showRequest)\n\tif !strings.Contains(resp.Body.String(), \"300 Main St.\") {\n\t\tt.Errorf(\"Failed to find hotel address in action response:\\n%s\", resp.Body)\n\t\tt.FailNow()\n\t}\n\tresp.Body.Reset()\n\n\thandle(resp, staticRequest)\n\tsessvarsSize := getFileSize(t, path.Join(BasePath, \"public\", \"js\", \"sessvars.js\"))\n\tif int64(resp.Body.Len()) != sessvarsSize {\n\t\tt.Errorf(\"Expected sessvars.js to have %d bytes, got %d:\\n%s\", sessvarsSize, resp.Body.Len(), resp.Body)\n\t\tt.FailNow()\n\t}\n\tresp.Body.Reset()\n\n\thandle(resp, jsonRequest)\n\tif !strings.Contains(resp.Body.String(), `\"Address\":\"300 Main St.\"`) {\n\t\tt.Errorf(\"Failed to find hotel address in JSON response:\\n%s\", resp.Body)\n\t\tt.FailNow()\n\t}\n\tresp.Body.Reset()\n\n\thandle(resp, plaintextRequest)\n\tif resp.Body.String() != \"Hello, World!\" {\n\t\tt.Errorf(\"Failed to find greeting in plaintext response:\\n%s\", resp.Body)\n\t\tt.FailNow()\n\t}\n\n\tresp.Body = nil\n}", "func createServerSuccess(t *testing.T) *httptest.Server {\n\treturn httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tt.Logf(\"Method: %v\", r.Method)\n\t\tt.Logf(\"Path: %v\", r.URL.Path)\n\n\t\tw.Header().Add(\"Content-Type\", \"application/json\")\n\n\t\tsecret := r.Header.Get(magic.APISecretHeader)\n\t\tif secret != testSecret {\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\tresp := magic.Response{\n\t\t\t\tErrorCode: \"err_code_unauthorized\",\n\t\t\t\tMessage: \"unauthorized\",\n\t\t\t\tStatus: \"fail\",\n\t\t\t}\n\t\t\tdata, err := 
json.Marshal(resp)\n\t\t\trequire.NoError(t, err, \"can't marshal test data\")\n\t\t\t_, _ = w.Write(data)\n\t\t\tt.Log(\"401 - Unauthorized\")\n\t\t\treturn\n\t\t}\n\n\t\tswitch r.Method {\n\t\tcase http.MethodGet:\n\t\t\tswitch r.URL.Path {\n\t\t\tcase userInfoV1:\n\t\t\t\tresp := magic.Response{\n\t\t\t\t\tData: &magic.UserInfo{\n\t\t\t\t\t\tEmail: \"[email protected]\",\n\t\t\t\t\t\tIssuer: \"did:ethr:0x4B73C58370AEfcEf86A6021afCDe5673511376B2\",\n\t\t\t\t\t\tPublicAddress: \"0x4B73C58370AEfcEf86A6021afCDe5673511376B2\",\n\t\t\t\t\t},\n\t\t\t\t\tStatus: \"ok\",\n\t\t\t\t}\n\t\t\t\tdata, err := json.Marshal(resp)\n\t\t\t\trequire.NoError(t, err, \"can't marshal test data\")\n\t\t\t\t_, _ = w.Write(data)\n\t\t\t}\n\n\t\tcase http.MethodPost:\n\t\t\tswitch r.URL.Path {\n\t\t\tcase userLogoutV2:\n\t\t\t\tresp := magic.Response{\n\t\t\t\t\tStatus: \"ok\",\n\t\t\t\t}\n\t\t\t\tdata, err := json.Marshal(resp)\n\t\t\t\trequire.NoError(t, err, \"can't marshal test data\")\n\t\t\t\t_, _ = w.Write(data)\n\t\t\t}\n\t\t}\n\t}))\n}", "func TestServeHTTPFailingFS(t *testing.T) {\n\ttests := []struct {\n\t\tfsErr error\n\t\texpectedStatus int\n\t\texpectedErr error\n\t\texpectedHeaders map[string]string\n\t}{\n\t\t{\n\t\t\tfsErr: os.ErrNotExist,\n\t\t\texpectedStatus: http.StatusNotFound,\n\t\t\texpectedErr: nil,\n\t\t},\n\t\t{\n\t\t\tfsErr: os.ErrPermission,\n\t\t\texpectedStatus: http.StatusForbidden,\n\t\t\texpectedErr: os.ErrPermission,\n\t\t},\n\t\t{\n\t\t\tfsErr: errCustom,\n\t\t\texpectedStatus: http.StatusServiceUnavailable,\n\t\t\texpectedErr: errCustom,\n\t\t\texpectedHeaders: map[string]string{\"Retry-After\": \"5\"},\n\t\t},\n\t}\n\n\tfor i, test := range tests {\n\t\t// initialize a file server with the failing FileSystem\n\t\tfileserver := FileServer{Root: failingFS{err: test.fsErr}}\n\n\t\t// prepare the request and response\n\t\trequest, err := http.NewRequest(\"GET\", \"https://foo/\", nil)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Failed to build request. 
Error was: %v\", err)\n\t\t}\n\t\tresponseRecorder := httptest.NewRecorder()\n\n\t\tstatus, actualErr := fileserver.ServeHTTP(responseRecorder, request)\n\n\t\t// check the status\n\t\tif status != test.expectedStatus {\n\t\t\tt.Errorf(\"Test %d: Expected status %d, found %d\", i, test.expectedStatus, status)\n\t\t}\n\n\t\t// check the error\n\t\tif actualErr != test.expectedErr {\n\t\t\tt.Errorf(\"Test %d: Expected err %v, found %v\", i, test.expectedErr, actualErr)\n\t\t}\n\n\t\t// check the headers - a special case for server under load\n\t\tif test.expectedHeaders != nil && len(test.expectedHeaders) > 0 {\n\t\t\tfor expectedKey, expectedVal := range test.expectedHeaders {\n\t\t\t\tactualVal := responseRecorder.Header().Get(expectedKey)\n\t\t\t\tif expectedVal != actualVal {\n\t\t\t\t\tt.Errorf(\"Test %d: Expected header %s: %s, found %s\", i, expectedKey, expectedVal, actualVal)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func pinger() error {\n\tresp, err := http.Get(\"http://localhost:3000/health\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != 200 {\n\t\treturn fmt.Errorf(\"server returned non-200 status code\")\n\t}\n\treturn nil\n}", "func healthCheck(w http.ResponseWriter, r *http.Request) {\n\tlog.Printf(\"Serving request: %s\", r.URL.Path)\n\tfmt.Fprintf(w, \"Ok\")\n}", "func TestGetFailure0(t *testing.T) {\n\tisTesting = true\n\n\tvar params = make(map[string]string)\n\tparams[\"id\"] = \"invalid-id\"\n\n\tvar request = Request{\n\t\tPath: \"/api/devices\",\n\t\tPathParameters: params,\n\t\tHTTPMethod: \"GET\",\n\t}\n\tvar response, _ = Handler(request)\n\n\tif response.StatusCode != 404 {\n\t\tt.Errorf(\"response status code has to be 404 but is %d\", response.StatusCode)\n\t}\n}", "func shouldExportAsHeadlessService(endpoints *corev1.Endpoints, log *logging.Entry) bool {\n\tfor _, subset := range endpoints.Subsets {\n\t\tfor _, addr := range subset.Addresses {\n\t\t\tif addr.Hostname != \"\" {\n\t\t\t\treturn 
true\n\t\t\t}\n\t\t}\n\n\t\tfor _, addr := range subset.NotReadyAddresses {\n\t\t\tif addr.Hostname != \"\" {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\tlog.Infof(\"Service %s/%s should not be exported as headless: no named addresses in its endpoints object\", endpoints.Namespace, endpoints.Name)\n\treturn false\n}", "func TestContextIsAccessible(t *testing.T) {\n\t// case 1: success\n\tsuccHand := func(w http.ResponseWriter, r *http.Request) {\n\t\ttoken := Token(r)\n\t\tif token == \"\" {\n\t\t\tt.Errorf(\"Token is inaccessible in the success handler\")\n\t\t}\n\t}\n\n\thand := New(http.HandlerFunc(succHand))\n\n\t// we need a request that passes. Let's just use a safe method for that.\n\treq := dummyGet()\n\twriter := httptest.NewRecorder()\n\n\thand.ServeHTTP(writer, req)\n}", "func TestMakeRequestThatGetsDenied(t *testing.T) {\n\tproxier := New()\n\tfor {\n\t\tresp, err := proxier.DoRequestRaw(context.Background(), \"GET\", gimmeproxy.GimmeProxyURL, nil)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Error %s: \\n\", err.Error())\n\t\t\tbreak\n\t\t}\n\t\tfmt.Printf(\"Response code: %d\\n\", resp.StatusCode)\n\t}\n\n}", "func TestGet_HttpClientReturns404(t *testing.T) {\n\tfs := fixture()\n\tfs.Client.Status = http.StatusNotFound\n\t_, err := fs.Get(\"\")\n\tif !fs.IsNotExist(err) {\n\t\tt.Fatalf(\"Expected IsNotExist(err) to be true; err: %v\", err)\n\t}\n}", "func RunServer(server client.Service) {\n\tr := mux.NewRouter()\n\tr.HandleFunc(server.HealthCheck, func(w http.ResponseWriter, r *http.Request) {\n\t\t// an example API handler\n\t\tjson.NewEncoder(w).Encode(map[string]bool{\"ok\": true})\n\t}).Methods(\"GET\")\n\tsrv := &http.Server{\n\t\tHandler: r,\n\t\tAddr: fmt.Sprintf(\"%s:%d\", server.URL, server.Port),\n\t\tWriteTimeout: 15 * time.Second,\n\t\tReadTimeout: 15 * time.Second,\n\t}\n\tlog.Fatal(srv.ListenAndServe())\n}", "func SetUpService(webServiceEndpoint string,healthCheckEndpoint string,subscriptionServiceUrl string,googleSubscriptionsUrl 
string,clientId string, clientSecret string, callbackUrl string, issuer string, sessionKey string, cloudCommerceProcurementUrl string, partnerId string, finishUrl string, finishUrlTitle string, testMode string) error {\n\thandler := GetSubscriptionFrontendHandler(subscriptionServiceUrl,googleSubscriptionsUrl,clientId, clientSecret, callbackUrl, issuer, sessionKey, cloudCommerceProcurementUrl, partnerId, finishUrl, finishUrlTitle)\n\n\thealthCheck := mux.NewRouter()\n\thealthCheck.Methods(http.MethodGet).Path(\"/healthz\").HandlerFunc(handler.Healthz)\n\tgo http.ListenAndServe(\":\"+healthCheckEndpoint, healthCheck)\n\n\twebService := mux.NewRouter()\n\tif testModeBool,err := strconv.ParseBool(testMode); err==nil && testModeBool {\n\t\twebService.Methods(http.MethodGet).Path(\"/resetsaas\").HandlerFunc(handler.ResetSaas)\n\t\twebService.Methods(http.MethodGet).Path(\"/signupsaastest\").HandlerFunc(handler.SignupSaasTest)\n\t}\n\twebService.Methods(http.MethodGet).Path(\"/signupprod/{accountId}\").HandlerFunc(handler.SignupProd)\n\twebService.Methods(http.MethodPost).Path(\"/signupsaas\").HandlerFunc(handler.SignupSaas)\n\twebService.Methods(http.MethodGet).Path(\"/login\").HandlerFunc(handler.Auth0Login)\n\twebService.Methods(http.MethodGet).Path(\"/callback\").HandlerFunc(handler.Auth0Callback)\n\twebService.Methods(http.MethodPost).Path(\"/finishSaas\").HandlerFunc(handler.FinishSaas)\n\twebService.Methods(http.MethodPost).Path(\"/finishProd\").HandlerFunc(handler.FinishProd)\n\n\twebService.Methods(http.MethodGet).Path(\"/healthz\").HandlerFunc(handler.Healthz)\n\n\twebService.HandleFunc(\"/\", func(w http.ResponseWriter, r *http.Request) {\n\t\thttp.Redirect(w, r, \"https://www.cloudbees.com\", http.StatusFound)\n\t})\n\n\treturn http.ListenAndServe(\":\"+webServiceEndpoint, webService)\n}", "func TestMain(t *testing.T) {\n\tr, _ := http.NewRequest(\"GET\", \"/\", nil)\n\tw := httptest.NewRecorder()\n\tbeego.BeeApp.Handlers.ServeHTTP(w, 
r)\n\n\tbeego.Trace(\"testing\", \"TestMain\", \"Code[%d]\\n%s\", w.Code, w.Body.String())\n\n\tConvey(\"Subject: Test Station Endpoint\\n\", t, func() {\n\t\tConvey(\"Status Code Should Be 200\", func() {\n\t\t\tSo(w.Code, ShouldEqual, 200)\n\t\t})\n\t\tConvey(\"The Result Should Not Be Empty\", func() {\n\t\t\tSo(w.Body.Len(), ShouldBeGreaterThan, 0)\n\t\t})\n\t})\n}", "func (s *OmnibusTestSuite) httpOnly() {\n\tif s.sslEnabled {\n\t\ts.T().Skip(\"HTTP only test, skipping...\")\n\t}\n}", "func TestMakePublicService(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\tsks *v1alpha1.ServerlessService\n\t\twant *corev1.Service\n\t}{{\n\t\tname: \"HTTP - serve\",\n\t\tsks: &v1alpha1.ServerlessService{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tNamespace: \"melon\",\n\t\t\t\tName: \"collie\",\n\t\t\t\tUID: \"1982\",\n\t\t\t\t// Those labels are propagated from the Revision->PA.\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tserving.RevisionLabelKey: \"collie\",\n\t\t\t\t\tserving.RevisionUID: \"1982\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tSpec: v1alpha1.ServerlessServiceSpec{\n\t\t\t\tProtocolType: networking.ProtocolHTTP1,\n\t\t\t\tMode: v1alpha1.SKSOperationModeServe,\n\t\t\t},\n\t\t},\n\t\twant: &corev1.Service{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tNamespace: \"melon\",\n\t\t\t\tName: \"collie\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t// Those should be propagated.\n\t\t\t\t\tserving.RevisionLabelKey: \"collie\",\n\t\t\t\t\tserving.RevisionUID: \"1982\",\n\t\t\t\t\tnetworking.SKSLabelKey: \"collie\",\n\t\t\t\t\tnetworking.ServiceTypeKey: \"Public\",\n\t\t\t\t},\n\t\t\t\tAnnotations: map[string]string{},\n\t\t\t\tOwnerReferences: []metav1.OwnerReference{{\n\t\t\t\t\tAPIVersion: v1alpha1.SchemeGroupVersion.String(),\n\t\t\t\t\tKind: \"ServerlessService\",\n\t\t\t\t\tName: \"collie\",\n\t\t\t\t\tUID: \"1982\",\n\t\t\t\t\tController: ptr.Bool(true),\n\t\t\t\t\tBlockOwnerDeletion: ptr.Bool(true),\n\t\t\t\t}},\n\t\t\t},\n\t\t\tSpec: 
corev1.ServiceSpec{\n\t\t\t\tPorts: []corev1.ServicePort{{\n\t\t\t\t\tName: networking.ServicePortNameHTTP1,\n\t\t\t\t\tProtocol: corev1.ProtocolTCP,\n\t\t\t\t\tPort: networking.ServiceHTTPPort,\n\t\t\t\t\tTargetPort: intstr.FromInt(networking.BackendHTTPPort),\n\t\t\t\t}},\n\t\t\t},\n\t\t},\n\t}, {\n\t\tname: \"HTTP - proxy\",\n\t\tsks: &v1alpha1.ServerlessService{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tNamespace: \"melon\",\n\t\t\t\tName: \"collie\",\n\t\t\t\tUID: \"1982\",\n\t\t\t\t// Those labels are propagated from the Revision->PA.\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tserving.RevisionLabelKey: \"collie\",\n\t\t\t\t\tserving.RevisionUID: \"1982\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tSpec: v1alpha1.ServerlessServiceSpec{\n\t\t\t\tMode: v1alpha1.SKSOperationModeProxy,\n\t\t\t\tProtocolType: networking.ProtocolHTTP1,\n\t\t\t},\n\t\t},\n\t\twant: &corev1.Service{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tNamespace: \"melon\",\n\t\t\t\tName: \"collie\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t// Those should be propagated.\n\t\t\t\t\tserving.RevisionLabelKey: \"collie\",\n\t\t\t\t\tserving.RevisionUID: \"1982\",\n\t\t\t\t\tnetworking.SKSLabelKey: \"collie\",\n\t\t\t\t\tnetworking.ServiceTypeKey: \"Public\",\n\t\t\t\t},\n\t\t\t\tAnnotations: map[string]string{},\n\t\t\t\tOwnerReferences: []metav1.OwnerReference{{\n\t\t\t\t\tAPIVersion: v1alpha1.SchemeGroupVersion.String(),\n\t\t\t\t\tKind: \"ServerlessService\",\n\t\t\t\t\tName: \"collie\",\n\t\t\t\t\tUID: \"1982\",\n\t\t\t\t\tController: ptr.Bool(true),\n\t\t\t\t\tBlockOwnerDeletion: ptr.Bool(true),\n\t\t\t\t}},\n\t\t\t},\n\t\t\tSpec: corev1.ServiceSpec{\n\t\t\t\tPorts: []corev1.ServicePort{{\n\t\t\t\t\tName: networking.ServicePortNameHTTP1,\n\t\t\t\t\tProtocol: corev1.ProtocolTCP,\n\t\t\t\t\tPort: networking.ServiceHTTPPort,\n\t\t\t\t\tTargetPort: intstr.FromInt(networking.BackendHTTPPort),\n\t\t\t\t}},\n\t\t\t},\n\t\t},\n\t}, {\n\t\tname: \"HTTP2 - serve\",\n\t\tsks: 
&v1alpha1.ServerlessService{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tNamespace: \"siamese\",\n\t\t\t\tName: \"dream\",\n\t\t\t\tUID: \"1988\",\n\t\t\t\t// Those labels are propagated from the Revision->PA.\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tserving.RevisionLabelKey: \"dream\",\n\t\t\t\t\tserving.RevisionUID: \"1988\",\n\t\t\t\t},\n\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\"cherub\": \"rock\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tSpec: v1alpha1.ServerlessServiceSpec{\n\t\t\t\tProtocolType: networking.ProtocolH2C,\n\t\t\t\tMode: v1alpha1.SKSOperationModeServe,\n\t\t\t},\n\t\t},\n\t\twant: &corev1.Service{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tNamespace: \"siamese\",\n\t\t\t\tName: \"dream\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t// Those should be propagated.\n\t\t\t\t\tserving.RevisionLabelKey: \"dream\",\n\t\t\t\t\tserving.RevisionUID: \"1988\",\n\t\t\t\t\tnetworking.SKSLabelKey: \"dream\",\n\t\t\t\t\tnetworking.ServiceTypeKey: \"Public\",\n\t\t\t\t},\n\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\"cherub\": \"rock\",\n\t\t\t\t},\n\t\t\t\tOwnerReferences: []metav1.OwnerReference{{\n\t\t\t\t\tAPIVersion: v1alpha1.SchemeGroupVersion.String(),\n\t\t\t\t\tKind: \"ServerlessService\",\n\t\t\t\t\tName: \"dream\",\n\t\t\t\t\tUID: \"1988\",\n\t\t\t\t\tController: ptr.Bool(true),\n\t\t\t\t\tBlockOwnerDeletion: ptr.Bool(true),\n\t\t\t\t}},\n\t\t\t},\n\t\t\tSpec: corev1.ServiceSpec{\n\t\t\t\tPorts: []corev1.ServicePort{{\n\t\t\t\t\tName: networking.ServicePortNameH2C,\n\t\t\t\t\tProtocol: corev1.ProtocolTCP,\n\t\t\t\t\tPort: networking.ServiceHTTP2Port,\n\t\t\t\t\tTargetPort: intstr.FromInt(networking.BackendHTTP2Port),\n\t\t\t\t}},\n\t\t\t},\n\t\t},\n\t}, {\n\t\tname: \"HTTP2 - serve - no backends\",\n\t\tsks: &v1alpha1.ServerlessService{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tNamespace: \"siamese\",\n\t\t\t\tName: \"dream\",\n\t\t\t\tUID: \"1988\",\n\t\t\t\t// Those labels are propagated from the 
Revision->PA.\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tserving.RevisionLabelKey: \"dream\",\n\t\t\t\t\tserving.RevisionUID: \"1988\",\n\t\t\t\t},\n\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\"cherub\": \"rock\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tSpec: v1alpha1.ServerlessServiceSpec{\n\t\t\t\tProtocolType: networking.ProtocolH2C,\n\t\t\t\tMode: v1alpha1.SKSOperationModeServe,\n\t\t\t},\n\t\t},\n\t\twant: &corev1.Service{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tNamespace: \"siamese\",\n\t\t\t\tName: \"dream\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t// Those should be propagated.\n\t\t\t\t\tserving.RevisionLabelKey: \"dream\",\n\t\t\t\t\tserving.RevisionUID: \"1988\",\n\t\t\t\t\tnetworking.SKSLabelKey: \"dream\",\n\t\t\t\t\tnetworking.ServiceTypeKey: \"Public\",\n\t\t\t\t},\n\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\"cherub\": \"rock\",\n\t\t\t\t},\n\t\t\t\tOwnerReferences: []metav1.OwnerReference{{\n\t\t\t\t\tAPIVersion: v1alpha1.SchemeGroupVersion.String(),\n\t\t\t\t\tKind: \"ServerlessService\",\n\t\t\t\t\tName: \"dream\",\n\t\t\t\t\tUID: \"1988\",\n\t\t\t\t\tController: ptr.Bool(true),\n\t\t\t\t\tBlockOwnerDeletion: ptr.Bool(true),\n\t\t\t\t}},\n\t\t\t},\n\t\t\tSpec: corev1.ServiceSpec{\n\t\t\t\tPorts: []corev1.ServicePort{{\n\t\t\t\t\tName: networking.ServicePortNameH2C,\n\t\t\t\t\tProtocol: corev1.ProtocolTCP,\n\t\t\t\t\tPort: networking.ServiceHTTP2Port,\n\t\t\t\t\tTargetPort: intstr.FromInt(networking.BackendHTTP2Port),\n\t\t\t\t}},\n\t\t\t},\n\t\t},\n\t}, {\n\t\tname: \"HTTP2 - proxy\",\n\t\tsks: &v1alpha1.ServerlessService{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tNamespace: \"siamese\",\n\t\t\t\tName: \"dream\",\n\t\t\t\tUID: \"1988\",\n\t\t\t\t// Those labels are propagated from the Revision->PA.\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tserving.RevisionLabelKey: \"dream\",\n\t\t\t\t\tserving.RevisionUID: \"1988\",\n\t\t\t\t},\n\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\"cherub\": 
\"rock\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tSpec: v1alpha1.ServerlessServiceSpec{\n\t\t\t\tProtocolType: networking.ProtocolH2C,\n\t\t\t\tMode: v1alpha1.SKSOperationModeProxy,\n\t\t\t},\n\t\t},\n\t\twant: &corev1.Service{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tNamespace: \"siamese\",\n\t\t\t\tName: \"dream\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t// Those should be propagated.\n\t\t\t\t\tserving.RevisionLabelKey: \"dream\",\n\t\t\t\t\tserving.RevisionUID: \"1988\",\n\t\t\t\t\tnetworking.SKSLabelKey: \"dream\",\n\t\t\t\t\tnetworking.ServiceTypeKey: \"Public\",\n\t\t\t\t},\n\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\"cherub\": \"rock\",\n\t\t\t\t},\n\t\t\t\tOwnerReferences: []metav1.OwnerReference{{\n\t\t\t\t\tAPIVersion: v1alpha1.SchemeGroupVersion.String(),\n\t\t\t\t\tKind: \"ServerlessService\",\n\t\t\t\t\tName: \"dream\",\n\t\t\t\t\tUID: \"1988\",\n\t\t\t\t\tController: ptr.Bool(true),\n\t\t\t\t\tBlockOwnerDeletion: ptr.Bool(true),\n\t\t\t\t}},\n\t\t\t},\n\t\t\tSpec: corev1.ServiceSpec{\n\t\t\t\tPorts: []corev1.ServicePort{{\n\t\t\t\t\tName: networking.ServicePortNameH2C,\n\t\t\t\t\tProtocol: corev1.ProtocolTCP,\n\t\t\t\t\tPort: networking.ServiceHTTP2Port,\n\t\t\t\t\tTargetPort: intstr.FromInt(networking.BackendHTTP2Port),\n\t\t\t\t}},\n\t\t\t},\n\t\t},\n\t}}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tgot := MakePublicService(test.sks)\n\t\t\tif diff := cmp.Diff(test.want, got); diff != \"\" {\n\t\t\t\tt.Errorf(\"Public K8s Service mismatch (-want, +got) = %v\", diff)\n\t\t\t}\n\t\t})\n\t}\n}", "func TestHttpServer(t *testing.T) {\n\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprintln(w, \"Hello golang test server\")\n\t}))\n\tdefer ts.Close()\n\n\tres, err := http.Get(ts.URL)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tgreeting, err := ioutil.ReadAll(res.Body)\n\tres.Body.Close()\n\tif err != nil 
{\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Printf(\"%s\", greeting)\n}", "func mockURLChecker(url string) bool {\n\tdelay, ok := validURLs[url]\n\tif ok {\n\t\ttime.Sleep(delay)\n\t} else {\n\t\ttime.Sleep(timeout)\n\t}\n\treturn ok\n}", "func TestInitAPIServer(t *testing.T) {\n\tts := initAPITestServer(t)\n\tdefer test.CloseServer(ts)\n\n\tif ts.URL != apiServer.GetUrl() {\n\t\tt.Fatalf(\"Expected API server URL %s, got %s\", ts.URL, apiServer.GetUrl())\n\t}\n\n\tif ts.URL != apiServer.GetUrl() {\n\t\tt.Fatalf(\"Expected API server URL %s, got %s\", ts.URL, apiServer.GetUrl())\n\t}\n\n\tcacheNames := []string{\"userid\", \"submissions\", \"problem\"}\n\tfor _, s := range cacheNames {\n\t\tif _, ok := cache[s]; !ok {\n\t\t\tt.Fatalf(\"Error cache: %v not found\", s)\n\t\t}\n\t}\n\tif _, ok := cache[\"notvalid\"]; ok {\n\t\tt.Fatalf(\"Error cache: %v not expected\", \"notvalid\")\n\t}\n}", "func TestServerReturnBadCode(t *testing.T) {\n\ttestServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tw.Write([]byte(`{}`))\n\t}))\n\t_, err := sendMessage(testServer.URL, \"[email protected]\", \"test\")\n\tif err == nil {\n\t\tt.Fail()\n\t}\n}", "func Test_MockServer(t *testing.T) {\n\t//创建一个模拟的服务器\n\tserver := MockServer()\n\tdefer server.Close()\n\t//Get请求发往模拟服务器的地址\n\tresq, err := http.Get(server.URL)\n\tif err != nil {\n\t\tt.Fatal(\"创建Get失败\")\n\t}\n\tdefer resq.Body.Close()\n\n\tlog.Println(\"code:\", resq.StatusCode)\n\tjson, err := ioutil.ReadAll(resq.Body)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlog.Printf(\"body:%s\\n\", json)\n}", "func TestIndexHandler(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(index))\n\tdefer ts.Close()\n\n\tres, err := http.Get(ts.URL)\n\tif err != nil {\n\t\tt.Errorf(\"Error Getting Index: %s\", err)\n\t}\n\n\tif res.StatusCode != 200 {\n\t\tt.Errorf(\"Expected 200 | Got: %v\", 
res.StatusCode)\n\t}\n}", "func TestLivenessHttp(c *client.Client) bool {\n\treturn runLivenessTest(c, \"http-liveness.yaml\")\n}", "func TestStaticFileServer(t *testing.T) {\n\tr := newRouter()\n\tmockServer := httptest.NewServer(r)\n\n\t//We want to go in `GET /assets/` route to get the index.html file response\n\tresp, err := http.Get(mockServer.URL + \"/assets/\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif resp.StatusCode != http.StatusOK {\n\t\tt.Errorf(\"Status should be 200, got %d\", resp.StatusCode) //Keeping status '200' i.e. OK\n\t}\n\n\tcontentType := resp.Header.Get(\"Content-Type\") //HTML File can be huge to test so we do testing on the\n\texpectedContentType := \"text/html; charset=utf-8\" //content-type header is \"text/html; charset=utf-8\" making sure that an html file has been served\n\n\tif expectedContentType != contentType {\n\t\tt.Errorf(\"Wrong content type, expected %s, got %s\", expectedContentType, contentType) //checking if there is expected content we wanted or not\n\t}\n\n}", "func waitServerReady(t *testing.T, addr string) {\n\tfor i := 0; i < 50; i++ {\n\t\t_, err := http.DefaultClient.Get(addr)\n\t\t// assume server ready when no err anymore\n\t\tif err == nil {\n\t\t\treturn\n\t\t}\n\t\ttime.Sleep(100 * time.Millisecond)\n\t\tcontinue\n\t}\n}", "func (g GenericService) Serve() {\n g.init()\n glog.Fatal(http.ListenAndServe(fmt.Sprintf(\":%d\", port), nil))\n}", "func TestSmoke(t *testing.T) {\n\ta := assert.New(t)\n\tconst backendCount = 4\n\tconst payloadSize = 4096\n\tconst requestCount = 64\n\tconst requestWorkers = 8\n\n\tctx, cancel := context.WithTimeout(context.Background(), time.Minute)\n\tdefer cancel()\n\n\tbackends := make([]*CharGenServer, backendCount)\n\ttier := config.Tier{\n\t\tDialFailureTimeout: 10 * time.Second,\n\t}\n\tfor i := range backends {\n\t\tvar err error\n\t\tbackends[i], err = startChargen(ctx, payloadSize)\n\t\tif !a.NoError(err) {\n\t\t\treturn\n\t\t}\n\t\ttier.Targets = append(tier.Targets, 
config.Target{\n\t\t\tHosts: []string{backends[i].Addr.IP.String()},\n\t\t\tPort: backends[i].Addr.Port,\n\t\t\tProto: config.TCP,\n\t\t})\n\t}\n\n\tcfg := &config.Config{\n\t\tFrontends: []config.Frontend{\n\t\t\t{\n\t\t\t\tRebalanceDuration: time.Millisecond,\n\t\t\t\tBackendPool: config.BackendPool{\n\t\t\t\t\t// Disable extra pings so our request counts are correct.\n\t\t\t\t\tLatencyBucket: -1,\n\t\t\t\t\tTiers: []config.Tier{tier},\n\t\t\t\t},\n\t\t\t\tBindAddress: \":13013\",\n\t\t\t\tIdleDuration: time.Minute,\n\t\t\t},\n\t\t},\n\t}\n\tif !a.NoError(cfg.Validate()) {\n\t\treturn\n\t}\n\n\tfe := frontend.Frontend{}\n\tif !a.NoError(fe.Ensure(ctx, cfg)) {\n\t\treturn\n\t}\n\n\tvar wg sync.WaitGroup\n\twg.Add(requestWorkers)\n\tvar remainingRequests = int32(requestCount)\n\tfor i := 0; i < requestWorkers; i++ {\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tfor {\n\t\t\t\tif atomic.AddInt32(&remainingRequests, -1) < 0 {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tconn, err := net.Dial(\"tcp\", \"127.0.0.1:13013\")\n\t\t\t\tif !a.NoError(err) {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tcount, err := io.Copy(ioutil.Discard, conn)\n\t\t\t\t_ = conn.Close()\n\t\t\t\ta.NoError(err)\n\t\t\t\ta.Equal(payloadSize, int(count))\n\t\t\t}\n\t\t}()\n\t}\n\twg.Wait()\n\tfe.Wait()\n\n\t// Ensure that the total number of requests was made.\n\tcount := uint64(0)\n\tfor _, cg := range backends {\n\t\tcount += cg.ConnectionCount()\n\t}\n\ta.Equal(requestCount, int(count))\n\n\tdata, err := yaml.Marshal(&fe)\n\ta.NoError(err)\n\tlog.Print(string(data))\n\n\t// Test tearing down a frontend.\n\tcfg.Frontends = []config.Frontend{}\n\ta.NoError(fe.Ensure(ctx, cfg))\n\t_, err = net.Dial(\"tcp\", \"127.0.0.1:13013\")\n\ta.Errorf(err, \"connection refused\")\n}", "func TestGetFailure1(t *testing.T) {\n\tisTesting = true\n\n\tvar params = make(map[string]string)\n\tparams[\"id\"] = \"\"\n\n\tvar request = Request{\n\t\tPath: \"/api/devices\",\n\t\tPathParameters: params,\n\t\tHTTPMethod: 
\"GET\",\n\t}\n\tvar response, _ = Handler(request)\n\n\tif response.StatusCode != 404 {\n\t\tt.Errorf(\"response status code has to be 404 but is %d\", response.StatusCode)\n\t}\n}", "func (s *Service) Run() error {\n s.pipeline = s.pipeline.Add(HandlerFunc(s.routeRequest))\n \n server := &http.Server{\n Addr: s.port,\n Handler: s,\n ReadTimeout: s.readTimeout,\n WriteTimeout: s.writeTimeout,\n IdleTimeout: s.idleTimeout,\n }\n \n alt.Debugf(\"%s: Listening on %v\", s.name, s.port)\n return server.ListenAndServe()\n}", "func (s) TestServeBootstrapFailure(t *testing.T) {\n\t// Since we have not setup fakes for anything, this will attempt to do real\n\t// xDS bootstrap and that will fail because the bootstrap environment\n\t// variable is not set.\n\tserver := NewGRPCServer()\n\tdefer server.Stop()\n\n\tlocalAddr, err := xdstestutils.AvailableHostPort()\n\tif err != nil {\n\t\tt.Fatalf(\"testutils.AvailableHostPort() failed: %v\", err)\n\t}\n\n\tserveDone := testutils.NewChannel()\n\tgo func() {\n\t\terr := server.Serve(ServeOptions{Address: localAddr})\n\t\tserveDone.Send(err)\n\t}()\n\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\tv, err := serveDone.Receive(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"error when waiting for Serve() to exit: %v\", err)\n\t}\n\tif err, ok := v.(error); !ok || err == nil {\n\t\tt.Fatal(\"Serve() did not exit with error\")\n\t}\n}", "func TestValidNoticesEndpoint(t *testing.T) {\n\t// Initialize the database connection\n\tmodels.InitDB()\n\n\t// Make the request\n\trequest, _ := http.NewRequest(\"GET\", \"/notices/1\", nil)\n\tresponse := httptest.NewRecorder()\n\tRouterNotice().ServeHTTP(response, request)\n\n\tvar resp map[string]interface{}\n\tjson.NewDecoder(response.Body).Decode(&resp)\n\n\t// Check if what we wanted is what we got\n\tassert.Equal(t, float64(http.StatusOK), resp[\"status_code\"], \"OK response is expected\")\n}", "func (r *Responder) ServiceUnavailable() { 
r.write(http.StatusServiceUnavailable) }", "func serve(p *Pylon, port int, healthRoute string) {\r\n\tmux := http.NewServeMux()\r\n\tmux.Handle(\"/\", NewPylonHandler(p))\r\n\tmux.Handle(healthRoute, NewPylonHealthHandler(p))\r\n\tserver := &http.Server{\r\n\t\tAddr: \":\" + strconv.Itoa(port),\r\n\t\tHandler: mux,\r\n\t\tReadTimeout: 20 * time.Second,\r\n\t\tWriteTimeout: 20 * time.Second,\r\n\t\tMaxHeaderBytes: 1 << 20,\r\n\t}\r\n\r\n\tfor _, s := range p.Services {\r\n\t\tlogDebug(\"Starting initial health check of service: \" + s.Name)\r\n\t\td := &net.Dialer{\r\n\t\t\tTimeout: defaultDialerTimeout,\r\n\t\t}\r\n\t\tif s.HealthCheck.DialTO != 0 {\r\n\t\t\td.Timeout = time.Second * time.Duration(s.HealthCheck.DialTO)\r\n\t\t}\r\n\t\t// Do an initial health check\r\n\t\tgo handleHealthCheck(s, d)\r\n\r\n\t\tif s.HealthCheck.Enabled {\r\n\t\t\tgo startPeriodicHealthCheck(s, time.Second * time.Duration(s.HealthCheck.Interval), d)\r\n\t\t\tlogDebug(\"Periodic Health checks started for service: \" + s.Name)\r\n\t\t}\r\n\t}\r\n\r\n\tlogInfo(\"Serving on \" + strconv.Itoa(port))\r\n\tserver.ListenAndServe()\r\n}", "func TestHealthHandler(t *testing.T) {\n\t// clear out existing checks.\n\tDefaultRegistry = NewRegistry()\n\n\t// protect an http server\n\thandler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.WriteHeader(http.StatusNoContent)\n\t}))\n\n\t// wrap it in our health handler\n\thandler = Handler(handler)\n\n\t// use this swap check status\n\tupdater := NewStatusUpdater()\n\tRegister(\"test_check\", updater)\n\n\t// now, create a test server\n\tserver := httptest.NewServer(handler)\n\n\tcheckUp := func(t *testing.T, message string) {\n\t\tresp, err := http.Get(server.URL)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"error getting success status: %v\", err)\n\t\t}\n\t\tdefer resp.Body.Close()\n\n\t\tif resp.StatusCode != http.StatusNoContent {\n\t\t\tt.Fatalf(\"unexpected response code from server when %s: %d != %d\", message, 
resp.StatusCode, http.StatusNoContent)\n\t\t}\n\t\t// NOTE(stevvooe): we really don't care about the body -- the format is\n\t\t// not standardized or supported, yet.\n\t}\n\n\tcheckDown := func(t *testing.T, message string) {\n\t\tresp, err := http.Get(server.URL)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"error getting down status: %v\", err)\n\t\t}\n\t\tdefer resp.Body.Close()\n\n\t\tif resp.StatusCode != http.StatusServiceUnavailable {\n\t\t\tt.Fatalf(\"unexpected response code from server when %s: %d != %d\", message, resp.StatusCode, http.StatusServiceUnavailable)\n\t\t}\n\t}\n\n\t// server should be up\n\tcheckUp(t, \"initial health check\")\n\n\t// now, we fail the health check\n\tupdater.Update(fmt.Errorf(\"the server is now out of commission\"))\n\tcheckDown(t, \"server should be down\") // should be down\n\n\t// bring server back up\n\tupdater.Update(nil)\n\tcheckUp(t, \"when server is back up\") // now we should be back up.\n}", "func TestHealthCheckHandler(t *testing.T) {\n req, err := http.NewRequest(\"GET\", \"/healthcheck\", nil)\n if err != nil {\n t.Fatal(err)\n }\n s := server{\n router : httprouter.New(),\n }\n\n rr := httptest.NewRecorder()\n handler := s.HealthCheckHandler()\n\n s.router.GET(\"/healthcheck\", handler)\n\n s.router.ServeHTTP(rr, req)\n\n // Check if the status code and response body are the one expected.\n if status := rr.Code; status != http.StatusOK {\n t.Errorf(\"handler returned wrong status code: got %v want %v\",\n status, http.StatusOK)\n }\n expected := `{\"alive\": true}`\n if rr.Body.String() != expected {\n t.Errorf(\"handler returned unexpected body: got %v want %v\",\n rr.Body.String(), expected)\n }\n}", "func (fsm *DeployFSMContext) checkServiceReady() (bool, error) {\n\truntime := fsm.Runtime\n\t// do not check if nil for compatibility\n\tif fsm.Deployment.Extra.ServicePhaseStartAt != nil {\n\t\tstartCheckPoint := fsm.Deployment.Extra.ServicePhaseStartAt.Add(30 * time.Second)\n\t\tif 
time.Now().Before(startCheckPoint) {\n\t\t\tfsm.pushLog(fmt.Sprintf(\"checking too early, delay to: %s\", startCheckPoint.String()))\n\t\t\t// too early to check\n\t\t\treturn false, nil\n\t\t}\n\t}\n\tisReplicasZero := false\n\tfor _, s := range fsm.Spec.Services {\n\t\tif s.Deployments.Replicas == 0 {\n\t\t\tisReplicasZero = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif isReplicasZero {\n\t\tfsm.pushLog(\"checking status by inspect\")\n\t\t// we do double check to prevent `fake Healthy`\n\t\t// runtime.ScheduleName must have\n\t\tsg, err := fsm.getServiceGroup()\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\treturn sg.Status == \"Ready\" || sg.Status == \"Healthy\", nil\n\t}\n\n\t// 获取addon状态\n\tserviceGroup, err := fsm.getServiceGroup()\n\tif err != nil {\n\t\tfsm.pushLog(fmt.Sprintf(\"获取service状态失败,%s\", err.Error()))\n\t\treturn false, nil\n\t}\n\tfsm.pushLog(fmt.Sprintf(\"checking status: %s, servicegroup: %v\", serviceGroup.Status, runtime.ScheduleName))\n\t// 如果状态是failed,说明服务或者job运行失败\n\tif serviceGroup.Status == apistructs.StatusFailed {\n\t\treturn false, errors.New(serviceGroup.LastMessage)\n\t}\n\t// 如果状态是ready或者healthy,说明服务已经发起来了\n\truntimeStatus := apistructs.RuntimeStatusUnHealthy\n\tif serviceGroup.Status == apistructs.StatusReady || serviceGroup.Status == apistructs.StatusHealthy {\n\t\truntimeStatus = apistructs.RuntimeStatusHealthy\n\t}\n\truntimeItem := fsm.Runtime\n\tif runtimeItem.Status != runtimeStatus {\n\t\truntimeItem.Status = runtimeStatus\n\t\tif err := fsm.db.UpdateRuntime(runtime); err != nil {\n\t\t\tlogrus.Errorf(\"failed to update runtime status changed, runtime: %v, err: %v\", runtime.ID, err.Error())\n\t\t\treturn false, nil\n\t\t}\n\t}\n\tif runtimeStatus == apistructs.RuntimeStatusHealthy {\n\t\treturn true, nil\n\t}\n\treturn false, nil\n}", "func isAppAvailable(t *testing.T, healthCheckEndPoint string) bool {\n\tclient := &http.Client{}\n\tresp, err := client.Get(healthCheckEndPoint)\n\tif err != nil {\n\t\tt.Fatalf(\"failed 
to get a response from health probe: %v\", err)\n\t}\n\tdefer resp.Body.Close()\n\treturn resp.StatusCode == http.StatusNoContent\n}", "func TestAllowedHostsEvent(t *testing.T) {\n\tvar err error\n\terr = testcert.GenerateCert(\"mail2.guerrillamail.com\", \"\", 365*24*time.Hour, false, 2048, \"P256\", \"../../tests/\")\n\tif err != nil {\n\t\tt.Error(\"failed to generate a test certificate\", err)\n\t\tt.FailNow()\n\t}\n\tdefer cleanTestArtifacts(t)\n\tmainlog, err = getTestLog()\n\tif err != nil {\n\t\tt.Error(\"could not get logger,\", err)\n\t\tt.FailNow()\n\t}\n\tif err := ioutil.WriteFile(\"configJsonD.json\", []byte(configJsonD), 0644); err != nil {\n\t\tt.Error(err)\n\t\tt.FailNow()\n\t}\n\t// start the server by emulating the serve command\n\n\tconf := &guerrilla.AppConfig{} // blank one\n\tif err = conf.Load([]byte(configJsonD)); err != nil { // load configJsonD\n\t\tt.Error(err)\n\t}\n\tcmd := &cobra.Command{}\n\tconfigPath = \"configJsonD.json\"\n\n\tgo func() {\n\t\tserve(cmd, []string{})\n\t}()\n\t// wait for start\n\tif _, err := grepTestlog(\"Listening on TCP 127.0.0.1:2552\", 0); err != nil {\n\t\tt.Error(\"server didn't start\")\n\t}\n\n\t// now connect and try RCPT TO with an invalid host\n\tif conn, buffin, err := test.Connect(conf.Servers[1], 20); err != nil {\n\t\tt.Error(\"Could not connect to new server\", conf.Servers[1].ListenInterface, err)\n\t} else {\n\t\tif result, err := test.Command(conn, buffin, \"HELO example.com\"); err == nil {\n\t\t\texpect := \"250 secure.test.com Hello\"\n\t\t\tif strings.Index(result, expect) != 0 {\n\t\t\t\tt.Error(\"Expected\", expect, \"but got\", result)\n\t\t\t} else {\n\t\t\t\tif result, err = test.Command(conn, buffin, \"RCPT TO:<[email protected]>\"); err == nil {\n\t\t\t\t\texpect := \"454 4.1.1 Error: Relay access denied: grr.la\"\n\t\t\t\t\tif strings.Index(result, expect) != 0 {\n\t\t\t\t\t\tt.Error(\"Expected:\", expect, \"but got:\", result)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t_ = 
conn.Close()\n\t}\n\n\t// now change the config by adding a host to allowed hosts\n\n\tnewConf := conf\n\tnewConf.AllowedHosts = append(newConf.AllowedHosts, \"grr.la\")\n\tif jsonbytes, err := json.Marshal(newConf); err == nil {\n\t\tif err = ioutil.WriteFile(\"configJsonD.json\", jsonbytes, 0644); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t} else {\n\t\tt.Error(err)\n\t}\n\t// send a sighup signal to the server to reload config\n\tsigHup()\n\n\tif _, err := grepTestlog(\"allowed_hosts config changed\", 0); err != nil {\n\t\tt.Error(\"allowed_hosts config not changed\")\n\t\tt.FailNow()\n\t}\n\n\t// now repeat the same conversion, RCPT TO should be accepted\n\tif conn, buffin, err := test.Connect(conf.Servers[1], 20); err != nil {\n\t\tt.Error(\"Could not connect to new server\", conf.Servers[1].ListenInterface, err)\n\t} else {\n\t\tif result, err := test.Command(conn, buffin, \"HELO example.com\"); err == nil {\n\t\t\texpect := \"250 secure.test.com Hello\"\n\t\t\tif strings.Index(result, expect) != 0 {\n\t\t\t\tt.Error(\"Expected\", expect, \"but got\", result)\n\t\t\t} else {\n\t\t\t\tif result, err = test.Command(conn, buffin, \"RCPT TO:<[email protected]>\"); err == nil {\n\t\t\t\t\texpect := \"250 2.1.5 OK\"\n\t\t\t\t\tif strings.Index(result, expect) != 0 {\n\t\t\t\t\t\tt.Error(\"Expected:\", expect, \"but got:\", result)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t_ = conn.Close()\n\t}\n\n\t// shutdown wait for exit\n\td.Shutdown()\n\n\t// wait for shutdown\n\tif _, err := grepTestlog(\"Backend shutdown completed\", 0); err != nil {\n\t\tt.Error(\"server didn't stop\")\n\t}\n\n}", "func TestCheckHealth(t *testing.T) {\n\ttd := newTestData(t, \"#!/bin/true\")\n\tdefer td.Close()\n\n\tif _, err := td.Get(\"/check_health\"); err != nil {\n\t\tt.Error(\"Checking devserver health failed: \", err)\n\t}\n}", "func (s *ServerSuite) TestServerHTTPSAutoCertInvalid(c *C) {\n\te := testutils.NewHandler(func(w http.ResponseWriter, _ *http.Request) 
{\n\t\tw.Write([]byte(\"hi https\"))\n\t})\n\tdefer e.Close()\n\n\t// Start an ACME Stub server locally that serves certificates for \"example.org\"\n\tman := &autocert.Manager{\n\t\tPrompt: autocert.AcceptTOS,\n\t}\n\turl, finish := startACMEServerStub(c, man, \"example.org\", \"\")\n\tdefer finish()\n\n\t// Create a Host definition for non-example.org, with an AutoCert setting that points to local stub URL\n\t// (obtained from the stub start above), which only accepts requests for example.org\n\tb := MakeBatch(Batch{\n\t\tHost: \"non-example.com\",\n\t\tAddr: \"localhost:41000\",\n\t\tRoute: `Path(\"/\")`,\n\t\tURL: e.URL,\n\t\tProtocol: engine.HTTPS,\n\t\tAutoCert: &engine.AutoCertSettings{\n\t\t\tDirectoryURL: url,\n\t\t},\n\t})\n\n\tc.Assert(s.mux.UpsertHost(b.H), IsNil)\n\tc.Assert(s.mux.UpsertServer(b.BK, b.S), IsNil)\n\tc.Assert(s.mux.UpsertFrontend(b.F), IsNil)\n\tc.Assert(s.mux.UpsertListener(b.L), IsNil)\n\n\tc.Assert(s.mux.Start(), IsNil)\n\n\t// Ensure that an error is returned when a certificate generation is attempted.\n\t_, _, err := testutils.Get(b.FrontendURL(\"/\"), testutils.Host(\"example.com\"))\n\tc.Assert(err, NotNil)\n}", "func Test_DeviceService_Get_EmptyIP(t *testing.T) {\n\ts := DeviceService{}\n\t_, err := s.Get(\"\")\n\tassert.Error(t, err)\n}", "func MockDragonflyProvider() *httptest.Server {\n\treturn httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tswitch r.RequestURI {\n\t\tcase healthCheckEndpoint:\n\t\t\tif r.Method != http.MethodGet {\n\t\t\t\tw.WriteHeader(http.StatusNotImplemented)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\tcase preheatEndpoint:\n\t\t\tif r.Method != http.MethodPost {\n\t\t\t\tw.WriteHeader(http.StatusNotImplemented)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tdata, err := io.ReadAll(r.Body)\n\t\t\tif err != nil {\n\t\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t\t_, _ = 
w.Write([]byte(err.Error()))\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\timage := &PreheatImage{}\n\t\t\tif err := json.Unmarshal(data, image); err != nil {\n\t\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t\t_, _ = w.Write([]byte(err.Error()))\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif image.ImageName == \"\" {\n\t\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif _, ok := preheatMap[image.Digest]; ok {\n\t\t\t\tw.WriteHeader(http.StatusAlreadyReported)\n\t\t\t\t_, _ = w.Write([]byte(`{\"ID\":\"\"}`))\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tpreheatMap[image.Digest] = struct{}{}\n\n\t\t\tif image.Type == \"image\" &&\n\t\t\t\timage.URL == \"https://harbor.com\" &&\n\t\t\t\timage.ImageName == \"busybox\" &&\n\t\t\t\timage.Tag == \"latest\" {\n\t\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\t\t_, _ = w.Write([]byte(`{\"ID\":\"dragonfly-id\"}`))\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tcase strings.Replace(preheatTaskEndpoint, \"{task_id}\", \"dragonfly-id\", 1):\n\t\t\tif r.Method != http.MethodGet {\n\t\t\t\tw.WriteHeader(http.StatusNotImplemented)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tstatus := &dragonflyPreheatInfo{\n\t\t\t\tID: \"dragonfly-id\",\n\t\t\t\tStartTime: time.Now().UTC().String(),\n\t\t\t\tFinishTime: time.Now().Add(5 * time.Minute).UTC().String(),\n\t\t\t\tStatus: \"SUCCESS\",\n\t\t\t}\n\t\t\tbytes, _ := json.Marshal(status)\n\t\t\t_, _ = w.Write(bytes)\n\t\tcase strings.Replace(preheatTaskEndpoint, \"{task_id}\", \"preheat-job-exist-with-no-id\", 1):\n\t\t\tif r.Method != http.MethodGet {\n\t\t\t\tw.WriteHeader(http.StatusNotImplemented)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tstatus := &dragonflyPreheatInfo{\n\t\t\t\tID: \"preheat-exist-with-no-id\",\n\t\t\t\tStartTime: time.Now().UTC().String(),\n\t\t\t\tFinishTime: time.Now().Add(5 * time.Minute).UTC().String(),\n\t\t\t\tStatus: \"FAILED\",\n\t\t\t\tErrorMsg: \"{\\\"Code\\\":208,\\\"Msg\\\":\\\"preheat task already exists, id:\\\"}\",\n\t\t\t}\n\t\t\tbytes, _ := 
json.Marshal(status)\n\t\t\t_, _ = w.Write(bytes)\n\t\tcase strings.Replace(preheatTaskEndpoint, \"{task_id}\", \"preheat-job-normal-failed\", 1):\n\t\t\tif r.Method != http.MethodGet {\n\t\t\t\tw.WriteHeader(http.StatusNotImplemented)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tstatus := &dragonflyPreheatInfo{\n\t\t\t\tID: \"preheat-job-exist-with-id-1\",\n\t\t\t\tStartTime: time.Now().UTC().String(),\n\t\t\t\tFinishTime: time.Now().Add(5 * time.Minute).UTC().String(),\n\t\t\t\tStatus: \"FAILED\",\n\t\t\t\tErrorMsg: \"{\\\"Code\\\":208,\\\"Msg\\\":\\\"some msg\\\"}\",\n\t\t\t}\n\t\t\tbytes, _ := json.Marshal(status)\n\t\t\t_, _ = w.Write(bytes)\n\t\tcase strings.Replace(preheatTaskEndpoint, \"{task_id}\", \"preheat-job-exist-with-id-1\", 1):\n\t\t\tif r.Method != http.MethodGet {\n\t\t\t\tw.WriteHeader(http.StatusNotImplemented)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tstatus := &dragonflyPreheatInfo{\n\t\t\t\tID: \"preheat-job-exist-with-id-1\",\n\t\t\t\tStartTime: time.Now().UTC().String(),\n\t\t\t\tFinishTime: time.Now().Add(5 * time.Minute).UTC().String(),\n\t\t\t\tStatus: \"FAILED\",\n\t\t\t\tErrorMsg: \"{\\\"Code\\\":208,\\\"Msg\\\":\\\"preheat task already exists, id:preheat-job-exist-with-id-1-1\\\"}\",\n\t\t\t}\n\t\t\tbytes, _ := json.Marshal(status)\n\t\t\t_, _ = w.Write(bytes)\n\t\tcase strings.Replace(preheatTaskEndpoint, \"{task_id}\", \"preheat-job-exist-with-id-1-1\", 1):\n\t\t\tif r.Method != http.MethodGet {\n\t\t\t\tw.WriteHeader(http.StatusNotImplemented)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tcase strings.Replace(preheatTaskEndpoint, \"{task_id}\", \"preheat-job-err-body-json\", 1):\n\t\t\tif r.Method != http.MethodGet {\n\t\t\t\tw.WriteHeader(http.StatusNotImplemented)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tbodyStr := \"\\\"err body\\\"\"\n\t\t\t_, _ = w.Write([]byte(bodyStr))\n\t\tdefault:\n\t\t\tw.WriteHeader(http.StatusNotImplemented)\n\t\t}\n\t}))\n}", "func HTTP(healthService string, httpTimeout time.Duration) bool 
{\n\tclient := http.Client{\n\t\tTimeout: httpTimeout,\n\t}\n\n\tresp, err := client.Get(healthService)\n\t// Check if response timeouts or returns an HTTP error\n\tif err != nil {\n\t\treturn false\n\t}\n\tdefer resp.Body.Close()\n\tbytes, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tif strings.Contains(string(bytes), \"healthy\") {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func TestPlainHTTPServer(t *testing.T) {\n\tdefer leaktest.AfterTest(t)()\n\ts, _, _ := serverutils.StartServer(t, base.TestServerArgs{\n\t\t// The default context uses embedded certs.\n\t\tInsecure: true,\n\t})\n\tdefer s.Stopper().Stop()\n\tts := s.(*TestServer)\n\n\thttpClient, err := s.GetHTTPClient()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\thttpURL := \"http://\" + ts.Ctx.HTTPAddr + healthPath\n\tif resp, err := httpClient.Get(httpURL); err != nil {\n\t\tt.Fatalf(\"error requesting health at %s: %s\", httpURL, err)\n\t} else {\n\t\tdefer resp.Body.Close()\n\t\tvar data serverpb.HealthResponse\n\t\tif err := jsonpb.Unmarshal(resp.Body, &data); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t}\n\n\thttpsURL := \"https://\" + ts.Ctx.HTTPAddr + healthPath\n\tif _, err := httpClient.Get(httpsURL); err == nil {\n\t\tt.Fatalf(\"unexpected success fetching %s\", httpsURL)\n\t}\n}", "func TestConsoleService(t *testing.T) {\n\tc := new(ConsoleService)\n\n\tc.Started()\n\tc.Stopped()\n\tc.Error(\"Error msg\")\n\tc.Init()\n\tc.Close()\n}", "func TestGetDataFromUrlNon200HttpCode(t *testing.T) {\n\tdefer gock.Off()\n\n\tapiUrl := \"http://example.com\"\n\tapiPath := \"status\"\n\n\tgock.New(apiUrl).\n\t\tGet(apiPath).\n\t\tReply(201).\n\t\tBodyString(\"\")\n\n\t_, err := getDataFromURL(apiUrl+\"/\"+apiPath, ioutil.ReadAll)\n\n\tassert.Error(t, err)\n}", "func httpUnavailable(w http.ResponseWriter) bool {\n\tif dvid.RequestsOK() {\n\t\treturn false\n\t}\n\thttp.Error(w, \"DVID server is unavailable.\", http.StatusServiceUnavailable)\n\treturn true\n}", "func 
initAPITestServerInvalid(t *testing.T, response []string) *httptest.Server {\n\tts := test.InitAPITestServerInvalid(t, response)\n\tInitAPIServer(ts.URL)\n\treturn ts\n}", "func TestApi_ListMotorcycles_Empty(t *testing.T) {\n\n\t// ARRANGE\n\n\t// Configure the application...\n\troles := map[authorizationrole.AuthorizationRole]bool{\n\t\tauthorizationrole.AdminAuthorizationRole: true,\n\t}\n\n\tauthService, _ := security.NewAuthService(true, roles)\n\tmotorcycleRepository, _ := repository.NewMotorcycleRepository()\n\trouter := httprouter.New()\n\n\t// Create an instance of the API web service.\n\tourApi, err := NewApi(roles, authService, motorcycleRepository, router)\n\tif err != nil {\n\t\tprintln(\"Failed to create an instance of the API web service: &s\", err.Error())\n\t\treturn\n\t}\n\n\t// An http handler wrapper around httprouter's handler. It permits us to use\n\t// the test server and httpExpected.\n\thandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tourApi.ListMotorcyclesHandler(w, r, httprouter.Params{})\n\t})\n\n\tserver := httptest.NewServer(handler)\n\tdefer server.Close()\n\n\t// ACT\n\tresp, err := http.Get(server.URL)\n\n\t// ASSERT\n\tassert.True(t, resp.StatusCode == http.StatusOK)\n}", "func doNotAllowedTest(t *testing.T, method string, uri string, expected_allow_header string) {\n\tclient := testHttpClient()\n\treq, err := http.NewRequest(method, testServer.URL+uri, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tres, err := client.Do(req)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif res.StatusCode != 405 {\n\t\tt.Errorf(\"%s %s : Expected HTTP Status Code 405, got %d\\n\", method, uri, res.StatusCode)\n\t}\n\tif res.Header.Get(\"Allow\") == \"\" {\n\t\tt.Errorf(\"%s %s : Expected an 'Allow' HTTP response header, none found\\n\", method, uri)\n\t}\n\tif res.Header.Get(\"Allow\") != expected_allow_header {\n\t\tt.Errorf(\"%s %s : Expected 'Allow' HTTP response header of %s, got %s\\n\", method, uri, 
expected_allow_header, res.Header.Get(\"Allow\"))\n\t}\n}", "func TestNewServer(t *testing.T) {\n\tserver := NewServer(\"\", 8000)\n\tif server.Port != 8000 || server.Host != \"\" {\n\t\tt.Error(\"Server did not construct properly.\")\n\t}\n\n\tif len(server.repo.GetAll()) != 0 {\n\t\tt.Error(\"Expected session list to be empty\")\n\t}\n}", "func checkEnvoyStats(host string, port uint16) error {\n\tstate, ws, err := util.GetReadinessStats(host, port)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to get readiness stats: %v\", err)\n\t}\n\n\tif state != nil && admin.ServerInfo_State(*state) != admin.ServerInfo_LIVE {\n\t\treturn fmt.Errorf(\"server is not live, current state is: %v\", admin.ServerInfo_State(*state).String())\n\t}\n\n\tif !ws {\n\t\treturn fmt.Errorf(\"workers have not yet started\")\n\t}\n\n\treturn nil\n}" ]
[ "0.6174951", "0.61433923", "0.6138589", "0.6069285", "0.60072887", "0.59959316", "0.59840834", "0.59650725", "0.59454465", "0.59126145", "0.5888355", "0.5883821", "0.5823625", "0.57960033", "0.5772618", "0.57676363", "0.57636327", "0.5759152", "0.5754714", "0.5753475", "0.5743784", "0.57278484", "0.57197785", "0.5692156", "0.567928", "0.5670665", "0.5634851", "0.56275904", "0.5623726", "0.5604523", "0.55895555", "0.55671805", "0.55658937", "0.55548316", "0.5552388", "0.5547461", "0.55444986", "0.5542308", "0.55328536", "0.5512119", "0.55114466", "0.55111015", "0.5492865", "0.5486742", "0.54818547", "0.547516", "0.54742193", "0.54727525", "0.5471796", "0.5470313", "0.546916", "0.54576004", "0.54509604", "0.5432465", "0.5429976", "0.54292893", "0.54040146", "0.53866243", "0.5381038", "0.5372278", "0.53664523", "0.53576756", "0.53528583", "0.53482", "0.53470534", "0.5345043", "0.5341314", "0.53305465", "0.5328604", "0.5321076", "0.5320393", "0.5318572", "0.5316704", "0.53155655", "0.53123844", "0.5309114", "0.53061837", "0.5303197", "0.5302905", "0.52977663", "0.5296914", "0.52943224", "0.5290557", "0.5288699", "0.5287743", "0.5281903", "0.5278942", "0.5277761", "0.52763337", "0.5273718", "0.5265982", "0.5265047", "0.52612674", "0.5260472", "0.5259608", "0.52584076", "0.52581805", "0.525457", "0.52543414", "0.5253987" ]
0.5735269
21
Test that service can be served without logger
func TestServiceWithoutLogger(t *testing.T) { s := res.NewService("test") s.SetLogger(nil) s.Handle("model", res.GetResource(func(r res.GetRequest) { r.NotFound() })) session := restest.NewSession(t, s, restest.WithKeepLogger) defer session.Close() }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Test_NotFound(t *testing.T) {\n\tvar (\n\t\tnotFoundMsg ErrorMessage\n\t\tresp *http.Response\n\t)\n\n\tsvc := NewService()\n\tts := httptest.NewServer(svc.NewRouter(\"*\"))\n\tdefer ts.Close()\n\n\treq, _ := http.NewRequest(\"GET\", ts.URL+\"/not_found\", nil)\n\n\toutputLog := helpers.CaptureOutput(func() {\n\t\tresp, _ = http.DefaultClient.Do(req)\n\t})\n\n\tif got, want := resp.StatusCode, http.StatusNotFound; got != want {\n\t\tt.Fatalf(\"Invalid status code, got %d but want %d\", got, want)\n\t}\n\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tt.Fatalf(\"Got an error when reading body: %s\", err.Error())\n\t}\n\terr = json.Unmarshal(data, &notFoundMsg)\n\tif err != nil {\n\t\tt.Fatalf(\"Got an error when parsing json: %s\", err.Error())\n\t}\n\tif got, want := notFoundMsg.Code, http.StatusNotFound; got != want {\n\t\tt.Fatalf(\"Wrong code return, got %d but want %d\", got, want)\n\t}\n\tif got, want := notFoundMsg.Message, \"Not Found\"; got != want {\n\t\tt.Fatalf(\"Wrong message return, got %s but want %s\", got, want)\n\t}\n\n\tmatched, err := regexp.MatchString(`uri=/not_found `, outputLog)\n\tif matched != true || err != nil {\n\t\tt.Fatalf(\"request is not logged :\\n%s\", outputLog)\n\t}\n}", "func TestMiddlewares_OnPanic(t *testing.T) {\n\tassert := assertlib.New(t)\n\thook, restoreFct := logging.MockSharedLoggerHook()\n\tdefer restoreFct()\n\tapp, _ := New()\n\trouter := app.HTTPHandler\n\trouter.Get(\"/dummy\", func(http.ResponseWriter, *http.Request) {\n\t\tpanic(\"error in service\")\n\t})\n\tsrv := httptest.NewServer(router)\n\tdefer srv.Close()\n\n\tnbLogsBeforeRequest := len(hook.AllEntries())\n\trequest, _ := http.NewRequest(\"GET\", srv.URL+\"/dummy\", http.NoBody)\n\trequest.Header.Set(\"X-Forwarded-For\", \"1.1.1.1\")\n\tresponse, err := http.DefaultClient.Do(request)\n\tassert.NoError(err)\n\tif err != nil {\n\t\treturn\n\t}\n\trespBody, _ := ioutil.ReadAll(response.Body)\n\t_ = response.Body.Close()\n\n\t// 
check that the error has been handled by the recover\n\tassert.Equal(http.StatusInternalServerError, response.StatusCode)\n\tassert.Equal(\"Internal Server Error\\n\", string(respBody))\n\tassert.Equal(\"text/plain; charset=utf-8\", response.Header.Get(\"Content-type\"))\n\tallLogs := hook.AllEntries()\n\tassert.Equal(2, len(allLogs)-nbLogsBeforeRequest)\n\t// check that the req id is correct\n\tassert.Equal(allLogs[len(allLogs)-1].Data[\"req_id\"], allLogs[len(allLogs)-2].Data[\"req_id\"])\n\t// check that the recovere put the error info in the logs\n\tassert.Equal(\"error in service\", hook.LastEntry().Data[\"panic\"])\n\tassert.NotNil(hook.LastEntry().Data[\"stack\"])\n\t// check that the real IP is used in the logs\n\tassert.Equal(\"1.1.1.1\", allLogs[len(allLogs)-1].Data[\"remote_addr\"])\n\tassert.Equal(\"1.1.1.1\", allLogs[len(allLogs)-2].Data[\"remote_addr\"])\n}", "func Test_IndexHandler(t *testing.T) {\n\tvar (\n\t\tversionMsg Service\n\t\tresp *http.Response\n\t)\n\n\tsvc := NewService()\n\n\tts := httptest.NewServer(svc.NewRouter(\"*\"))\n\tdefer ts.Close()\n\n\treq, _ := http.NewRequest(\"GET\", ts.URL+\"/\", nil)\n\n\toutputLog := helpers.CaptureOutput(func() {\n\t\tresp, _ = http.DefaultClient.Do(req)\n\t})\n\n\tif got, want := resp.StatusCode, 200; got != want {\n\t\tt.Fatalf(\"Invalid status code, got %d but want %d\", got, want)\n\t}\n\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tt.Fatalf(\"Got an error when reading body: %s\", err.Error())\n\t}\n\n\terr = json.Unmarshal(data, &versionMsg)\n\tif err != nil {\n\t\tt.Fatalf(\"Got an error when parsing json: %s\", err.Error())\n\t}\n\tif got, want := versionMsg.Version, svc.Version; got != want {\n\t\tt.Fatalf(\"Wrong version return, got %s but want %s\", got, want)\n\t}\n\tif got, want := versionMsg.Name, svc.Name; got != want {\n\t\tt.Fatalf(\"Wrong version return, got %s but want %s\", got, want)\n\t}\n\n\tmatched, err := regexp.MatchString(`uri=/ `, outputLog)\n\tif matched 
!= true || err != nil {\n\t\tt.Fatalf(\"request is not logged :\\n%s\", outputLog)\n\t}\n}", "func TestInvalidLogger(t *testing.T) {\n\tassert := assert.New(t)\n\n\tw := webhook.W{\n\t\tUntil: time.Now().Add(60 * time.Second),\n\t\tEvents: []string{\"iot\"},\n\t}\n\tw.Config.URL = \"http://localhost:9999/foo\"\n\tw.Config.ContentType = \"application/json\"\n\n\tobs, err := OutboundSenderFactory{\n\t\tListener: w,\n\t\tClient: &http.Client{},\n\t\tCutOffPeriod: time.Second,\n\t\tNumWorkers: 10,\n\t\tQueueSize: 10,\n\t\tProfilerFactory: testServerProfilerFactory,\n\t}.New()\n\tassert.Nil(obs)\n\tassert.NotNil(err)\n}", "func (s *OmnibusTestSuite) httpOnly() {\n\tif s.sslEnabled {\n\t\ts.T().Skip(\"HTTP only test, skipping...\")\n\t}\n}", "func TestReturns200IfThereAreNoChecks(t *testing.T) {\n\trecorder := httptest.NewRecorder()\n\n\treq, err := http.NewRequest(\"GET\", \"https://fakeurl.com/debug/health\", nil)\n\tif err != nil {\n\t\tt.Errorf(\"Failed to create request.\")\n\t}\n\n\tStatusHandler(recorder, req)\n\n\tif recorder.Code != 200 {\n\t\tt.Errorf(\"Did not get a 200.\")\n\t}\n}", "func TestServiceSetLogger(t *testing.T) {\n\ts := res.NewService(\"test\")\n\tl := logger.NewMemLogger()\n\ts.SetLogger(l)\n\tif s.Logger() != l {\n\t\tt.Errorf(\"expected Logger to return the logger passed to SetLogger, but it didn't\")\n\t}\n\ts.Handle(\"model\", res.GetResource(func(r res.GetRequest) { r.NotFound() }))\n\n\tsession := restest.NewSession(t, s, restest.WithKeepLogger)\n\tdefer session.Close()\n}", "func makeHTTPTestServerNoLogs(t testing.TB, fnmc func(mc *config.MayaConfig)) *TestServer {\n\treturn makeHTTPTestServerWithWriter(t, ioutil.Discard, fnmc)\n}", "func TestGetUserServiceDoesntExist (t *testing.T){\n\t_, err := GetUserService(\"\")\n\tassert.Equal(t, 404, err.HTTPStatus)\n}", "func testGatePipelineGetMissing() *httptest.Server {\n\treturn httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\thttp.NotFound(w, 
r)\n\t}))\n}", "func noRouteResponseLogger(c *gin.Context) {\n\tdetails := obtainBodyLogWriter(c)\n\n\tdumpPayload := repository.DumpResponsePayload{\n\t\tHeaders: details.Blw.Header(),\n\t\tBody: details.Blw.Body,\n\t\tStatus: http.StatusNotFound,\n\t}\n\n\tif utils.CheckExcludedPaths(c.FullPath()) {\n\t\tgo repository.DumpRequestResponse(c, Config.ApplicationID, DB, dumpPayload, readBody(details.Rdr))\n\t}\n\n\tc.JSON(http.StatusNotFound, gin.H{\n\t\t\"code\": http.StatusNotFound,\n\t\t\"message\": \"The requested resource could not be found!\",\n\t})\n}", "func TestServer_InitNetworkLogging(t *testing.T) {\n\tconst errString = \"must error to get Start to return!\"\n\tfor _, test := range []struct {\n\t\tName string\n\t\tEnv map[string]string\n\t\tInstall config.Install\n\t\tRuntime config.Runtime\n\t\tInitFn witchcraft.InitFunc\n\t\tVerifyLog func(t *testing.T, logOutput []byte)\n\t}{\n\t\t{\n\t\t\tName: \"Missing URIs\",\n\t\t\tInstall: config.Install{UseConsoleLog: true},\n\t\t\tInitFn: func(ctx context.Context, info witchcraft.InitInfo) (func(), error) {\n\t\t\t\tsvc1log.FromContext(ctx).Info(\"Inside initFunc\")\n\t\t\t\treturn nil, werror.ErrorWithContextParams(ctx, errString)\n\t\t\t},\n\t\t\tVerifyLog: func(t *testing.T, logOutput []byte) {\n\t\t\t\tassert.Contains(t, string(logOutput), \"Inside initFunc\")\n\t\t\t\tassert.Contains(t, string(logOutput), errString)\n\t\t\t\t// No messages about TCP logger should be logged\n\t\t\t\tassert.NotContains(t, string(logOutput), \"TCP\")\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Configured URI, missing environment metadata\",\n\t\t\tInstall: config.Install{UseConsoleLog: true},\n\t\t\tRuntime: config.Runtime{\n\t\t\t\tServiceDiscovery: httpclient.ServicesConfig{Services: map[string]httpclient.ClientConfig{\n\t\t\t\t\t\"sls-log-tcp-json-receiver\": {\n\t\t\t\t\t\tURIs: []string{\"tcp://network-log-forwarder.domain:8514\"},\n\t\t\t\t\t},\n\t\t\t\t}},\n\t\t\t},\n\t\t\tInitFn: func(ctx context.Context, info 
witchcraft.InitInfo) (func(), error) {\n\t\t\t\tsvc1log.FromContext(ctx).Info(\"Inside initFunc\")\n\t\t\t\treturn nil, werror.ErrorWithContextParams(ctx, errString)\n\t\t\t},\n\t\t\tVerifyLog: func(t *testing.T, logOutput []byte) {\n\t\t\t\tassert.Contains(t, string(logOutput), \"TCP logging will not be enabled since all environment variables are not set.\")\n\t\t\t\tassert.Contains(t, string(logOutput), \"Inside initFunc\")\n\t\t\t\tassert.Contains(t, string(logOutput), errString)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Environment URI, missing environment metadata\",\n\t\t\tInstall: config.Install{UseConsoleLog: true},\n\t\t\tRuntime: config.Runtime{},\n\t\t\tEnv: map[string]string{\n\t\t\t\t\"NETWORK_LOGGING_URL\": \"tcp://network-log-forwarder.domain:8514\",\n\t\t\t},\n\t\t\tInitFn: func(ctx context.Context, info witchcraft.InitInfo) (func(), error) {\n\t\t\t\tsvc1log.FromContext(ctx).Info(\"Inside initFunc\")\n\t\t\t\treturn nil, werror.ErrorWithContextParams(ctx, errString)\n\t\t\t},\n\t\t\tVerifyLog: func(t *testing.T, logOutput []byte) {\n\t\t\t\tassert.Contains(t, string(logOutput), \"TCP logging will not be enabled since all environment variables are not set.\")\n\t\t\t\tassert.Contains(t, string(logOutput), \"Inside initFunc\")\n\t\t\t\tassert.Contains(t, string(logOutput), errString)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Environment URI and metadata, missing TLS info\",\n\t\t\tInstall: config.Install{UseConsoleLog: true},\n\t\t\tRuntime: config.Runtime{},\n\t\t\tEnv: map[string]string{\n\t\t\t\t\"NETWORK_LOGGING_URL\": \"tcp://network-log-forwarder.domain:8514\",\n\t\t\t\t\"LOG_ENVELOPE_DEPLOYMENT_NAME\": \"deployment\",\n\t\t\t\t\"LOG_ENVELOPE_ENVIRONMENT_NAME\": \"environment\",\n\t\t\t\t\"LOG_ENVELOPE_ENVIRONMENT_ID\": \"env_id\",\n\t\t\t\t\"LOG_ENVELOPE_HOST\": \"hostname\",\n\t\t\t\t\"LOG_ENVELOPE_NODE_ID\": \"node_id\",\n\t\t\t\t\"LOG_ENVELOPE_PRODUCT_NAME\": \"product\",\n\t\t\t\t\"LOG_ENVELOPE_PRODUCT_VERSION\": 
\"version\",\n\t\t\t\t\"LOG_ENVELOPE_SERVICE_NAME\": \"service\",\n\t\t\t\t\"LOG_ENVELOPE_SERVICE_ID\": \"service_id\",\n\t\t\t\t\"LOG_ENVELOPE_STACK_NAME\": \"stack\",\n\t\t\t\t\"LOG_ENVELOPE_STACK_ID\": \"stack_id\",\n\t\t\t},\n\t\t\tInitFn: func(ctx context.Context, info witchcraft.InitInfo) (func(), error) {\n\t\t\t\tsvc1log.FromContext(ctx).Info(\"Inside initFunc\")\n\t\t\t\treturn nil, werror.ErrorWithContextParams(ctx, errString)\n\t\t\t},\n\t\t\tVerifyLog: func(t *testing.T, logOutput []byte) {\n\t\t\t\tassert.Contains(t, string(logOutput), \"TCP logging will not be enabled since TLS config is unset or invalid.\")\n\t\t\t\tassert.Contains(t, string(logOutput), \"Inside initFunc\")\n\t\t\t\tassert.Contains(t, string(logOutput), errString)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Configured URI and environment metadata, missing TLS info\",\n\t\t\tInstall: config.Install{UseConsoleLog: true},\n\t\t\tRuntime: config.Runtime{\n\t\t\t\tServiceDiscovery: httpclient.ServicesConfig{Services: map[string]httpclient.ClientConfig{\n\t\t\t\t\t\"sls-log-tcp-json-receiver\": {\n\t\t\t\t\t\tURIs: []string{\"tcp://network-log-forwarder.domain:8514\"},\n\t\t\t\t\t},\n\t\t\t\t}},\n\t\t\t},\n\t\t\tEnv: map[string]string{\n\t\t\t\t\"LOG_ENVELOPE_DEPLOYMENT_NAME\": \"deployment\",\n\t\t\t\t\"LOG_ENVELOPE_ENVIRONMENT_NAME\": \"environment\",\n\t\t\t\t\"LOG_ENVELOPE_ENVIRONMENT_ID\": \"env_id\",\n\t\t\t\t\"LOG_ENVELOPE_HOST\": \"hostname\",\n\t\t\t\t\"LOG_ENVELOPE_NODE_ID\": \"node_id\",\n\t\t\t\t\"LOG_ENVELOPE_PRODUCT_NAME\": \"product\",\n\t\t\t\t\"LOG_ENVELOPE_PRODUCT_VERSION\": \"version\",\n\t\t\t\t\"LOG_ENVELOPE_SERVICE_NAME\": \"service\",\n\t\t\t\t\"LOG_ENVELOPE_SERVICE_ID\": \"service_id\",\n\t\t\t\t\"LOG_ENVELOPE_STACK_NAME\": \"stack\",\n\t\t\t\t\"LOG_ENVELOPE_STACK_ID\": \"stack_id\",\n\t\t\t},\n\t\t\tInitFn: func(ctx context.Context, info witchcraft.InitInfo) (func(), error) {\n\t\t\t\tsvc1log.FromContext(ctx).Info(\"Inside initFunc\")\n\t\t\t\treturn nil, 
werror.ErrorWithContextParams(ctx, errString)\n\t\t\t},\n\t\t\tVerifyLog: func(t *testing.T, logOutput []byte) {\n\t\t\t\tassert.Contains(t, string(logOutput), \"TCP logging will not be enabled since TLS config is unset or invalid.\")\n\t\t\t\tassert.Contains(t, string(logOutput), \"Inside initFunc\")\n\t\t\t\tassert.Contains(t, string(logOutput), errString)\n\t\t\t},\n\t\t},\n\t} {\n\t\tt.Run(test.Name, func(t *testing.T) {\n\t\t\tos.Clearenv()\n\t\t\tfor k, v := range test.Env {\n\t\t\t\trequire.NoError(t, os.Setenv(k, v))\n\t\t\t}\n\t\t\tlogOutputBuffer := &bytes.Buffer{}\n\t\t\terr := witchcraft.NewServer().\n\t\t\t\tWithInitFunc(test.InitFn).\n\t\t\t\tWithInstallConfig(test.Install).\n\t\t\t\tWithRuntimeConfig(test.Runtime).\n\t\t\t\tWithLoggerStdoutWriter(logOutputBuffer).\n\t\t\t\tWithECVKeyProvider(witchcraft.ECVKeyNoOp()).\n\t\t\t\tWithDisableGoRuntimeMetrics().\n\t\t\t\tWithMetricsBlacklist(map[string]struct{}{\"server.uptime\": {}, \"logging.sls\": {}, \"logging.sls.length\": {}}).\n\t\t\t\tWithSelfSignedCertificate().\n\t\t\t\tStart()\n\t\t\tassert.EqualError(t, err, errString)\n\t\t\ttest.VerifyLog(t, logOutputBuffer.Bytes())\n\t\t})\n\t}\n}", "func serveDummy(ctx context.Context, cfg *config.Config) error {\n\t// serve a http healthcheck endpoint\n\tgo func() {\n\t\terr := serveHTTPHealthcheck(ctx, cfg)\n\t\tif err != nil {\n\t\t\tlogger.Errorf(ctx, \"Unable to serve http\", cfg.GetGrpcHostAddress(), err)\n\t\t}\n\t}()\n\n\tgrpcServer := newGRPCDummyServer(ctx, cfg)\n\n\tgrpcListener, err := net.Listen(\"tcp\", cfg.GetGrpcHostAddress())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlogger.Infof(ctx, \"Serving DataCatalog Insecure on port %v\", cfg.GetGrpcHostAddress())\n\treturn grpcServer.Serve(grpcListener)\n}", "func (m *CloudWatchLogsServiceMock) CreateNewServiceIfUnHealthy() {\n\n}", "func (g GenericService) Serve() {\n g.init()\n glog.Fatal(http.ListenAndServe(fmt.Sprintf(\":%d\", port), nil))\n}", "func notSupported(w http.ResponseWriter, r 
*http.Request, body []byte, creds auth.Creds, vars map[string]string, req_id string) {\n\tglog.Warningf(\"Docker pattern not accepted, URI=%s\", r.RequestURI)\n\tNoEndpointHandler(w, r)\n}", "func TestFailedEndpoint0(t *testing.T) {\n\tisTesting = true\n\tvar request = Request{\n\t\tPath: \"/api/devices\",\n\t\tHTTPMethod: \"PUT\",\n\t}\n\tvar response, _ = Handler(request)\n\tif response.StatusCode != 404 {\n\t\tt.Errorf(\"response status code has to be 404 but is %d\", response.StatusCode)\n\t}\n\tif response.Body != `{\"message\":\"requested endpoint not found\"}` {\n\t\tt.Errorf(\"body is: %s\", response.Body)\n\t}\n}", "func TestCallToPublicService(t *testing.T) {\n\tt.Parallel()\n\n\tclients := Setup(t)\n\n\tt.Log(\"Creating a Service for the helloworld test app.\")\n\tnames := test.ResourceNames{\n\t\tService: test.ObjectNameForTest(t),\n\t\tImage: test.HelloWorld,\n\t}\n\n\ttest.EnsureTearDown(t, clients, &names)\n\n\tresources, err := v1test.CreateServiceReady(t, clients, &names)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create initial Service: %v: %v\", names.Service, err)\n\t}\n\n\tif resources.Route.Status.URL.Host == \"\" {\n\t\tt.Fatalf(\"Route is missing .Status.URL: %#v\", resources.Route.Status)\n\t}\n\tif resources.Route.Status.Address == nil {\n\t\tt.Fatalf(\"Route is missing .Status.Address: %#v\", resources.Route.Status)\n\t}\n\n\tgatewayTestCases := []struct {\n\t\tname string\n\t\turl *url.URL\n\t\taccessibleExternally bool\n\t}{\n\t\t{\"local_address\", resources.Route.Status.Address.URL.URL(), false},\n\t\t{\"external_address\", resources.Route.Status.URL.URL(), true},\n\t}\n\n\tfor _, tc := range gatewayTestCases {\n\t\ttc := tc\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\t\t\tif !test.ServingFlags.DisableLogStream {\n\t\t\t\tcancel := logstream.Start(t)\n\t\t\t\tdefer cancel()\n\t\t\t}\n\t\t\ttestProxyToHelloworld(t, clients, tc.url, false /*inject*/, tc.accessibleExternally)\n\t\t})\n\t}\n}", "func 
TestHandler_Root_Unauthorized(t *testing.T) {\n\th := NewTestHandler()\n\tdefer h.Close()\n\n\tresp, _ := http.Get(h.Server.URL)\n\tresp.Body.Close()\n\tequals(t, 200, resp.StatusCode)\n}", "func serviceUnavailable(rw http.ResponseWriter, r *http.Request) {\n\n}", "func HealthCheck(w http.ResponseWriter, r *http.Request) {}", "func TestStatsHandlerReturns404OnPost(t *testing.T) {\n req, err := http.NewRequest(\"POST\", \"/stats\", nil) \n if err != nil {\n t.Fatal(err)\n }\n\n rr := httptest.NewRecorder()\n\n context := makeServerContext()\n\n handler := statsHandler{sc:&context}\n handler.ServeHTTP(rr, req)\n\n // Check the status code is what we expect.\n if status := rr.Code; status != http.StatusNotFound {\n t.Errorf(\"stats handler returned wrong status code: got %v want %v\",\n status, http.StatusNotFound)\n }\n}", "func TestHashHandlerReturns404OnGet(t *testing.T) {\n req, err := http.NewRequest(\"GET\", \"/hash\", nil) \n if err != nil {\n t.Fatal(err)\n }\n\n rr := httptest.NewRecorder()\n\n context := makeServerContext()\n\n handler := hashHandler{sc:&context}\n handler.ServeHTTP(rr, req)\n\n // Check the status code is what we expect.\n if status := rr.Code; status != http.StatusNotFound {\n t.Errorf(\"hash handler returned wrong status code: got %v want %v\",\n status, http.StatusNotFound)\n }\n}", "func TestMiddlewareWithLogging(t *testing.T) {\n\thandler := WithLogging(nil, http.HandlerFunc(testHandler))\n\n\treq := httptest.NewRequest(http.MethodGet, \"/api/v1/\", nil)\n\tw := httptest.NewRecorder()\n\n\thandler.ServeHTTP(w, req)\n\n\t// Check the status code is what we expect.\n\tif status := w.Code; status != http.StatusOK {\n\t\tt.Errorf(\"handler returned wrong status code: got %v want %v\", status, http.StatusOK)\n\t}\n\t// Check the response body is what we expect.\n\texpected := `{\"alive\": true}`\n\tif w.Body.String() != expected {\n\t\tt.Errorf(\"handler returned unexpected body: got %v want %v\", w.Body.String(), expected)\n\t}\n\n}", 
"func doNothing(w http.ResponseWriter, r *http.Request) {}", "func DisableLogging() {}", "func ShouldLogDebug(err error) bool {\n\tswitch {\n\tcase IsDeadlineExceeded(err), IsForbiddenResponse(err):\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}", "func (suite *InjectorSuite) TestDefaultHttpClient() {\n\tsuite.NotNil(suite.httpClient)\n}", "func doNothing(error, http.ResponseWriter, *http.Request) {}", "func serveDummy(rw http.ResponseWriter, req *http.Request) {\n\tverbose := req.URL.Query().Get(\"verbose\")\n\n\tif err := req.ParseForm(); err != nil {\n\t\tdummyResponseFail(rw, verbose, \"Failed to parse request\")\n\t\treturn\n\t}\n\n\t// Verbose in QS has precedence\n\tif verbose == \"\" {\n\t\tverbose = req.Form.Get(\"verbose\")\n\t}\n\n\tif redirect := req.Form.Get(\"redirect\"); redirect != \"\" {\n\t\tdummyRedirect(rw, redirect)\n\t} else {\n\t\tdummyResponseSuccess(rw, verbose)\n\t}\n}", "func TestFailedEndpoint1(t *testing.T) {\n\tisTesting = true\n\tvar request = Request{\n\t\tPath: \"/api/device\",\n\t\tHTTPMethod: \"GET\",\n\t}\n\tvar response, _ = Handler(request)\n\tif response.StatusCode != 404 {\n\t\tt.Errorf(\"response status code has to be 404 but is %d\", response.StatusCode)\n\t}\n\tif response.Body != `{\"message\":\"requested endpoint not found\"}` {\n\t\tt.Errorf(\"body is: %s\", response.Body)\n\t}\n}", "func TestGetQueryWhenLogStoreNotReady(t *testing.T) {\n\t// set up test server and mocked LogStore\n\tmockLogStore := new(MockedLogStore)\n\tserver := newTestServer(mockLogStore)\n\ttestServer := httptest.NewServer(server.server.Handler)\n\tdefer testServer.Close()\n\tclient := testServer.Client()\n\n\t//\n\t// set up mock expectations\n\t//\n\n\tmockLogStore.On(\"Ready\").Return(false, fmt.Errorf(\"connection refused\"))\n\n\t//\n\t// make call\n\t//\n\tqueryURL, _ := url.Parse(testServer.URL + \"/query\")\n\tqueryParams := queryURL.Query()\n\tqueryParams.Set(\"namespace\", \"default\")\n\tqueryParams.Set(\"pod_name\", 
\"nginx-deploymeny-abcde\")\n\tqueryParams.Set(\"container_name\", \"nginx\")\n\tqueryParams.Set(\"start_time\", \"2018-01-01T12:00:00.000Z\")\n\tqueryParams.Set(\"end_time\", \"2018-01-01T13:00:00.000Z\")\n\tqueryURL.RawQuery = queryParams.Encode()\n\n\tresp, _ := client.Get(queryURL.String())\n\t// should return 503 (Service Unavailable)\n\tassert.Equalf(t, http.StatusServiceUnavailable, resp.StatusCode, \"unexpected response code\")\n\tassert.Equalf(t, `{\"message\":\"data store is not ready\",\"detail\":\"connection refused\"}`, readBody(t, resp), \"unexpected response\")\n\n\t// verify that expected calls were made\n\tmockLogStore.AssertExpectations(t)\n}", "func TestServiceStart(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.GetResource(func(r res.GetRequest) { r.NotFound() }))\n\t}, nil)\n}", "func HttpTest() {\n\tStartHttpServer()\n}", "func notImplemented(rw http.ResponseWriter, r *http.Request) {\n\n}", "func TestServices(t *testing.T) { check.TestingT(t) }", "func doNotFoundTest(t *testing.T, method string, uri string) {\n\tclient := testHttpClient()\n\treq, err := http.NewRequest(method, testServer.URL+uri, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tres, err := client.Do(req)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif res.StatusCode != 404 {\n\t\tt.Errorf(\"%s %s : Expected HTTP Status Code 404, got %d\\n\", method, uri, res.StatusCode)\n\t}\n}", "func CheckServiceUnavailable(w *httptest.ResponseRecorder) {\n\tCheckResponseBody(w, 503, \"{\\\"messages\\\":[\\\"There was an error, please try again later\\\"],\\\"errors\\\":{\\\"error\\\":[\\\"service unavailable\\\"]}}\")\n}", "func TestDo_TelemetryDisabled(t *testing.T) {\n\ttype testServerResponse struct {\n\t\tAPIResource\n\t\tMessage string `json:\"message\"`\n\t}\n\n\tmessage := \"Hello, client.\"\n\trequestNum := 0\n\n\ttestServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t// none of the requests should 
include telemetry metrics\n\t\tassert.Equal(t, r.Header.Get(\"X-Stripe-Client-Telemetry\"), \"\")\n\n\t\tresponse := testServerResponse{Message: message}\n\n\t\tdata, err := json.Marshal(response)\n\t\tassert.NoError(t, err)\n\n\t\t_, err = w.Write(data)\n\t\tassert.NoError(t, err)\n\n\t\trequestNum++\n\t}))\n\tdefer testServer.Close()\n\n\tbackend := GetBackendWithConfig(\n\t\tAPIBackend,\n\t\t&BackendConfig{\n\t\t\tLeveledLogger: debugLeveledLogger,\n\t\t\tMaxNetworkRetries: Int64(0),\n\t\t\tURL: String(testServer.URL),\n\t\t},\n\t).(*BackendImplementation)\n\n\t// When telemetry is enabled, the metrics for a request are sent with the\n\t// _next_ request via the `X-Stripe-Client-Telemetry header`. To test that\n\t// metrics aren't being sent, we need to fire off two requests in sequence.\n\tfor i := 0; i < 2; i++ {\n\t\trequest, err := backend.NewRequest(\n\t\t\thttp.MethodGet,\n\t\t\t\"/hello\",\n\t\t\t\"sk_test_123\",\n\t\t\t\"application/x-www-form-urlencoded\",\n\t\t\tnil,\n\t\t)\n\t\tassert.NoError(t, err)\n\n\t\tvar response testServerResponse\n\t\terr = backend.Do(request, nil, &response)\n\n\t\tassert.NoError(t, err)\n\t\tassert.Equal(t, message, response.Message)\n\t}\n\n\t// We should have seen exactly two requests.\n\tassert.Equal(t, 2, requestNum)\n}", "func TestConsoleService(t *testing.T) {\n\tc := new(ConsoleService)\n\n\tc.Started()\n\tc.Stopped()\n\tc.Error(\"Error msg\")\n\tc.Init()\n\tc.Close()\n}", "func TestService(t *testing.T) {\n\t// Create service to test\n\ts := res.NewService(\"foo\")\n\ts.Handle(\"bar.$id\",\n\t\tres.Access(res.AccessGranted),\n\t\tres.GetModel(func(r res.ModelRequest) {\n\t\t\tr.Model(struct {\n\t\t\t\tMessage string `json:\"msg\"`\n\t\t\t}{r.PathParam(\"id\")})\n\t\t}),\n\t)\n\n\t// Create test session\n\tc := restest.NewSession(t, s)\n\tdefer c.Close()\n\n\t// Test sending get request and validate response\n\tc.Get(\"foo.bar.42\").\n\t\tResponse().\n\t\tAssertModel(map[string]string{\"msg\": \"42\"})\n}", "func 
PreLoggingFilter(_ http.ResponseWriter, r *http.Request, _ httprouter.Params) bool {\n\tutil.LOG.Infof(\"started - API {%s} being called by client {%s} through {%s}\", r.URL.String(), r.RemoteAddr, r.Method)\n\treturn false\n}", "func TestHealthCheckHandler(t *testing.T) {\n req, err := http.NewRequest(\"GET\", \"/healthcheck\", nil)\n if err != nil {\n t.Fatal(err)\n }\n s := server{\n router : httprouter.New(),\n }\n\n rr := httptest.NewRecorder()\n handler := s.HealthCheckHandler()\n\n s.router.GET(\"/healthcheck\", handler)\n\n s.router.ServeHTTP(rr, req)\n\n // Check if the status code and response body are the one expected.\n if status := rr.Code; status != http.StatusOK {\n t.Errorf(\"handler returned wrong status code: got %v want %v\",\n status, http.StatusOK)\n }\n expected := `{\"alive\": true}`\n if rr.Body.String() != expected {\n t.Errorf(\"handler returned unexpected body: got %v want %v\",\n rr.Body.String(), expected)\n }\n}", "func NewService(c Config) *Service {\n \n s := &Service{}\n s.instance = c.Instance\n s.hostname = c.Hostname\n s.userAgent = c.UserAgent\n s.port = c.Endpoint\n s.router = mux.NewRouter()\n s.entityHandler = c.EntityHandler\n s.readTimeout = c.ReadTimeout\n s.writeTimeout = c.WriteTimeout\n s.idleTimeout = c.IdleTimeout\n \n if c.Name == \"\" {\n s.name = \"service\"\n }else{\n s.name = c.Name\n }\n \n if c.Debug || os.Getenv(\"GOREST_DEBUG\") == \"true\" {\n s.debug = true\n }\n \n if c.TraceRegexps != nil {\n if s.traceRequests == nil {\n s.traceRequests = make(map[string]*regexp.Regexp)\n }\n for _, e := range c.TraceRegexps {\n s.traceRequests[e.String()] = e\n }\n }\n if t := os.Getenv(\"GOREST_TRACE\"); t != \"\" {\n if s.traceRequests == nil {\n s.traceRequests = make(map[string]*regexp.Regexp)\n }\n for _, e := range strings.Split(t, \";\") {\n s.traceRequests[e] = regexp.MustCompile(e)\n }\n }\n if s.debug {\n for k, _ := range s.traceRequests {\n fmt.Println(\"rest: trace:\", k)\n }\n }\n \n s.suppress = 
make(map[string]struct{})\n if v := os.Getenv(\"GOREST_TRACE_SUPPRESS_HEADERS\"); v != \"\" {\n if !strings.EqualFold(v, \"none\") {\n for _, e := range strings.Split(v, \",\") {\n s.suppress[strings.ToLower(e)] = struct{}{}\n }\n }\n }else{\n s.suppress[\"authorization\"] = struct{}{}\n }\n \n return s\n}", "func healthCheck(w http.ResponseWriter, r *http.Request) {\n\tlog.Printf(\"Serving request: %s\", r.URL.Path)\n\tfmt.Fprintf(w, \"Ok\")\n}", "func TestWithProfilingEnabled(t *testing.T) {\n\tmockLogStore := new(MockedLogStore)\n\tserver := NewHTTP(&Config{BindAddress: \"127.0.0.1:8080\", EnableProfiling: true}, mockLogStore)\n\ttestServer := httptest.NewServer(server.server.Handler)\n\tdefer testServer.Close()\n\tclient := testServer.Client()\n\n\tresp, _ := client.Get(testServer.URL + \"/debug/pprof/heap\")\n\trequire.Equalf(t, http.StatusOK, resp.StatusCode, \"unexpected status code\")\n}", "func LogsContainerNotFound(t goatest.TInterface, ctx context.Context, service *goa.Service, ctrl app.ContainerController, id string, follow bool, since *time.Time, stderr bool, stdout bool, tail string, timestamps bool, until *time.Time) (http.ResponseWriter, error) {\n\t// Setup service\n\tvar (\n\t\tlogBuf bytes.Buffer\n\t\tresp interface{}\n\n\t\trespSetter goatest.ResponseSetterFunc = func(r interface{}) { resp = r }\n\t)\n\tif service == nil {\n\t\tservice = goatest.Service(&logBuf, respSetter)\n\t} else {\n\t\tlogger := log.New(&logBuf, \"\", log.Ltime)\n\t\tservice.WithLogger(goa.NewLogger(logger))\n\t\tnewEncoder := func(io.Writer) goa.Encoder { return respSetter }\n\t\tservice.Encoder = goa.NewHTTPEncoder() // Make sure the code ends up using this decoder\n\t\tservice.Encoder.Register(newEncoder, \"*/*\")\n\t}\n\n\t// Setup request context\n\trw := httptest.NewRecorder()\n\tquery := url.Values{}\n\t{\n\t\tsliceVal := []string{fmt.Sprintf(\"%v\", follow)}\n\t\tquery[\"follow\"] = sliceVal\n\t}\n\tif since != nil {\n\t\tsliceVal := 
[]string{(*since).Format(time.RFC3339)}\n\t\tquery[\"since\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{fmt.Sprintf(\"%v\", stderr)}\n\t\tquery[\"stderr\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{fmt.Sprintf(\"%v\", stdout)}\n\t\tquery[\"stdout\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{tail}\n\t\tquery[\"tail\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{fmt.Sprintf(\"%v\", timestamps)}\n\t\tquery[\"timestamps\"] = sliceVal\n\t}\n\tif until != nil {\n\t\tsliceVal := []string{(*until).Format(time.RFC3339)}\n\t\tquery[\"until\"] = sliceVal\n\t}\n\tu := &url.URL{\n\t\tPath: fmt.Sprintf(\"/api/v2/container/%v/logs\", id),\n\t\tRawQuery: query.Encode(),\n\t}\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\tpanic(\"invalid test \" + err.Error()) // bug\n\t}\n\tprms := url.Values{}\n\tprms[\"id\"] = []string{fmt.Sprintf(\"%v\", id)}\n\t{\n\t\tsliceVal := []string{fmt.Sprintf(\"%v\", follow)}\n\t\tprms[\"follow\"] = sliceVal\n\t}\n\tif since != nil {\n\t\tsliceVal := []string{(*since).Format(time.RFC3339)}\n\t\tprms[\"since\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{fmt.Sprintf(\"%v\", stderr)}\n\t\tprms[\"stderr\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{fmt.Sprintf(\"%v\", stdout)}\n\t\tprms[\"stdout\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{tail}\n\t\tprms[\"tail\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{fmt.Sprintf(\"%v\", timestamps)}\n\t\tprms[\"timestamps\"] = sliceVal\n\t}\n\tif until != nil {\n\t\tsliceVal := []string{(*until).Format(time.RFC3339)}\n\t\tprms[\"until\"] = sliceVal\n\t}\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\tgoaCtx := goa.NewContext(goa.WithAction(ctx, \"ContainerTest\"), rw, req, prms)\n\tlogsCtx, _err := app.NewLogsContainerContext(goaCtx, req, service)\n\tif _err != nil {\n\t\te, ok := _err.(goa.ServiceError)\n\t\tif !ok {\n\t\t\tpanic(\"invalid test data \" + _err.Error()) // bug\n\t\t}\n\t\treturn nil, e\n\t}\n\n\t// Perform action\n\t_err 
= ctrl.Logs(logsCtx)\n\n\t// Validate response\n\tif _err != nil {\n\t\tt.Fatalf(\"controller returned %+v, logs:\\n%s\", _err, logBuf.String())\n\t}\n\tif rw.Code != 404 {\n\t\tt.Errorf(\"invalid response status code: got %+v, expected 404\", rw.Code)\n\t}\n\tvar mt error\n\tif resp != nil {\n\t\tvar _ok bool\n\t\tmt, _ok = resp.(error)\n\t\tif !_ok {\n\t\t\tt.Fatalf(\"invalid response media: got variable of type %T, value %+v, expected instance of error\", resp, resp)\n\t\t}\n\t}\n\n\t// Return results\n\treturn rw, mt\n}", "func TestWebServer(t *testing.T) {\n\n\tctx, _ := context.WithCancel(context.Background())\n\n\tif testing.Short() {\n\t\tt.Skip()\n\t}\n\n\t// create a new kuberhealthy\n\tt.Log(\"Making fake check\")\n\tkh := makeTestKuberhealthy(t)\n\n\t// add a fake check to it\n\tfc := NewFakeCheck()\n\tt.Log(\"Adding fake check\")\n\tkh.AddCheck(fc)\n\n\tt.Log(\"Starting Kuberhealthy checks\")\n\tgo kh.Start(ctx)\n\t// give the checker time to make CRDs\n\tt.Log(\"Waiting for checks to run\")\n\ttime.Sleep(time.Second * 2)\n\tt.Log(\"Stopping Kuberhealthy checks\")\n\tkh.StopChecks()\n\n\t// now run our test against the web server handler\n\tt.Log(\"Simulating web request\")\n\trecorder := httptest.NewRecorder()\n\treq, err := http.NewRequest(\"GET\", \"/\", bytes.NewBufferString(\"\"))\n\tif err != nil {\n\t\tt.Fatal(\"Error creating request\", err)\n\t}\n\terr = kh.healthCheckHandler(recorder, req)\n\tif err != nil {\n\t\tt.Fatal(\"Error from health check handler:\", err)\n\t}\n\n\t// check the http status from the server\n\tt.Log(\"Checking status code\")\n\tif recorder.Code != http.StatusOK {\n\t\tt.Fatal(\"Bad response from handler\", recorder.Code)\n\t}\n\n\t// output the response from the server\n\tt.Log(\"Reading reponse body\")\n\tb, err := ioutil.ReadAll(recorder.Body)\n\tif err != nil {\n\t\tt.Fatal(\"Error reading response body\", err)\n\t}\n\n\tt.Log(string(b))\n\n}", "func TestGet_HttpClientReturns404(t *testing.T) {\n\tfs := 
fixture()\n\tfs.Client.Status = http.StatusNotFound\n\t_, err := fs.Get(\"\")\n\tif !fs.IsNotExist(err) {\n\t\tt.Fatalf(\"Expected IsNotExist(err) to be true; err: %v\", err)\n\t}\n}", "func TestEndpointURL(t *testing.T) {\n\t// these client calls should fail since we'll break the URL paths\n\tsimulator.Test(func(ctx context.Context, vc *vim25.Client) {\n\t\tlsim.BreakLookupServiceURLs()\n\n\t\t{\n\t\t\t_, err := ssoadmin.NewClient(ctx, vc)\n\t\t\tif err == nil {\n\t\t\t\tt.Error(\"expected error\")\n\t\t\t}\n\t\t\tif !strings.Contains(err.Error(), http.StatusText(404)) {\n\t\t\t\tt.Error(err)\n\t\t\t}\n\t\t}\n\n\t\t{\n\t\t\tc, err := sts.NewClient(ctx, vc)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\n\t\t\treq := sts.TokenRequest{\n\t\t\t\tUserinfo: url.UserPassword(\"[email protected]\", \"password\"),\n\t\t\t}\n\t\t\t_, err = c.Issue(ctx, req)\n\t\t\tif err == nil {\n\t\t\t\tt.Error(\"expected error\")\n\t\t\t}\n\t\t\tif !strings.Contains(err.Error(), http.StatusText(404)) {\n\t\t\t\tt.Error(err)\n\t\t\t}\n\t\t}\n\t})\n\n\t// these client calls should not fail\n\tsimulator.Test(func(ctx context.Context, vc *vim25.Client) {\n\t\t{\n\t\t\t// NewClient calls ServiceInstance methods\n\t\t\t_, err := ssoadmin.NewClient(ctx, vc)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t}\n\n\t\t{\n\t\t\tc, err := sts.NewClient(ctx, vc)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\n\t\t\treq := sts.TokenRequest{\n\t\t\t\tUserinfo: url.UserPassword(\"[email protected]\", \"password\"),\n\t\t\t}\n\n\t\t\t_, err = c.Issue(ctx, req)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t}\n\t})\n}", "func TestNoDbLog(t *testing.T) {\n\tl := &mockLogger{}\n\topt := Options{}\n\topt.Logger = l\n\n\topt.Errorf(\"test\")\n\trequire.Equal(t, \"ERROR: test\", l.output)\n\topt.Infof(\"test\")\n\trequire.Equal(t, \"INFO: test\", l.output)\n\topt.Warningf(\"test\")\n\trequire.Equal(t, \"WARNING: test\", l.output)\n}", "func NotFoundHandler() 
ServiceHttpHandler { return ServiceHttpHandler{Handler: NotFound} }", "func TestLoggerUnderLevel(t *testing.T) {\n\tb := new(bytes.Buffer)\n\n\th := LogTo(httpx.HandlerFunc(func(ctx context.Context, w http.ResponseWriter, r *http.Request) error {\n\t\tw.WriteHeader(201)\n\t\treturn nil\n\t}), stdLogger(logger.ERROR, b))\n\n\tctx := context.Background()\n\tresp := httptest.NewRecorder()\n\treq, _ := http.NewRequest(\"GET\", \"/\", nil)\n\n\tif err := h.ServeHTTPContext(ctx, resp, req); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tgot := b.String()\n\n\twant := \"\"\n\tif strings.Contains(got, want) != true {\n\t\tt.Fatalf(\"%s; want %s\", got, want)\n\t}\n}", "func TestPing(t *testing.T) {\n\tassert := assert.New(t)\n\tflag.Lookup(\"logtostderr\").Value.Set(\"true\")\n\n\tgin.SetMode(gin.TestMode)\n\trouter := gin.New()\n\n\trouter.GET(\"/ping\", PingRoute())\n\n\treq, _ := http.NewRequest(\"GET\", \"/ping\", nil)\n\tres := httptest.NewRecorder()\n\n\trouter.ServeHTTP(res, req)\n\tassert.Equal(204, res.Code)\n}", "func TestScaleClusterNotLoggedIn(t *testing.T) {\n\t// This server should not get any request, because we avoid unauthenticated requests.\n\t// That's why it issues an error in case it does.\n\tmockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tt.Log(\"TestScaleClusterNotLoggedIn mockServer request:\", r.Method, r.URL)\n\t\tt.Error(\"A request has been sent although we don't have a token.\")\n\t}))\n\tdefer mockServer.Close()\n\n\tconfigDir, _ := ioutil.TempDir(\"\", config.ProgramName)\n\tconfig.Initialize(configDir)\n\n\ttestArgs := scaleClusterArguments{\n\t\tapiEndpoint: mockServer.URL,\n\t\tclusterID: \"cluster-id\",\n\t}\n\n\terr := verifyScaleClusterPreconditions(testArgs, []string{testArgs.clusterID})\n\tif !IsNotLoggedInError(err) {\n\t\tt.Error(\"Expected notLoggedInError, got\", err)\n\t}\n\n}", "func TestProxyValid(t *testing.T) {\n\n\treq, err := http.NewRequest(\"GET\", \"/\", nil)\n\n\tif err != 
nil {\n\t\tt.Fatal(err)\n\t}\n\n\tw := httptest.NewRecorder()\n\tbeego.Debug(\"hola\")\n\t//ctx := context.NewContext()\n\t//beego.Debug(ctx)\n\t//ctx.Reset(w, req)\n\t//ctx.Input = context.NewInput()\n\n\t//ctx.Input.SetData(\"hola\", \"maria\")\n\t//beego.Debug(ctx.Input.GetData(\"hola\"))\n\n\t//date := beego.Date(time.Date(2016, 05, 18, 12, 37, 30, 0, gmt), time.UnixDate)\n\tbeego.BeeApp.Handlers.ServeHTTP(w, req)\n\n\tbeego.Trace(\"testing\", \"TestProxyValid\", \"Code[%d]\\n%s\", w.Code, w.Body.String())\n\n\tConvey(\"Subject: Test Station Endpoint\\n\", t, func() {\n\t\tConvey(\"Status Code Should Be 200\", func() {\n\t\t\tSo(w.Code, ShouldEqual, 200)\n\t\t})\n\t})\n\n}", "func mockNeverRun() bool { return false }", "func TestNonExistentHost(test *testing.T) {\n\tt := NewGomegaWithT(test)\n\n\t// Options that do not require administrative privileges.\n\topts := []nettrace.TraceOpt{\n\t\t&nettrace.WithLogging{},\n\t\t&nettrace.WithHTTPReqTrace{\n\t\t\tHeaderFields: nettrace.HdrFieldsOptWithValues,\n\t\t},\n\t\t&nettrace.WithSockTrace{},\n\t\t&nettrace.WithDNSQueryTrace{},\n\t}\n\tclient, err := nettrace.NewHTTPClient(nettrace.HTTPClientCfg{\n\t\tReqTimeout: 5 * time.Second,\n\t}, opts...)\n\tt.Expect(err).ToNot(HaveOccurred())\n\n\treq, err := http.NewRequest(\"GET\", \"https://non-existent-host.com\", nil)\n\tt.Expect(err).ToNot(HaveOccurred())\n\tresp, err := client.Do(req)\n\tt.Expect(err).To(HaveOccurred())\n\tt.Expect(resp).To(BeNil())\n\ttrace, _, err := client.GetTrace(\"non-existent host\")\n\tt.Expect(err).ToNot(HaveOccurred())\n\ttraceBeginAsRel := nettrace.Timestamp{IsRel: true, Rel: 0}\n\n\t// Dial trace\n\tt.Expect(trace.Dials).To(HaveLen(1)) // one failed Dial (DNS failed)\n\tdial := trace.Dials[0]\n\tt.Expect(dial.TraceID).ToNot(BeZero())\n\trelTimeIsInBetween(t, dial.DialBeginAt, traceBeginAsRel, trace.TraceEndAt)\n\trelTimeIsInBetween(t, dial.DialEndAt, dial.DialBeginAt, 
trace.TraceEndAt)\n\tt.Expect(dial.DstAddress).To(Equal(\"non-existent-host.com:443\"))\n\tt.Expect(dial.ResolverDials).ToNot(BeEmpty())\n\tfor _, resolvDial := range dial.ResolverDials {\n\t\trelTimeIsInBetween(t, resolvDial.DialBeginAt, dial.DialBeginAt, dial.DialEndAt)\n\t\trelTimeIsInBetween(t, resolvDial.DialEndAt, resolvDial.DialBeginAt, dial.DialEndAt)\n\t\tt.Expect(resolvDial.Nameserver).ToNot(BeZero())\n\t\tif !resolvDial.EstablishedConn.Undefined() {\n\t\t\tt.Expect(resolvDial.DialErr).To(BeZero())\n\t\t\tt.Expect(trace.UDPConns.Get(resolvDial.EstablishedConn)).ToNot(BeNil())\n\t\t}\n\t}\n\tt.Expect(dial.DialErr).ToNot(BeZero())\n\tt.Expect(dial.EstablishedConn).To(BeZero())\n\n\t// DNS trace\n\tt.Expect(trace.DNSQueries).ToNot(BeEmpty())\n\tfor _, dnsQuery := range trace.DNSQueries {\n\t\tt.Expect(dnsQuery.FromDial == dial.TraceID).To(BeTrue())\n\t\tt.Expect(dnsQuery.TraceID).ToNot(BeZero())\n\t\tudpConn := trace.UDPConns.Get(dnsQuery.Connection)\n\t\tt.Expect(udpConn).ToNot(BeNil())\n\n\t\tt.Expect(dnsQuery.DNSQueryMsgs).To(HaveLen(1))\n\t\tdnsMsg := dnsQuery.DNSQueryMsgs[0]\n\t\trelTimeIsInBetween(t, dnsMsg.SentAt, udpConn.SocketCreateAt, udpConn.ConnCloseAt)\n\t\tt.Expect(dnsMsg.Questions).To(HaveLen(1))\n\t\tt.Expect(dnsMsg.Questions[0].Name).To(HavePrefix(\"non-existent-host.com.\"))\n\t\tt.Expect(dnsMsg.Questions[0].Type).To(Or(\n\t\t\tEqual(nettrace.DNSResTypeA), Equal(nettrace.DNSResTypeAAAA)))\n\t\tt.Expect(dnsMsg.Truncated).To(BeFalse())\n\n\t\tt.Expect(dnsQuery.DNSReplyMsgs).To(HaveLen(1))\n\t\tdnsReply := dnsQuery.DNSReplyMsgs[0]\n\t\trelTimeIsInBetween(t, dnsReply.RecvAt, dnsMsg.SentAt, udpConn.ConnCloseAt)\n\t\tt.Expect(dnsReply.ID == dnsMsg.ID).To(BeTrue())\n\t\tt.Expect(dnsReply.RCode).To(Equal(nettrace.DNSRCodeNXDomain))\n\t\tt.Expect(dnsReply.Answers).To(BeEmpty())\n\t\tt.Expect(dnsReply.Truncated).To(BeFalse())\n\t}\n\n\t// UDP connection trace\n\tt.Expect(trace.UDPConns).ToNot(BeEmpty())\n\tfor _, udpConn := range trace.UDPConns 
{\n\t\tt.Expect(udpConn.TraceID).ToNot(BeZero())\n\t\tt.Expect(udpConn.FromDial == dial.TraceID).To(BeTrue())\n\t\trelTimeIsInBetween(t, udpConn.SocketCreateAt, dial.DialBeginAt, dial.DialEndAt)\n\t\trelTimeIsInBetween(t, udpConn.ConnCloseAt, udpConn.SocketCreateAt, dial.DialEndAt)\n\t\tt.Expect(net.ParseIP(udpConn.AddrTuple.SrcIP)).ToNot(BeNil())\n\t\tt.Expect(net.ParseIP(udpConn.AddrTuple.DstIP)).ToNot(BeNil())\n\t\tt.Expect(udpConn.AddrTuple.SrcPort).ToNot(BeZero())\n\t\tt.Expect(udpConn.AddrTuple.DstPort).ToNot(BeZero())\n\t\tt.Expect(udpConn.SocketTrace).ToNot(BeNil())\n\t\tt.Expect(udpConn.SocketTrace.SocketOps).ToNot(BeEmpty())\n\t\tfor _, socketOp := range udpConn.SocketTrace.SocketOps {\n\t\t\trelTimeIsInBetween(t, socketOp.CallAt, udpConn.SocketCreateAt, udpConn.ConnCloseAt)\n\t\t\trelTimeIsInBetween(t, socketOp.ReturnAt, socketOp.CallAt, udpConn.ConnCloseAt)\n\t\t}\n\t\tt.Expect(udpConn.Conntract).To(BeNil()) // WithConntrack requires root privileges\n\t\tt.Expect(udpConn.TotalRecvBytes).ToNot(BeZero())\n\t\tt.Expect(udpConn.TotalSentBytes).ToNot(BeZero())\n\t}\n\n\t// TCP connection trace\n\tt.Expect(trace.TCPConns).To(BeEmpty())\n\n\t// TLS tunnel trace\n\tt.Expect(trace.TLSTunnels).To(BeEmpty())\n\n\t// HTTP request trace\n\tt.Expect(trace.HTTPRequests).To(HaveLen(1))\n\thttpReq := trace.HTTPRequests[0]\n\tt.Expect(httpReq.TraceID).ToNot(BeZero())\n\tt.Expect(httpReq.TCPConn).To(BeZero())\n\tt.Expect(httpReq.ProtoMajor).To(BeEquivalentTo(1))\n\tt.Expect(httpReq.ProtoMinor).To(BeEquivalentTo(1))\n\trelTimeIsInBetween(t, httpReq.ReqSentAt, traceBeginAsRel, 
trace.TraceEndAt)\n\tt.Expect(httpReq.ReqError).ToNot(BeZero())\n\tt.Expect(httpReq.ReqMethod).To(Equal(\"GET\"))\n\tt.Expect(httpReq.ReqURL).To(Equal(\"https://non-existent-host.com\"))\n\tt.Expect(httpReq.ReqHeader).To(BeEmpty())\n\tt.Expect(httpReq.ReqContentLen).To(BeZero())\n\tt.Expect(httpReq.RespRecvAt.Undefined()).To(BeTrue())\n\tt.Expect(httpReq.RespStatusCode).To(BeZero())\n\tt.Expect(httpReq.RespHeader).To(BeEmpty())\n\tt.Expect(httpReq.RespContentLen).To(BeZero())\n\n\terr = client.Close()\n\tt.Expect(err).ToNot(HaveOccurred())\n}", "func getTest(w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprint(w, \"test\")\n}", "func notFoundLogger(logger log.Logger) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tlogger.Log(\"err\", http.StatusText(http.StatusNotFound), \"url\", r.URL)\n\t\tw.WriteHeader(http.StatusNotFound)\n\t})\n}", "func TestServiceWith_WithoutMatchingPattern(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"collection\", res.GetResource(func(r res.GetRequest) { r.NotFound() }))\n\t}, func(s *restest.Session) {\n\t\terr := s.Service().With(\"test.model\", func(r res.Resource) {})\n\t\tif err == nil {\n\t\t\tt.Errorf(\"expected With to return an error, but it didn't\")\n\t\t}\n\t})\n}", "func TestLoggingSidecarInjectionRequired(t *testing.T) {\n\ttestCases := []struct {\n\t\tclusterSettings map[string]string\n\t\texpected bool\n\t}{\n\t\t{map[string]string{\"LOGGING_SIDECAR\": \"true\"}, true},\n\t\t{map[string]string{\"LOGGING_SIDECAR\": \"false\"}, false},\n\t\t{map[string]string{\"REGION\": \"va6\"}, false},\n\t}\n\tfor _, tc := range testCases {\n\t\tt.Run(fmt.Sprintf(\"Logging sidecar setting: %+v\", tc), func(t *testing.T) {\n\t\t\tresult := loggingSidecarInjectionRequired(tc.clusterSettings)\n\t\t\tif result != tc.expected {\n\t\t\t\tt.Errorf(\"Test Failed: Unexpected result %t, expected %t\", result, tc.expected)\n\t\t\t}\n\t\t})\n\t}\n}", "func 
Test_GoogleContainerRegistry_WhenNoAuth(t *testing.T) {\n\tvar called bool\n\tdownstream := newDownstream(t, expectedGoogleContainerRegistry, &called)\n\tdefer downstream.Close()\n\n\tendpoint := Endpoint{Source: GoogleContainerRegistry, KeyPath: \"gcr_key\", GCR: nil}\n\tfp, handler, err := HandlerFromEndpoint(\"test/fixtures\", downstream.URL, endpoint)\n\tassert.NoError(t, err)\n\n\thookServer := httptest.NewTLSServer(handler)\n\tdefer hookServer.Close()\n\n\tc := hookServer.Client()\n\treq, err := http.NewRequest(\"POST\", hookServer.URL+\"/hook/\"+fp, bytes.NewReader(loadFixture(t, \"gcr_payload\")))\n\tassert.NoError(t, err)\n\n\tres, err := c.Do(req)\n\tassert.NoError(t, err)\n\tassert.True(t, called)\n\tassert.Equal(t, 200, res.StatusCode)\n\tassert.Empty(t, res.Body)\n}", "func configureServer(s *http.Server, _, _ string) {\n\t// Turn-off random logging by Go net/http\n\ts.ErrorLog = log.New(&nullWriter{}, \"\", 0)\n}", "func Example() {\n\tlogger := logging.ConfigureDevelopment(os.Stderr)\n\tsink := blackhole.NewSink()\n\ts, err := http.NewServer(http.Config{\n\t\tPort: optional.Int(3033),\n\t\tHost: optional.String(\"127.0.0.1\"),\n\t\tSink: sink,\n\t})\n\tif err != nil {\n\t\tlogger.Fatal(err.Error())\n\t}\n\tlogger.Fatal(s.ListenAndServe().Error())\n}", "func mustStartDebugServer(fsc *frontendServerConfig) {\n\t// Start the internal server on the internal port if requested.\n\tif fsc.DebugPort != \"\" {\n\t\t// Add the profiling endpoints to the internal router.\n\t\tinternalRouter := mux.NewRouter()\n\n\t\t// Set up the health check endpoint.\n\t\tinternalRouter.HandleFunc(\"/healthz\", httputils.ReadyHandleFunc)\n\n\t\t// Register pprof handlers\n\t\tinternalRouter.HandleFunc(\"/debug/pprof/\", pprof.Index)\n\t\tinternalRouter.HandleFunc(\"/debug/pprof/symbol\", pprof.Symbol)\n\t\tinternalRouter.HandleFunc(\"/debug/pprof/profile\", pprof.Profile)\n\t\tinternalRouter.HandleFunc(\"/debug/pprof/{profile}\", pprof.Index)\n\n\t\tgo func() 
{\n\t\t\tsklog.Infof(\"Internal server on http://127.0.0.1\" + fsc.DebugPort)\n\t\t\tsklog.Fatal(http.ListenAndServe(fsc.DebugPort, internalRouter))\n\t\t}()\n\t}\n}", "func Test_Proxy_Empty_Upstream_Servers(t *testing.T) {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tutils.AssertEqual(t, \"Servers cannot be empty\", r)\n\t\t}\n\t}()\n\tapp := fiber.New()\n\tapp.Use(Balancer(Config{Servers: []string{}}))\n}", "func TestApi_ListMotorcycles_Empty(t *testing.T) {\n\n\t// ARRANGE\n\n\t// Configure the application...\n\troles := map[authorizationrole.AuthorizationRole]bool{\n\t\tauthorizationrole.AdminAuthorizationRole: true,\n\t}\n\n\tauthService, _ := security.NewAuthService(true, roles)\n\tmotorcycleRepository, _ := repository.NewMotorcycleRepository()\n\trouter := httprouter.New()\n\n\t// Create an instance of the API web service.\n\tourApi, err := NewApi(roles, authService, motorcycleRepository, router)\n\tif err != nil {\n\t\tprintln(\"Failed to create an instance of the API web service: &s\", err.Error())\n\t\treturn\n\t}\n\n\t// An http handler wrapper around httprouter's handler. 
It permits us to use\n\t// the test server and httpExpected.\n\thandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tourApi.ListMotorcyclesHandler(w, r, httprouter.Params{})\n\t})\n\n\tserver := httptest.NewServer(handler)\n\tdefer server.Close()\n\n\t// ACT\n\tresp, err := http.Get(server.URL)\n\n\t// ASSERT\n\tassert.True(t, resp.StatusCode == http.StatusOK)\n}", "func TestGetFailure0(t *testing.T) {\n\tisTesting = true\n\n\tvar params = make(map[string]string)\n\tparams[\"id\"] = \"invalid-id\"\n\n\tvar request = Request{\n\t\tPath: \"/api/devices\",\n\t\tPathParameters: params,\n\t\tHTTPMethod: \"GET\",\n\t}\n\tvar response, _ = Handler(request)\n\n\tif response.StatusCode != 404 {\n\t\tt.Errorf(\"response status code has to be 404 but is %d\", response.StatusCode)\n\t}\n}", "func TestWebServerNotOK(t *testing.T) {\n\n\tctx, _ := context.WithCancel(context.Background())\n\n\t// create a new kuberhealthy\n\tkh := makeTestKuberhealthy(t)\n\n\t// add a fake check to it with a not ok return\n\tfc := NewFakeCheck()\n\tdesiredError := randomdata.SillyName()\n\tfc.Errors = []string{desiredError}\n\tfc.OK = false\n\tkh.AddCheck(fc)\n\n\t// run the checker for enough time to make and update CRD entries, then stop it\n\tgo kh.Start(ctx)\n\ttime.Sleep(time.Second * 5)\n\tkh.StopChecks()\n\n\t// now run our test against the web server handler\n\trecorder := httptest.NewRecorder()\n\treq, err := http.NewRequest(\"GET\", \"/\", bytes.NewBufferString(\"\"))\n\tif err != nil {\n\t\tt.Fatal(\"Error creating request\", err)\n\t}\n\terr = kh.healthCheckHandler(recorder, req)\n\tif err != nil {\n\t\tt.Fatal(\"Error from health check handler:\", err)\n\t}\n\n\t// check the http status from the server\n\tif recorder.Code != http.StatusOK {\n\t\tt.Fatal(\"Bad response from handler\", recorder.Code)\n\t}\n\n\t// output the response from the server\n\tb, err := ioutil.ReadAll(recorder.Body)\n\tif err != nil {\n\t\tt.Fatal(\"Error reading response body\", 
err)\n\t}\n\tt.Log(string(b))\n\n\t// decode the response body to validate the contents\n\tvar state health.State\n\tjson.Unmarshal(b, &state)\n\n\tif len(state.Errors) < 1 {\n\t\tt.Fatal(\"The expected error message was not set.\")\n\t}\n\tif state.Errors[0] != desiredError {\n\t\tt.Fatal(\"The expected error message was not set. Got\", state.Errors[0], \"wanted\", desiredError)\n\t}\n\n\t// check that OK is false\n\tif state.OK != false {\n\t\tt.Fatal(\"Did not observe status page failure when one was expected\")\n\t}\n\n}", "func TestProbeHTTPSHTTP(t *testing.T) {\n\tserver := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprintln(w, \"Hello world\")\n\t}))\n\tserver.Start()\n\tdefer server.Close()\n\n\tregistry := prometheus.NewRegistry()\n\n\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\tdefer cancel()\n\n\tif err := ProbeHTTPS(ctx, newTestLogger(), server.URL, config.Module{}, registry); err == nil {\n\t\tt.Fatalf(\"expected error, but err was nil\")\n\t}\n}", "func GloballyDisableDebugLogForTest() {\r\n\tglobalLogger.consoleLevel.SetLevel(zapcore.ErrorLevel)\r\n}", "func TestHttp404(t *testing.T) {\n\tjolokia := genJolokiaClientStub(invalidJSON, 404, Servers, []string{HeapMetric})\n\n\tvar acc testutil.Accumulator\n\terr := acc.GatherError(jolokia.Gather)\n\n\trequire.Error(t, err)\n\trequire.Equal(t, 0, len(acc.Metrics))\n\trequire.Contains(t, err.Error(), \"has status code 404\")\n}", "func (suite *SingletonFlushTestSuite) TestSingletonContainerFlush() {\n\t// Pre-conditions before test\n\tsuite.ServiceID = uuid.NewString()\n\tsuite.ServiceToken = uuid.NewString()\n\t// Configure the envoy template with ContainerLimit flush.\n\tconfigErr := configureSingletonFlush(\"ContainerLimit\", \"100\", \"60\", suite.ServiceID, suite.ServiceToken)\n\trequire.Nilf(suite.T(), configErr, \"Error configuring envoy.yaml for container flush : %v\", configErr)\n\t// Create service, apps, 
metrics and usage limits in apisonator\n\tsuite.initializeApisonatorState()\n\t// Start the proxy.\n\tupErr := StartProxy(\"./\", \"./temp.yaml\")\n\trequire.Nilf(suite.T(), upErr, \"Error starting proxy: %v\", upErr)\n\trequire.Eventually(suite.T(), func() bool {\n\t\tres, err := http.Get(\"http://localhost:9095/\")\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\t\tdefer res.Body.Close()\n\t\treturn true\n\t}, 15*time.Second, 1*time.Second, \"Envoy has not started\")\n\t// Test scenario begins here\n\tclient := &http.Client{}\n\treq, errReq := http.NewRequest(\"GET\", \"http://127.0.0.1:9095/\", nil)\n\trequire.Nilf(suite.T(), errReq, \"Error creating the HTTP request: %v\", errReq)\n\treq.Header = http.Header{\n\t\t\"Host\": []string{\"localhost\"},\n\t\t\"x-app-id\": []string{\"test-app-id-1\"},\n\t}\n\tfor i := 0; i < 4; i++ {\n\t\tres, _ := client.Do(req)\n\t\tfmt.Printf(\"Response: %v\\n\", res)\n\t}\n\ttime.Sleep(3 * time.Second)\n\trequire.Eventually(suite.T(), func() bool {\n\t\tusage, usageErr := getApisonatorUsage(suite.ServiceID, suite.ServiceToken, \"test-app-id-1\")\n\t\tif usageErr != nil {\n\t\t\tfmt.Printf(\"Error fetching apisonator usage: %v\", usageErr)\n\t\t\treturn false\n\t\t}\n\t\tif usage.Current == int64(4) {\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}, 5*time.Second, 1*time.Second, \"Invalid number for usages for the metric hits in apisonator\")\n}", "func MustService(connectURL string) *Service {\n\trv, err := NewService(connectURL)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn rv\n}", "func SkipIfNoRoutingAPI() {\n\t// TODO: #161159794 remove this function and check a nicer error message when available\n\tvar response struct {\n\t\tRoutingEndpoint string `json:\"routing_endpoint\"`\n\t}\n\tCurl(&response, \"/v2/info\")\n\n\tif response.RoutingEndpoint == \"\" {\n\t\tSkip(\"Test requires routing endpoint on /v2/info\")\n\t}\n}", "func SkipHealthEndpoint(r *http.Request) bool {\n\treturn r.URL.Path == \"/health\"\n}", "func 
(suite *HealthSuite) TestSwaggerRoute() {\n\treq, _ := http.NewRequest(\"GET\", \"/\", nil)\n\tsuite.Router.ServeHTTP(suite.Response, req)\n\n\tsuite.Equal(http.StatusMovedPermanently, suite.Response.Code)\n}", "func (h Handler) TestEndpoint() error {\n\tr, err := http.Get(h.url)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif r.StatusCode != 200 {\n\t\treturn errors.New(\"Endpoint not replying typical 200 answer on ping\")\n\t}\n\n\treturn nil\n}", "func shouldExportAsHeadlessService(endpoints *corev1.Endpoints, log *logging.Entry) bool {\n\tfor _, subset := range endpoints.Subsets {\n\t\tfor _, addr := range subset.Addresses {\n\t\t\tif addr.Hostname != \"\" {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\n\t\tfor _, addr := range subset.NotReadyAddresses {\n\t\t\tif addr.Hostname != \"\" {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\tlog.Infof(\"Service %s/%s should not be exported as headless: no named addresses in its endpoints object\", endpoints.Namespace, endpoints.Name)\n\treturn false\n}", "func ReqLogger(rw *http.ResponseWriter, responseStatus *int, URL *url.URL, Method string, start *time.Time) {\n\tif *responseStatus != 200 && *responseStatus != 308 {\n\t\t(*rw).WriteHeader(*responseStatus)\n\t}\n\tlog.Printf(\"%s %s %d %s\\n\", Method, URL, *responseStatus, time.Since(*start))\n}", "func WithoutLogLocation() ServeOpt {\n\treturn serveConfigFunc(func(in *ServeConfig) error {\n\t\tin.disableLogLocation = true\n\t\treturn nil\n\t})\n}", "func (s *TrackerSuite) TestStartNotStopped() {\n\n\tevent := s.service.StartNew()\n\tassert.Equal(s.T(), ErrorStart, s.service.Start(event))\n}", "func TestCreateRouter(t *testing.T) {\n\t// Create router\n\tt.Log(\"Creating router\")\n\tdata := makeRouterPayload(\n\t\tfilepath.Join(\"testdata\", \"create_router_nop_logger_nop_exp.json.tmpl\"),\n\t\tglobalTestContext)\n\n\twithDeployedRouter(t, data,\n\t\tfunc(router *models.Router) {\n\t\t\tt.Log(\"Testing router endpoint: POST \" + 
router.Endpoint)\n\t\t\twithRouterResponse(t,\n\t\t\t\thttp.MethodPost,\n\t\t\t\trouter.Endpoint,\n\t\t\t\tnil,\n\t\t\t\t\"{}\",\n\t\t\t\tfunc(response *http.Response, responsePayload []byte) {\n\t\t\t\t\tassert.Equal(t, http.StatusOK, response.StatusCode,\n\t\t\t\t\t\t\"Unexpected response (code %d): %s\",\n\t\t\t\t\t\tresponse.StatusCode, string(responsePayload))\n\t\t\t\t\tactualResponse := gjson.GetBytes(responsePayload, \"json.response\").String()\n\t\t\t\t\texpectedResponse := `{\n\t\t\t\t\t \"experiment\": {},\n\t\t\t\t\t \"route_responses\": [\n\t\t\t\t\t\t{\n\t\t\t\t\t\t \"data\": {\n\t\t\t\t\t\t\t\"version\": \"control\"\n\t\t\t\t\t\t },\n\t\t\t\t\t\t \"is_default\": true,\n\t\t\t\t\t\t \"route\": \"control\"\n\t\t\t\t\t\t}\n\t\t\t\t\t ]\n\t\t\t\t\t}`\n\t\t\t\t\tassert.JSONEq(t, expectedResponse, actualResponse)\n\t\t\t\t})\n\n\t\t\tt.Log(\"Test endpoints for router logs\")\n\t\t\tbaseURL, projectID, routerID := globalTestContext.APIBasePath, globalTestContext.ProjectID, router.ID\n\t\t\turl := fmt.Sprintf(\"%s/projects/%d/routers/%d/logs\", baseURL, projectID, routerID)\n\t\t\tcomponentTypes := []string{\"\", \"router\", \"ensembler\", \"enricher\"}\n\t\t\tvar podLogs []service.PodLog\n\n\t\t\tfor _, c := range componentTypes {\n\t\t\t\tqueryString := \"\"\n\t\t\t\tif c != \"\" {\n\t\t\t\t\tqueryString = \"?component_type=\" + c\n\t\t\t\t}\n\t\t\t\tt.Log(\"GET\", url+queryString)\n\t\t\t\tresp, err := http.Get(url + queryString)\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.Equal(t, http.StatusOK, resp.StatusCode)\n\t\t\t\tpodLogs = getPodLogs(t, resp)\n\t\t\t\tassert.Greater(t, len(podLogs), 0)\n\t\t\t}\n\t\t},\n\t\tnil,\n\t)\n}", "func setup() {\n\t// test server\n\tmux = http.NewServeMux()\n\tmux.Handle(\"/foo1\", http.HandlerFunc(limitResponseHandler))\n\tserver = httptest.NewServer(mux)\n\n\t// appnexus client configured to use test server\n\tclient, _ = NewClient(server.URL)\n\twaiter = false\n}", "func TestNatsServer_NoRoutesSpecified(t 
*testing.T) {\n\tconfig := &server.NATSServerConfig{\n\t\tCluster: messaging.ClusterName(\"osyterpack-test\"),\n\n\t\tTLSConfig: serverTLSConfig(),\n\t\tClusterTLSConfig: clusterTLSConfig(),\n\t\tLogLevel: server.DEBUG,\n\t}\n\tnatsServer, err := server.NewNATSServer(config)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create new NATSServer : %v\", err)\n\t}\n\tnatsServer.Start()\n\tdefer natsServer.Shutdown()\n\tlogNATServerInfo([]server.NATSServer{natsServer}, \"TestNatsServer_NoRoutesSpecified\")\n}", "func TestComposeService_Logs_ServiceFiltering(t *testing.T) {\n\tmockCtrl := gomock.NewController(t)\n\tdefer mockCtrl.Finish()\n\n\tapi, cli := prepareMocks(mockCtrl)\n\ttested := composeService{\n\t\tdockerCli: cli,\n\t}\n\n\tname := strings.ToLower(testProject)\n\n\tctx := context.Background()\n\tapi.EXPECT().ContainerList(ctx, moby.ContainerListOptions{\n\t\tAll: true,\n\t\tFilters: filters.NewArgs(oneOffFilter(false), projectFilter(name), hasConfigHashLabel()),\n\t}).Return(\n\t\t[]moby.Container{\n\t\t\ttestContainer(\"serviceA\", \"c1\", false),\n\t\t\ttestContainer(\"serviceA\", \"c2\", false),\n\t\t\t// serviceB will be filtered out by the project definition to\n\t\t\t// ensure we ignore \"orphan\" containers\n\t\t\ttestContainer(\"serviceB\", \"c3\", false),\n\t\t\ttestContainer(\"serviceC\", \"c4\", false),\n\t\t},\n\t\tnil,\n\t)\n\n\tfor _, id := range []string{\"c1\", \"c2\", \"c4\"} {\n\t\tid := id\n\t\tapi.EXPECT().\n\t\t\tContainerInspect(anyCancellableContext(), id).\n\t\t\tReturn(\n\t\t\t\tmoby.ContainerJSON{\n\t\t\t\t\tContainerJSONBase: &moby.ContainerJSONBase{ID: id},\n\t\t\t\t\tConfig: &container.Config{Tty: true},\n\t\t\t\t},\n\t\t\t\tnil,\n\t\t\t)\n\t\tapi.EXPECT().ContainerLogs(anyCancellableContext(), id, gomock.Any()).\n\t\t\tReturn(io.NopCloser(strings.NewReader(\"hello \"+id+\"\\n\")), nil).\n\t\t\tTimes(1)\n\t}\n\n\t// this simulates passing `--filename` with a Compose file that does NOT\n\t// reference `serviceB` even though it has 
running services for this proj\n\tproj := &types.Project{\n\t\tServices: types.Services{\n\t\t\t{Name: \"serviceA\"},\n\t\t\t{Name: \"serviceC\"},\n\t\t},\n\t}\n\tconsumer := &testLogConsumer{}\n\topts := compose.LogOptions{\n\t\tProject: proj,\n\t}\n\terr := tested.Logs(ctx, name, consumer, opts)\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, []string{\"hello c1\"}, consumer.LogsForContainer(\"c1\"))\n\trequire.Equal(t, []string{\"hello c2\"}, consumer.LogsForContainer(\"c2\"))\n\trequire.Empty(t, consumer.LogsForContainer(\"c3\"))\n\trequire.Equal(t, []string{\"hello c4\"}, consumer.LogsForContainer(\"c4\"))\n}", "func init() {\n\tDisableLog()\n}", "func init() {\n\tDisableLog()\n}", "func init() {\n\tDisableLog()\n}", "func TestLogging(t *testing.T) {\n\tt.Parallel()\n\tt.Run(\"log\", log)\n\tt.Run(\"logServiceErrors\", logServiceErrors)\n\tt.Run(\"logDeviceErrors\", logDeviceErrors)\n}", "func TestServiceDelete(t *testing.T) {\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"}),\n\t}, true, true, nil)\n\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tIPFamilies: []slim_core_v1.IPFamily{\n\t\t\t\t\tslim_core_v1.IPv4Protocol,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\n\tvar svcIP string\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif net.ParseIP(svc.Status.LoadBalancer.Ingress[0].IP).To4() == 
nil {\n\t\t\tt.Error(\"Expected service to receive a IPv4 address\")\n\t\t\treturn true\n\t\t}\n\n\t\tsvcIP = svc.Status.LoadBalancer.Ingress[0].IP\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status to be updated\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tif !fixture.lbIPAM.rangesStore.ranges[0].allocRange.Has(net.ParseIP(svcIP)) {\n\t\tt.Fatal(\"Service IP hasn't been allocated\")\n\t}\n\n\terr := fixture.svcClient.Services(\"default\").Delete(context.Background(), \"service-a\", meta_v1.DeleteOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttime.Sleep(100 * time.Millisecond)\n\n\tif fixture.lbIPAM.rangesStore.ranges[0].allocRange.Has(net.ParseIP(svcIP)) {\n\t\tt.Fatal(\"Service IP hasn't been released\")\n\t}\n}", "func TestLoggingDebugToFileConfig(t *testing.T) {\n\n\t/*declare settings*/\n\tmaxAge := \"1h\"\n\tflushConfig := csconfig.FlushDBCfg{\n\t\tMaxAge: &maxAge,\n\t}\n\tdbconfig := csconfig.DatabaseCfg{\n\t\tType: \"sqlite\",\n\t\tDbPath: \"./ent\",\n\t\tFlush: &flushConfig,\n\t}\n\tcfg := csconfig.LocalApiServerCfg{\n\t\tListenURI: \"127.0.0.1:8080\",\n\t\tLogMedia: \"file\",\n\t\tLogDir: \".\",\n\t\tDbConfig: &dbconfig,\n\t}\n\tlvl := log.DebugLevel\n\texpectedFile := \"./crowdsec_api.log\"\n\texpectedLines := []string{\"/test42\"}\n\tcfg.LogLevel = &lvl\n\n\tos.Remove(\"./crowdsec.log\")\n\tos.Remove(expectedFile)\n\n\t// Configure logging\n\tif err := types.SetDefaultLoggerConfig(cfg.LogMedia, cfg.LogDir, *cfg.LogLevel); err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\tapi, err := NewServer(&cfg)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create api : %s\", err)\n\t}\n\tif api == nil {\n\t\tt.Fatalf(\"failed to create api #2 is nbill\")\n\t}\n\n\tw := httptest.NewRecorder()\n\treq, _ := http.NewRequest(\"GET\", \"/test42\", 
nil)\n\treq.Header.Set(\"User-Agent\", UserAgent)\n\tapi.router.ServeHTTP(w, req)\n\tassert.Equal(t, 404, w.Code)\n\t//wait for the request to happen\n\ttime.Sleep(500 * time.Millisecond)\n\n\t//check file content\n\tdata, err := ioutil.ReadFile(expectedFile)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to read file : %s\", err)\n\t}\n\n\tfor _, expectedStr := range expectedLines {\n\t\tif !strings.Contains(string(data), expectedStr) {\n\t\t\tt.Fatalf(\"expected %s in %s\", expectedStr, string(data))\n\t\t}\n\t}\n\n\tos.Remove(\"./crowdsec.log\")\n\tos.Remove(expectedFile)\n\n}", "func logSetup() {\n\n\tlog.Printf(\"Server will run on: %s\\n\", getListenAddress())\n}", "func logSetup() {\n\tlog.Printf(\"Server will run on: %s\\n\", getListenAddress())\n}", "func TestPanicInHandler(t *testing.T) {\n\tbuffer := new(strings.Builder)\n\trouter := New()\n\trouter.Use(RecoveryWithWriter(buffer))\n\trouter.GET(\"/recovery\", func(_ *Context) {\n\t\tpanic(\"Oupps, Houston, we have a problem\")\n\t})\n\t// RUN\n\tw := PerformRequest(router, \"GET\", \"/recovery\")\n\t// TEST\n\tassert.Equal(t, http.StatusInternalServerError, w.Code)\n\tassert.Contains(t, buffer.String(), \"panic recovered\")\n\tassert.Contains(t, buffer.String(), \"Oupps, Houston, we have a problem\")\n\tassert.Contains(t, buffer.String(), t.Name())\n\tassert.NotContains(t, buffer.String(), \"GET /recovery\")\n\n\t// Debug mode prints the request\n\tSetMode(DebugMode)\n\t// RUN\n\tw = PerformRequest(router, \"GET\", \"/recovery\")\n\t// TEST\n\tassert.Equal(t, http.StatusInternalServerError, w.Code)\n\tassert.Contains(t, buffer.String(), \"GET /recovery\")\n\n\tSetMode(TestMode)\n}", "func notFound(rw http.ResponseWriter, r *http.Request) {\n\n}", "func healthz(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusNoContent)\n}", "func TestMiddlewareWithEmpty(t *testing.T) {\n\thandler := WithEmpty(nil, http.HandlerFunc(testHandler))\n\n\treq := httptest.NewRequest(http.MethodGet, \"/api/v1/\", 
nil)\n\tw := httptest.NewRecorder()\n\n\thandler.ServeHTTP(w, req)\n\n\t// Check the status code is what we expect.\n\tif status := w.Code; status != http.StatusOK {\n\t\tt.Errorf(\"handler returned wrong status code: got %v want %v\", status, http.StatusOK)\n\t}\n\t// Check the response body is what we expect.\n\texpected := `{\"alive\": true}`\n\tif w.Body.String() != expected {\n\t\tt.Errorf(\"handler returned unexpected body: got %v want %v\", w.Body.String(), expected)\n\t}\n\n}", "func TestIndex_badpath(t *testing.T) {\n\ttemplateString := \"\"\n\ttestTempl := template.Must(template.New(\"test\").Parse(templateString))\n\n\tdone := make(chan struct{})\n\tdefer close(done)\n\n\tlogger := log.New(ioutil.Discard, \"\", 0)\n\tts := httptest.NewServer(Index(logger, \"not-a-folder\", done, testTempl))\n\tdefer ts.Close()\n\n\tres, err := http.Get(ts.URL)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tassert.Equal(t, 500, res.StatusCode, \"got wrong response\")\n}" ]
[ "0.6161699", "0.59672475", "0.5942423", "0.57929426", "0.578552", "0.57729983", "0.5761654", "0.5737634", "0.57193595", "0.5637695", "0.56319416", "0.56254923", "0.56139284", "0.55134445", "0.55043334", "0.54994684", "0.54989934", "0.54716086", "0.54499424", "0.54347134", "0.54209197", "0.53971076", "0.53767943", "0.5370934", "0.5364374", "0.5354081", "0.53509283", "0.5330663", "0.53159726", "0.53145105", "0.52644944", "0.5261657", "0.52414906", "0.5234722", "0.5233472", "0.52285767", "0.52211624", "0.52154255", "0.5214367", "0.5211625", "0.52096426", "0.52006185", "0.519417", "0.51916456", "0.519075", "0.5189803", "0.5181905", "0.51731986", "0.5160869", "0.51561195", "0.51428354", "0.51379323", "0.5135283", "0.51299644", "0.5129516", "0.51272833", "0.51261985", "0.51215035", "0.5119975", "0.5117794", "0.5114372", "0.5108093", "0.5102655", "0.509627", "0.5093606", "0.508745", "0.50728697", "0.507021", "0.50620246", "0.50604373", "0.5059392", "0.50485986", "0.50420916", "0.5037468", "0.5014649", "0.50137436", "0.5009618", "0.500752", "0.5001567", "0.49976423", "0.49966562", "0.49925622", "0.4985219", "0.49744633", "0.4970716", "0.4968005", "0.49640188", "0.4960932", "0.4960932", "0.4960932", "0.49595568", "0.49507412", "0.4941806", "0.4939115", "0.49375778", "0.49203414", "0.49194142", "0.4915018", "0.49134755", "0.4911218" ]
0.72680014
0
Test that Logger returns the logger set with SetLogger
func TestServiceSetLogger(t *testing.T) { s := res.NewService("test") l := logger.NewMemLogger() s.SetLogger(l) if s.Logger() != l { t.Errorf("expected Logger to return the logger passed to SetLogger, but it didn't") } s.Handle("model", res.GetResource(func(r res.GetRequest) { r.NotFound() })) session := restest.NewSession(t, s, restest.WithKeepLogger) defer session.Close() }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Logger() spidomtr.RunnerHandler {\n\treturn &TestLogger{}\n}", "func GetLogger() *log.Logger { return std.GetLogger() }", "func SetLogger(l utils.Logger) {\n\tlog = l\n}", "func SetLogger(l Logger) {\n\tlog = l\n}", "func SetLogger(l logger.Logger) {\n\tlog = l\n}", "func SetLogger(logger logger) {\n\tlog = logger\n}", "func setLogger(logClient logger.Interface) {\n\tlog = logClient\n}", "func SetLogger(l *log.Logger) {\n\tLog = l\n}", "func UseLogger(logger logging.Logger) {\n\tlog = logger\n}", "func SetLogger(l Logger) {\n\tmainLogger = l\n}", "func SetLogger(l Logger) {\n\tdLog.Fulfill(l)\n}", "func loggerForTest() log.Logger {\n\treturn log.NewLogger(log.Debug, nil, nil)\n}", "func GetLogger() (Logger, error) {\n return &dummyLogger{}, nil\n}", "func TestLogger(t *testing.T) *logrus.Entry {\n\treturn logrus.WithField(\"test\", t.Name())\n}", "func SetLogger(l *logger.Logger) {\n\tlog = l\n}", "func (f *FakeOutput) Logger() *zap.SugaredLogger { return f.SugaredLogger }", "func SetLogger(logger Logger) {\n\tlog = logger\n}", "func SetLogger(lg Logger) {\n\tlogger = lg\n}", "func SetLogger(logger logrus.FieldLogger) {\n\tlog = logger\n}", "func SetLogger(logger logrus.FieldLogger) {\n\tlog = logger\n}", "func Getlogger() Logger {\r\n\treturn log\r\n}", "func SetLogger(newLogger *log.Logger) {\n\tusedLogger = newLogger\n}", "func getTestLogger() logging.Logger {\n\treturn logging.ConsoleLogger{SetTimeStamp: false}\n}", "func SetLogger(l *logrus.Logger) {\n\tlog = l\n}", "func SetLogger(l log.Logger) {\n\tmu.Lock()\n\tlogger = l\n\tmu.Unlock()\n}", "func SetLogger(l logr.Logger) {\n\tsingleton.Propagate(l)\n}", "func TestLogger(t *testing.T) Logger {\n\treturn &testLogger{\n\t\tt: t,\n\t}\n}", "func SetLogger(l log.Logger) {\n\tlogger = l\n}", "func SetLogger(l log.Logger) {\n\tlogger = l\n}", "func setMockLogger(l logger.Level) <-chan []byte {\n\tmw, errChan := newMockWriter()\n\tmockLogger := logger.NewLogger(\n\t\tlogger.LoggerConfig{\n\t\t\tWriter: 
mw,\n\t\t\tLevel: logger.NewAtomicLevelAt(l),\n\t\t\tEncoder: logger.NewJSONEncoder(logger.NewProductionConfig().EncoderConfig),\n\t\t},\n\t)\n\tlogger.SetLogger(mockLogger)\n\treturn errChan\n}", "func SetLogger(logger *logging.Logger) {\n\tlog = logger\n}", "func SetLogger(lgr Logger) {\n\tlogger = lgr\n}", "func Test() Logger {\n\treturn Logger{\n\t\tout: &bytes.Buffer{},\n\t\terr: &bytes.Buffer{},\n\t}\n}", "func SetLogger(l Logger) {\n\tlogger = l\n}", "func SetLogger(l Logger) {\n\tlogger = l\n}", "func SetLogger(l Logger) {\n\tlogger = l\n}", "func SetLogger(l Logger) {\n\tlogger = l\n}", "func SetLogger(l LeveledLogger) {\n\tlogger = l\n}", "func GetLogger() Logger {\n\treturn log\n}", "func UseLogger(logger dex.Logger) {\n\tlog = logger\n}", "func UseLogger(logger seelog.LoggerInterface) {\n\tlog = logger\n}", "func GetTestLogger() log.Logger {\n\tlogger := log.New(\"Alarmie\", \"Test Logger\")\n\tlogHandler := log.LvlFilterHandler(log.LvlDebug, log.StdoutHandler)\n\tlogger.SetHandler(logHandler)\n\treturn logger\n}", "func SetLogger(log interface{}) {\n\tvar entry logger.LogEntry\n\n\tswitch log.(type) {\n\tcase *zap.Logger:\n\t\tlog, ok := log.(*zap.Logger)\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\t\tentry = logger.NewZap(log)\n\tcase *logrus.Logger:\n\t\tlog, ok := log.(*logrus.Logger)\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\t\tentry = logger.NewLogrus(log)\n\tdefault:\n\t\treturn\n\t}\n\n\tLoggerV3 = entry.WithFields(loggerV3Fields)\n}", "func TestLogger_SetHandler(t *testing.T) {\n\tassert := asst.New(t)\n\n\tlogger := NewFunctionLogger(nil)\n\tth := NewTestHandler()\n\tlogger.SetHandler(th)\n\tlogger.Info(\"hello logger\")\n\tassert.True(th.HasLog(InfoLevel, \"hello logger\"))\n\t// TODO: test time ...\n}", "func (suite *PopTestSuite) Logger() *zap.Logger {\n\tif suite.logger == nil {\n\t\tsuite.logger = zaptest.NewLogger(suite.T())\n\t}\n\treturn suite.logger\n}", "func SetLogger(ctx context.Context, logger *logrus.Entry) {\n\tfields := 
rootlessLog.Data\n\trootlessLog = logger.WithFields(fields)\n}", "func SetLogger(logger *logrus.Logger) {\n\tlog = logger\n}", "func SetLogger(l StdLogger) {\n\tlogger = &prefixLogger{StdLogger: l, prefix: \"zerodt:\"}\n}", "func SetLogger(l Logger) {\n\tif l != nil {\n\t\tlogger = l\n\t}\n}", "func SetLogger(newLogger log.Interface) {\n\tif newLogger == nil {\n\t\tpanic(\"logger can not be nil\")\n\t}\n\tlogger = newLogger\n}", "func SetLogger(l Logger) {\n\tglobal = l\n}", "func SetLogger(Logger *log.Logger) {\n\tlogger = Logger\n}", "func GetLogger() Logger {\n\treturn logger\n}", "func GetLogger() Logger {\n\treturn logger\n}", "func SetLogger(l *logrus.Logger) {\n\tlogger = l\n}", "func SetLogger(l *logrus.Logger) {\n\tlogger = l\n}", "func SetLogger(l Logger) {\n\t_, isDefault := l.(*logger)\n\tif isDefault && oakLogger != nil {\n\t\t// The user set the logger themselves,\n\t\t// don't reset to the default logger\n\t\treturn\n\t}\n\toakLogger = l\n\tError = l.Error\n\tWarn = l.Warn\n\tInfo = l.Info\n\tVerb = l.Verb\n\t// If this logger supports the additional functionality described\n\t// by the FullLogger interface, enable those functions. Otherwise\n\t// they are NOPs. 
(the default logger supports these functions.)\n\tif fl, ok := l.(FullLogger); ok {\n\t\tfullOakLogger = fl\n\t\tFileWrite = fl.FileWrite\n\t\tGetLogLevel = fl.GetLogLevel\n\t\tSetDebugFilter = fl.SetDebugFilter\n\t\tSetDebugLevel = fl.SetDebugLevel\n\t\tCreateLogFile = fl.CreateLogFile\n\t}\n}", "func SetLogger(logLevelVar string) {\n\tlevel, err := log.ParseLevel(logLevelVar)\n\tif err != nil {\n\t\tlevel = log.InfoLevel\n\t}\n\tlog.SetLevel(level)\n\n\tlog.SetReportCaller(true)\n\tcustomFormatter := new(log.TextFormatter)\n\tcustomFormatter.TimestampFormat = \"2006-01-02 15:04:05\"\n\tcustomFormatter.QuoteEmptyFields = true\n\tcustomFormatter.FullTimestamp = true\n\tcustomFormatter.CallerPrettyfier = func(f *runtime.Frame) (string, string) {\n\t\trepopath := strings.Split(f.File, \"/\")\n\t\tfunction := strings.Replace(f.Function, \"go-pkgdl/\", \"\", -1)\n\t\treturn fmt.Sprintf(\"%s\\t\", function), fmt.Sprintf(\" %s:%d\\t\", repopath[len(repopath)-1], f.Line)\n\t}\n\n\tlog.SetFormatter(customFormatter)\n\tfmt.Println(\"Log level set at \", level)\n}", "func (m *MockAgentManager) Logger() *logrus.Entry {\n\tret := m.ctrl.Call(m, \"Logger\")\n\tret0, _ := ret[0].(*logrus.Entry)\n\treturn ret0\n}", "func useLogger(subsystemID string, logger btclog.Logger) {\n\tif _, ok := subsystemLoggers[subsystemID]; !ok {\n\t\treturn\n\t}\n\tsubsystemLoggers[subsystemID] = logger\n\n\tswitch subsystemID {\n\tcase \"TEST\":\n\t\tlog = logger\n\n\tcase \"BTCD\":\n\t\tbtcdLog = logger\n\t\tbtcdcommander.UseLogger(logger)\n\n\tcase \"REGT\":\n\t\tregTesterLog = logger\n\t\tregtester.UseLogger(logger)\n\t}\n}", "func setTestLogger() *observer.ObservedLogs {\n\tcore, obs := observer.New(zapcore.DebugLevel)\n\tlogger := zap.New(core).WithOptions(\n\t\tzap.AddCaller(),\n\t\tzap.AddCallerSkip(1),\n\t)\n\tUse(logger)\n\treturn obs\n}", "func SetLogger(l Logger) {\n\tglobalLoggerLock.Lock()\n\tdefer globalLoggerLock.Unlock()\n\tglobalLogger.Store(logWrapper{logger: l})\n}", "func 
SetLogger(log Logger) {\n\tlogLock.Lock()\n\tdefer logLock.Unlock()\n\tlogger = log\n}", "func TestLogger(t *testing.T) log.Logger {\n\tt.Helper()\n\n\tl := log.NewSyncLogger(log.NewLogfmtLogger(os.Stderr))\n\tl = log.WithPrefix(l,\n\t\t\"test\", t.Name(),\n\t\t\"ts\", log.Valuer(testTimestamp),\n\t)\n\n\treturn l\n}", "func SetLogger(_logger *logrus.Logger) {\n\tlogger = _logger\n}", "func SetLogger(logger Logger) {\n\tif logger == nil {\n\t\tlog = noopLogger{}\n\t} else {\n\t\tlog = logger\n\t}\n}", "func (c *FakeZkConn) SetLogger(l zk.Logger) {\n\tc.history.addToHistory(\"SetLogger\", l)\n}", "func (c *Cucumber) Logger() *log.Logger {\n\treturn c.logger\n}", "func (drc *DummyRectificationClient) SetLogger(l logging.LogSink) {\n\tl.Warnf(\"dummy begin\")\n\tdrc.logger = l\n}", "func SetLogger(l LevelledLogger) {\n\tlogger = &logPrefixer{log: l}\n}", "func SetLogger(log interface{}) {\n\tvar entry logger.LogEntry\n\n\tswitch log.(type) {\n\tcase *zap.Logger:\n\t\tlog, ok := log.(*zap.Logger)\n\t\tif !ok {\n\n\t\t\treturn\n\t\t}\n\t\tentry = logger.NewZap(log)\n\tcase *logrus.Logger:\n\t\tlog, ok := log.(*logrus.Logger)\n\t\tif !ok {\n\n\t\t\treturn\n\t\t}\n\t\tentry = logger.NewLogrus(log)\n\tdefault:\n\n\t\treturn\n\t}\n\n\tBuilderLog = entry.WithFields(builderLogFields)\n}", "func SetLogger(l *zap.Logger) *tempLogger {\n\ttmp := &tempLogger{\n\t\tprevious: *(wrappedLogger.zap),\n\t}\n\twrappedLogger.zap = l\n\treturn tmp\n}", "func SetLogger(logger Logger) error {\n\t// currentlog := logger\n\treturn nil\n}", "func SetLogger(customLogger Logger) {\n\tlogger = customLogger\n}", "func SetLogger(logger Logger) {\n\tif logger != nil {\n\t\tdebugLogger = logger\n\t}\n}", "func SetLogger(l *log.Logger) {\n StdOutLogger = l\n}", "func SetLogger(l *zap.Logger) {\n\t_loggerMu.Lock()\n\t_logger = l\n\t_loggerMu.Unlock()\n}", "func UseLogger(\n\tlogger *cl.SubSystem) {\n\tLog = logger\n\tlog = Log.Ch\n}", "func SetLogger(logger log_Logger) 
{\n\tglobalLoggerLock.Lock()\n\tglobalLogger = logger\n\tglobalLoggerLock.Unlock()\n}", "func setupLogger() {\n\tsl := logrus.New()\n\tsrvLog = sl.WithField(\"context\", \"server\")\n}", "func SetLogger(raisLogger *logger.Logger) {\n\tl = raisLogger\n}", "func GetLogger() *Logger {\n\treturn logger\n}", "func SetLogger(printLog, isSilent bool) {\n\tlog.SetFlags(log.Ldate | log.Ltime | log.Lshortfile)\n\n\tl := &lumberjack.Logger{\n\t\tFilename: path.Join(cfg.LogDir, \"gou.log\"),\n\t\tMaxSize: 5, // megabytes\n\t\tMaxBackups: 10,\n\t\tMaxAge: 28, //days\n\t}\n\tswitch {\n\tcase isSilent:\n\t\tfmt.Println(\"logging is discarded\")\n\t\tlog.SetOutput(ioutil.Discard)\n\tcase printLog:\n\t\tfmt.Println(\"outputs logs to stdout and \", cfg.LogDir)\n\t\tm := io.MultiWriter(os.Stdout, l)\n\t\tlog.SetOutput(m)\n\tdefault:\n\t\tfmt.Println(\"output logs to \", cfg.LogDir)\n\t\tlog.SetOutput(l)\n\t}\n}", "func SetLogger(logger *logrus.Entry) {\n\tfields := monitorLog.Data\n\tmonitorLog = logger.WithFields(fields)\n}", "func SetLogger(logger internal.Logging) {\n\tinternal.Logger = logger\n}", "func SetLogger() {\n\tlogFile = handleLogFile()\n\tcfg := config.GetConf()\n\tl := logrus.New()\n\tl.SetFormatter(&logrus.TextFormatter{})\n\tl.SetReportCaller(true)\n\tl.SetLevel(parseLevel(cfg.Global.LogLevel))\n\tl.SetOutput(logFile)\n\tlogHandle = l\n\tlogEntry = logrus.NewEntry(logHandle)\n}", "func GetLogger() *logs.BeeLogger {\n\treturn log\n}", "func SetLogger(logger logur.Logger) {\n\tlogger = logur.WithField(logger, \"component\", \"mysql\")\n\n\t_ = mysql.SetLogger(logur.NewErrorPrintLogger(logger))\n}", "func SetLogger(externalLog interface{}) {\n\tswitch externalLog.(type) {\n\tcase *zap.Logger:\n\t\tlog, ok := externalLog.(*zap.Logger)\n\t\tif !ok {\n\n\t\t\treturn\n\t\t}\n\t\tLog = NewZap(log)\n\tcase *logrus.Logger:\n\t\tlog, ok := externalLog.(*logrus.Logger)\n\t\tif !ok {\n\n\t\t\treturn\n\t\t}\n\t\tLog = NewLogrus(log)\n\tdefault:\n\n\t\treturn\n\t}\n}", "func 
SetLogger(logger *xlog.Logger) {\n\tlogger = logger.Named(ecode.ModClientGrpc).WithOptions(zap.AddCallerSkip(defaultCallerSkip))\n\tgrpclog.SetLoggerV2(&loggerWrapper{logger: logger, sugar: logger.Sugar()})\n}", "func GetLogger() common.Logger {\n\treturn logger\n}", "func TestDefaultLog(t *testing.T) {\n\n\tlogrus.SetLevel(logrus.TraceLevel)\n\n\tlog.GetLogger(\"default\").WithField(\"test\", \"DefaultLog\").Info(\"Hello\")\n\tlog.GetLogger(\"default\").WithField(\"test\", \"DefaultLog\").Debug(\"Hello\")\n\tlog.GetLogger(\"default\").WithField(\"test\", \"DefaultLog\").Trace(\"Hello\")\n\tlog.GetLogger(\"default\").WithField(\"test\", \"DefaultLog\").Warning(\"Hello\")\n\tlog.GetLogger(\"default\").WithField(\"test\", \"DefaultLog\").Error(\"Hello\")\n}", "func (cm *CertMan) Logger(logger logger) {\n\tcm.log = logger\n}", "func SetLogger(logger logur.Logger) error {\n\tif logger == nil {\n\t\treturn errors.New(\"logger is nil\")\n\t}\n\n\terrLog = logur.WithField(logger, \"component\", \"mongodb\")\n\n\treturn nil\n}", "func UseLogger(backend *logs.Backend, level logs.Level) {\n\tlog = backend.Logger(logSubsytem)\n\tlog.SetLevel(level)\n\tspawn = panics.GoroutineWrapperFunc(log)\n}", "func SetLogger(logger logger) {\n\tif logger != nil {\n\t\tglobalLogger = &mgoLogger{\n\t\t\tlogger: logger,\n\t\t}\n\t}\n}", "func LoggerOf(id string) *Logger {\n if id == \"\" {\n id = default_id\n }\n if _, ok := loggers[default_id]; !ok {\n nlogger := &Logger{\n logger: log.New(os.Stdout, LEVELS[DEBUG], log.Lshortfile|log.LstdFlags),\n level: DEBUG,\n id: default_id,\n }\n loggers[default_id] = nlogger\n }\n if _, ok := loggers[id]; !ok {\n loggers[default_id].Fatalf(\"logger %s not exist.\", id)\n }\n return loggers[id]\n}", "func SetLogger(customLogger log.StdLogger) {\n\tlog.SetLogger(customLogger)\n}", "func (s *StanLogger) SetLogger(log Logger, logtime, debug, trace bool, logfile string) {\n\ts.mu.Lock()\n\ts.log = log\n\ts.ltime = logtime\n\ts.debug = debug\n\ts.trace = 
trace\n\ts.lfile = logfile\n\ts.mu.Unlock()\n}", "func SetLogger(cmp *mcmp.Component, l *Logger) {\n\tcmp.SetValue(cmpKeyLogger, l)\n\n\t// If the base Logger on this Component gets changed, then the cached Logger\n\t// from From on this Component, and all of its Children, ought to be reset,\n\t// so that any changes can be reflected in their loggers.\n\tvar resetFromLogger func(*mcmp.Component)\n\tresetFromLogger = func(cmp *mcmp.Component) {\n\t\tcmp.SetValue(cmpKeyCachedLogger, nil)\n\t\tfor _, childCmp := range cmp.Children() {\n\t\t\tresetFromLogger(childCmp)\n\t\t}\n\t}\n\tresetFromLogger(cmp)\n}" ]
[ "0.73071015", "0.7217698", "0.71225667", "0.7067874", "0.70242816", "0.7004462", "0.6998388", "0.69833803", "0.6974619", "0.69734454", "0.6970742", "0.69570327", "0.6933845", "0.69240355", "0.6911089", "0.69096345", "0.69042265", "0.6903018", "0.6900997", "0.6900997", "0.69007397", "0.6897347", "0.6876597", "0.68712884", "0.6858687", "0.6852122", "0.6849424", "0.6847987", "0.6847987", "0.6826522", "0.68262815", "0.6823956", "0.68083227", "0.6806886", "0.6806886", "0.6806886", "0.6806886", "0.6804022", "0.6801171", "0.6771606", "0.67691284", "0.6762576", "0.6748946", "0.6748868", "0.67440933", "0.67387295", "0.67323726", "0.6714103", "0.6712281", "0.6710823", "0.669885", "0.6677042", "0.66548693", "0.66548693", "0.6654145", "0.6654145", "0.664519", "0.6644889", "0.66371816", "0.6631844", "0.6623448", "0.66207314", "0.6620346", "0.6614409", "0.66139466", "0.66119254", "0.6609438", "0.6595646", "0.6595075", "0.658757", "0.6577606", "0.6568392", "0.6564004", "0.6557927", "0.65517473", "0.6542558", "0.65420055", "0.65348464", "0.65285426", "0.65247816", "0.6524235", "0.6516797", "0.65152895", "0.6505884", "0.6488356", "0.6486338", "0.64785624", "0.6470667", "0.6468508", "0.6451683", "0.64435023", "0.6434093", "0.64312106", "0.64311147", "0.6422109", "0.64141965", "0.64125675", "0.639557", "0.63950795", "0.63933134" ]
0.75092244
0
Test that With returns an error if there is no registered pattern matching the resource
func TestServiceWith_WithoutMatchingPattern(t *testing.T) { runTest(t, func(s *res.Service) { s.Handle("collection", res.GetResource(func(r res.GetRequest) { r.NotFound() })) }, func(s *restest.Session) { err := s.Service().With("test.model", func(r res.Resource) {}) if err == nil { t.Errorf("expected With to return an error, but it didn't") } }) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r *Router) Resource(pattern string, resource Resource) {\n\tsub := r.Group(pattern)\n\n\tif usesRes, ok := resource.(ResourceUses); ok {\n\t\tif len(usesRes.Uses()) > 0 {\n\t\t\tsub.Use(usesRes.Uses()...)\n\t\t}\n\t}\n\n\tfor _, m := range allowedHTTPMethods {\n\t\tif hfn, ok := isHandlerFuncInResource(m, resource); ok {\n\t\t\ts := sub.Subrouter()\n\t\t\tif mws, ok := isMiddlewareInResource(m, resource); ok {\n\t\t\t\ts.Use(mws()...)\n\t\t\t}\n\t\t\ts.HandleFunc(m, \"/\", hfn)\n\t\t}\n\t}\n}", "func MatchPattern(logger logr.Logger, resource, pattern interface{}) error {\n\t// newAnchorMap - to check anchor key has values\n\tac := anchor.NewAnchorMap()\n\telemPath, err := validateResourceElement(logger, resource, pattern, pattern, \"/\", ac)\n\tif err != nil {\n\t\tif skip(err) {\n\t\t\tlogger.V(2).Info(\"resource skipped\", \"reason\", ac.AnchorError.Error())\n\t\t\treturn &PatternError{err, \"\", true}\n\t\t}\n\n\t\tif fail(err) {\n\t\t\tlogger.V(2).Info(\"failed to apply rule on resource\", \"msg\", ac.AnchorError.Error())\n\t\t\treturn &PatternError{err, elemPath, false}\n\t\t}\n\n\t\t// check if an anchor defined in the policy rule is missing in the resource\n\t\tif ac.KeysAreMissing() {\n\t\t\tlogger.V(3).Info(\"missing anchor in resource\")\n\t\t\treturn &PatternError{err, \"\", false}\n\t\t}\n\n\t\treturn &PatternError{err, elemPath, false}\n\t}\n\n\treturn nil\n}", "func checkResource(config interface{}, resource *unstructured.Unstructured) (bool, error) {\n\n\t// we are checking if config is a subset of resource with default pattern\n\tpath, err := validateResourceWithPattern(resource.Object, config)\n\tif err != nil {\n\t\tglog.V(4).Infof(\"config not a subset of resource. 
failed at path %s: %v\", path, err)\n\t\treturn false, err\n\t}\n\treturn true, nil\n}", "func (rest *TestResourceREST) TestFailRegisterResourceNonServiceAccount() {\n\tsa := account.Identity{\n\t\tUsername: \"unknown-account\",\n\t}\n\tservice, controller := rest.SecuredController(sa)\n\n\tresourceDescription := \"Resource description\"\n\tresourceID := \"\"\n\tresourceScopes := []string{}\n\n\tresourceOwnerID := rest.testIdentity.ID\n\n\tpayload := &app.RegisterResourcePayload{\n\t\tDescription: &resourceDescription,\n\t\tName: \"My new resource\",\n\t\tParentResourceID: nil,\n\t\tResourceScopes: resourceScopes,\n\t\tResourceID: &resourceID,\n\t\tResourceOwnerID: resourceOwnerID.String(),\n\t\tType: \"Area\",\n\t}\n\n\ttest.RegisterResourceUnauthorized(rest.T(), service.Context, service, controller, payload)\n}", "func ValidResource(api *kit.API, lookupOrgByResourceID func(context.Context, influxdb.ID) (influxdb.ID, error)) kit.Middleware {\n\treturn func(next http.Handler) http.Handler {\n\t\tfn := func(w http.ResponseWriter, r *http.Request) {\n\t\t\tstatusW := kit.NewStatusResponseWriter(w)\n\t\t\tid, err := influxdb.IDFromString(chi.URLParam(r, \"id\"))\n\t\t\tif err != nil {\n\t\t\t\tapi.Err(w, ErrCorruptID(err))\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tctx := r.Context()\n\n\t\t\torgID, err := lookupOrgByResourceID(ctx, *id)\n\t\t\tif err != nil {\n\t\t\t\tapi.Err(w, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tnext.ServeHTTP(statusW, r.WithContext(context.WithValue(ctx, ctxOrgKey, orgID)))\n\t\t}\n\t\treturn http.HandlerFunc(fn)\n\t}\n}", "func TestMatchesWithMalformedPatterns(t *testing.T) {\n\tmatches, err := Matches(\"/any/path/there\", []string{\"[\"})\n\tif err == nil {\n\t\tt.Fatal(\"Should have failed because of a malformed syntax in the pattern\")\n\t}\n\tif matches {\n\t\tt.Fatalf(\"Should not have match anything\")\n\t}\n}", "func TestDROStructuralValidatorMemberNotFound(t *testing.T) {\n\tvalidator := NewDROStructuralValidator(newMockRepository(nil))\n\tobj 
:= testObjectResource([]string{\"NotfindableID\"})\n\terr := validator.ValidateResource(obj)\n\tassert.NotNil(t, err)\n}", "func TestResourcesCtrlFindByHostname(t *testing.T) {\n\ta := assert.New(t)\n\tr := require.New(t)\n\tparams := utils.NewFakeParamsGetter()\n\trender := utils.NewFakeRender()\n\tinter := &resourcesCtrlResourcesInter{}\n\trecorder := httptest.NewRecorder()\n\tctrl := NewResourcesCtrl(inter, render, params, nil)\n\tresourceOut := &models.Resource{}\n\n\t// No error, a resource is returned\n\tctrl.FindByHostname(recorder, utils.FakeRequest(\"GET\", \"http://foo.bar/resources/host.com\", nil))\n\tr.Equal(200, render.Status)\n\terr := json.NewDecoder(recorder.Body).Decode(resourceOut)\n\tr.NoError(err)\n\ta.NotNil(resourceOut)\n\tutils.Clear(params, render, recorder)\n\n\t// The interactor returns a database error\n\tinter.errDB = true\n\tctrl.FindByHostname(recorder, utils.FakeRequest(\"GET\", \"http://foo.bar/resources/host.com\", nil))\n\tr.Equal(500, render.Status)\n\tr.NotEmpty(recorder.Body.Bytes())\n\tr.NotNil(render.APIError)\n\ta.IsType(errs.API.Internal, render.APIError)\n\tutils.Clear(params, render, recorder)\n\n\t// Resource not found\n\tinter.errDB = false\n\tinter.errNotFound = true\n\tctrl.FindByHostname(recorder, utils.FakeRequest(\"GET\", \"http://foo.bar/resources/host.com\", nil))\n\tr.Equal(404, render.Status)\n\tr.NotEmpty(recorder.Body.Bytes())\n\tr.NotNil(render.APIError)\n\ta.IsType(errs.API.NotFound, render.APIError)\n\tutils.Clear(params, render, recorder)\n}", "func TestLookupEndpointPanicsOnInvalidType(t *testing.T) {\n\tdefer func() {\n\t\tif r := recover(); r == nil {\n\t\t\tt.Error(\"lookupEndpoint should panic if an invalid conntype is specified.\")\n\t\t}\n\t}()\n\tlookupEndpoint(context.Background(), nil, nil, jid.JID{}, \"wssorbashorsomething\")\n}", "func TestMatchesWithNoPatterns(t *testing.T) {\n\tmatches, err := Matches(\"/any/path/there\", []string{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif matches 
{\n\t\tt.Fatalf(\"Should not have match anything\")\n\t}\n}", "func TestDiscoveryResourceGate(t *testing.T) {\n\tresources := map[string][]metav1.APIResource{\n\t\t\"allLegacy\": {\n\t\t\t{Name: \"clusterpolicies\", Kind: \"ClusterPolicies\"},\n\t\t\t{Name: \"clusterpolicybindings\", Kind: \"ClusterPolicyBindings\"},\n\t\t\t{Name: \"policies\", Kind: \"Policies\"},\n\t\t\t{Name: \"policybindings\", Kind: \"PolicyBindings\"},\n\t\t\t{Name: \"foo\", Kind: \"Foo\"},\n\t\t},\n\t\t\"partialLegacy\": {\n\t\t\t{Name: \"clusterpolicies\", Kind: \"ClusterPolicies\"},\n\t\t\t{Name: \"clusterpolicybindings\", Kind: \"ClusterPolicyBindings\"},\n\t\t\t{Name: \"foo\", Kind: \"Foo\"},\n\t\t},\n\t\t\"noLegacy\": {\n\t\t\t{Name: \"foo\", Kind: \"Foo\"},\n\t\t\t{Name: \"bar\", Kind: \"Bar\"},\n\t\t},\n\t}\n\n\tlegacyTests := map[string]struct {\n\t\texistingResources *metav1.APIResourceList\n\t\texpectErrStr string\n\t}{\n\t\t\"scheme-legacy-all-supported\": {\n\t\t\texistingResources: &metav1.APIResourceList{\n\t\t\t\tGroupVersion: authorization.LegacySchemeGroupVersion.String(),\n\t\t\t\tAPIResources: resources[\"allLegacy\"],\n\t\t\t},\n\t\t\texpectErrStr: \"\",\n\t\t},\n\t\t\"scheme-legacy-some-supported\": {\n\t\t\texistingResources: &metav1.APIResourceList{\n\t\t\t\tGroupVersion: authorization.LegacySchemeGroupVersion.String(),\n\t\t\t\tAPIResources: resources[\"partialLegacy\"],\n\t\t\t},\n\t\t\texpectErrStr: \"the server does not support legacy policy resources\",\n\t\t},\n\t\t\"scheme-legacy-none-supported\": {\n\t\t\texistingResources: &metav1.APIResourceList{\n\t\t\t\tGroupVersion: authorization.LegacySchemeGroupVersion.String(),\n\t\t\t\tAPIResources: resources[\"noLegacy\"],\n\t\t\t},\n\t\t\texpectErrStr: \"the server does not support legacy policy resources\",\n\t\t},\n\t\t\"scheme-all-supported\": {\n\t\t\texistingResources: &metav1.APIResourceList{\n\t\t\t\tGroupVersion: authorization.SchemeGroupVersion.String(),\n\t\t\t\tAPIResources: 
resources[\"allLegacy\"],\n\t\t\t},\n\t\t\texpectErrStr: \"\",\n\t\t},\n\t\t\"scheme-some-supported\": {\n\t\t\texistingResources: &metav1.APIResourceList{\n\t\t\t\tGroupVersion: authorization.SchemeGroupVersion.String(),\n\t\t\t\tAPIResources: resources[\"partialLegacy\"],\n\t\t\t},\n\t\t\texpectErrStr: \"the server does not support legacy policy resources\",\n\t\t},\n\t\t\"scheme-none-supported\": {\n\t\t\texistingResources: &metav1.APIResourceList{\n\t\t\t\tGroupVersion: authorization.SchemeGroupVersion.String(),\n\t\t\t\tAPIResources: resources[\"noLegacy\"],\n\t\t\t},\n\t\t\texpectErrStr: \"the server does not support legacy policy resources\",\n\t\t},\n\t}\n\n\tdiscoveryTests := map[string]struct {\n\t\texistingResources *metav1.APIResourceList\n\t\tinputGVR []schema.GroupVersionResource\n\t\texpectedGVR []schema.GroupVersionResource\n\t\texpectedAll bool\n\t}{\n\t\t\"discovery-subset\": {\n\t\t\texistingResources: &metav1.APIResourceList{\n\t\t\t\tGroupVersion: \"v1\",\n\t\t\t\tAPIResources: resources[\"noLegacy\"],\n\t\t\t},\n\t\t\tinputGVR: []schema.GroupVersionResource{\n\t\t\t\t{\n\t\t\t\t\tGroup: \"\",\n\t\t\t\t\tVersion: \"v1\",\n\t\t\t\t\tResource: \"foo\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tGroup: \"\",\n\t\t\t\t\tVersion: \"v1\",\n\t\t\t\t\tResource: \"bar\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tGroup: \"\",\n\t\t\t\t\tVersion: \"v1\",\n\t\t\t\t\tResource: \"noexist\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedGVR: []schema.GroupVersionResource{\n\t\t\t\t{\n\t\t\t\t\tGroup: \"\",\n\t\t\t\t\tVersion: \"v1\",\n\t\t\t\t\tResource: \"foo\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tGroup: \"\",\n\t\t\t\t\tVersion: \"v1\",\n\t\t\t\t\tResource: \"bar\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"discovery-none\": {\n\t\t\texistingResources: &metav1.APIResourceList{\n\t\t\t\tGroupVersion: \"v1\",\n\t\t\t\tAPIResources: resources[\"noLegacy\"],\n\t\t\t},\n\t\t\tinputGVR: []schema.GroupVersionResource{\n\t\t\t\t{\n\t\t\t\t\tGroup: \"\",\n\t\t\t\t\tVersion: 
\"v1\",\n\t\t\t\t\tResource: \"noexist\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedGVR: []schema.GroupVersionResource{},\n\t\t},\n\t\t\"discovery-all\": {\n\t\t\texistingResources: &metav1.APIResourceList{\n\t\t\t\tGroupVersion: \"v1\",\n\t\t\t\tAPIResources: resources[\"noLegacy\"],\n\t\t\t},\n\t\t\tinputGVR: []schema.GroupVersionResource{\n\t\t\t\t{\n\t\t\t\t\tGroup: \"\",\n\t\t\t\t\tVersion: \"v1\",\n\t\t\t\t\tResource: \"foo\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tGroup: \"\",\n\t\t\t\t\tVersion: \"v1\",\n\t\t\t\t\tResource: \"bar\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedGVR: []schema.GroupVersionResource{\n\t\t\t\t{\n\t\t\t\t\tGroup: \"\",\n\t\t\t\t\tVersion: \"v1\",\n\t\t\t\t\tResource: \"foo\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tGroup: \"\",\n\t\t\t\t\tVersion: \"v1\",\n\t\t\t\t\tResource: \"bar\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedAll: true,\n\t\t},\n\t}\n\n\tfor tcName, tc := range discoveryTests {\n\t\tfunc() {\n\t\t\tserver := testServer(t, tc.existingResources)\n\t\t\tdefer server.Close()\n\t\t\tclient := discovery.NewDiscoveryClientForConfigOrDie(&restclient.Config{Host: server.URL})\n\n\t\t\tgot, all, err := DiscoverGroupVersionResources(client, tc.inputGVR...)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"myerr %s\", err.Error())\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(got, tc.expectedGVR) {\n\t\t\t\tt.Fatalf(\"%s got %v, expected %v\", tcName, got, tc.expectedGVR)\n\t\t\t}\n\t\t\tif tc.expectedAll && !all {\n\t\t\t\tt.Fatalf(\"%s expected all\", tcName)\n\t\t\t}\n\t\t}()\n\t}\n\n\tfor tcName, tc := range legacyTests {\n\t\tfunc() {\n\t\t\tserver := testServer(t, tc.existingResources)\n\t\t\tdefer server.Close()\n\t\t\tclient := discovery.NewDiscoveryClientForConfigOrDie(&restclient.Config{Host: server.URL})\n\n\t\t\terr := LegacyPolicyResourceGate(client)\n\t\t\tif err != nil {\n\t\t\t\tif len(tc.expectErrStr) == 0 {\n\t\t\t\t\tt.Fatalf(\"%s unexpected err %s\\n\", tcName, err.Error())\n\t\t\t\t}\n\t\t\t\tif tc.expectErrStr != err.Error() 
{\n\t\t\t\t\tt.Fatalf(\"%s expected err %s, got %s\", tcName, tc.expectErrStr, err.Error())\n\t\t\t\t}\n\t\t\t}\n\t\t\tif err == nil && len(tc.expectErrStr) != 0 {\n\t\t\t\tt.Fatalf(\"%s expected err %s, got none\\n\", tcName, tc.expectErrStr)\n\t\t\t}\n\t\t}()\n\t}\n}", "func Register(pa Pattern) error {\n\tif err := IsValid(pa); err != nil {\n\t\treturn err\n\t}\n\tsupportedPatterns[pa.String()] = pa\n\n\treturn nil\n}", "func TestRestTesterInvalidPathVariable(t *testing.T) {\n\tconst dbName = \"dbname\"\n\trt := NewRestTester(t, &RestTesterConfig{\n\t\tDatabaseConfig: &DatabaseConfig{\n\t\t\tDbConfig: DbConfig{\n\t\t\t\tName: dbName,\n\t\t\t},\n\t\t},\n\t})\n\tdefer rt.Close()\n\n\turi, err := rt.templateResource(\"/foo/{{.invalid}}/bar\")\n\tassert.Errorf(t, err, \"Expected error for invalid path variable\")\n\tassert.Equalf(t, \"\", uri, \"Expected empty URI for invalid path variable\")\n\tassert.NotContainsf(t, uri, \"<no value>\", \"Expected URI to not contain \\\"<no value>\\\" for invalid path variable\")\n\n\turi, err = rt.templateResource(\"/foo/{{.db}}/bar\")\n\tassert.NoError(t, err)\n\tassert.Equalf(t, \"/foo/\"+dbName+\"/bar\", uri, \"Expected valid URI for valid path variable\")\n}", "func TestRegisterInvalidRoute(t *testing.T) {\n\t// create gearbox instance\n\tgb := new(gearbox)\n\tgb.registeredRoutes = make([]*routeInfo, 0)\n\n\t// test handler is nil\n\tif err := gb.registerRoute(MethodGet, \"invalid Path\", emptyHandler); err == nil {\n\t\tt.Errorf(\"input GET invalid Path find nil expecting error\")\n\t}\n}", "func TestAnySecurityRequirementMet(t *testing.T) {\n\t// Create of a map of scheme names and whether they are valid\n\tschemes := map[string]bool{\n\t\t\"a\": true,\n\t\t\"b\": true,\n\t\t\"c\": false,\n\t\t\"d\": false,\n\t}\n\n\t// Create the test cases\n\ttc := []struct {\n\t\tname string\n\t\tschemes []string\n\t\terror bool\n\t}{\n\t\t{\n\t\t\tname: \"/ok1\",\n\t\t\tschemes: []string{\"a\", \"b\"},\n\t\t\terror: 
false,\n\t\t},\n\t\t{\n\t\t\tname: \"/ok2\",\n\t\t\tschemes: []string{\"a\", \"c\"},\n\t\t\terror: false,\n\t\t},\n\t\t{\n\t\t\tname: \"/error\",\n\t\t\tschemes: []string{\"c\", \"d\"},\n\t\t\terror: true,\n\t\t},\n\t}\n\n\tdoc := openapi3.T{\n\t\tOpenAPI: \"3.0.0\",\n\t\tInfo: &openapi3.Info{\n\t\t\tTitle: \"MyAPI\",\n\t\t\tVersion: \"0.1\",\n\t\t},\n\t\tPaths: map[string]*openapi3.PathItem{},\n\t\tComponents: &openapi3.Components{\n\t\t\tSecuritySchemes: map[string]*openapi3.SecuritySchemeRef{},\n\t\t},\n\t}\n\n\t// Add the security schemes to the spec's components\n\tfor schemeName := range schemes {\n\t\tdoc.Components.SecuritySchemes[schemeName] = &openapi3.SecuritySchemeRef{\n\t\t\tValue: &openapi3.SecurityScheme{\n\t\t\t\tType: \"http\",\n\t\t\t\tScheme: \"basic\",\n\t\t\t},\n\t\t}\n\t}\n\n\t// Add the paths to the spec\n\tfor _, tc := range tc {\n\t\t// Create the security requirements from the test cases's schemes\n\t\tsecurityRequirements := openapi3.NewSecurityRequirements()\n\t\tfor _, scheme := range tc.schemes {\n\t\t\tsecurityRequirements.With(openapi3.SecurityRequirement{scheme: {}})\n\t\t}\n\n\t\t// Create the path with the security requirements\n\t\tdoc.Paths[tc.name] = &openapi3.PathItem{\n\t\t\tGet: &openapi3.Operation{\n\t\t\t\tSecurity: securityRequirements,\n\t\t\t\tResponses: openapi3.NewResponses(),\n\t\t\t},\n\t\t}\n\t}\n\n\terr := doc.Validate(context.Background())\n\trequire.NoError(t, err)\n\trouter, err := legacyrouter.NewRouter(&doc)\n\trequire.NoError(t, err)\n\n\t// Create the authentication function\n\tauthFunc := makeAuthFunc(schemes)\n\n\tfor _, tc := range tc {\n\t\t// Create the request input for the path\n\t\ttcURL, err := url.Parse(tc.name)\n\t\trequire.NoError(t, err)\n\t\thttpReq := httptest.NewRequest(http.MethodGet, tcURL.String(), nil)\n\t\troute, _, err := router.FindRoute(httpReq)\n\t\trequire.NoError(t, err)\n\t\treq := RequestValidationInput{\n\t\t\tRoute: route,\n\t\t\tOptions: &Options{\n\t\t\t\tAuthenticationFunc: 
authFunc,\n\t\t\t},\n\t\t}\n\n\t\t// Validate the security requirements\n\t\terr = ValidateSecurityRequirements(context.Background(), &req, *route.Operation.Security)\n\n\t\t// If there should have been an error\n\t\tif tc.error {\n\t\t\trequire.Errorf(t, err, \"an error is expected for path %q\", tc.name)\n\t\t} else {\n\t\t\trequire.NoErrorf(t, err, \"an error wasn't expected for path %q\", tc.name)\n\t\t}\n\t}\n}", "func (s *State) ensureResource(address, provider, typ string, skipped bool) {\n\tif _, ok := s.Resources[address]; !ok {\n\t\tres := Resource{\n\t\t\tProvider: provider,\n\t\t\tType: typ,\n\t\t\tSkipped: skipped,\n\t\t}\n\n\t\tif !skipped {\n\t\t\tres.Components = make(map[string]Component)\n\t\t}\n\n\t\ts.Resources[address] = res\n\t}\n}", "func (rest *TestResourceREST) TestFailRegisterResourceInvalidParentResource() {\n\tresourceDescription := \"Resource description\"\n\tresourceID := \"\"\n\tresourceScopes := []string{}\n\n\tresourceOwnerID := rest.testIdentity.ID\n\tparentResourceID := uuid.NewV4().String()\n\n\tpayload := &app.RegisterResourcePayload{\n\t\tDescription: &resourceDescription,\n\t\tName: \"My new resource\",\n\t\tParentResourceID: &parentResourceID,\n\t\tResourceScopes: resourceScopes,\n\t\tResourceID: &resourceID,\n\t\tResourceOwnerID: resourceOwnerID.String(),\n\t\tType: \"Area\",\n\t}\n\n\ttest.RegisterResourceBadRequest(rest.T(), rest.service.Context, rest.service, rest.securedController, payload)\n}", "func Use(o ResourceOpener, input string) (err error) {\n\tvar res Resource\n\tfor res, err = o(); err != nil; res, err = o() {\n\t\tif _, ok := err.(TransientError); !ok {\n\t\t\treturn\n\t\t}\n\t}\n\n\tdefer func() {\n\t\tif rec := recover(); rec != nil {\n\t\t\tif ferr, ok := rec.(FrobError); ok {\n\t\t\t\tres.Defrob(ferr.defrobTag)\n\t\t\t}\n\t\t\terr = rec.(error)\n\t\t}\n\t\tres.Close()\n\t}()\n\tres.Frob(input)\n\n\treturn\n}", "func validateResourceElement(log logr.Logger, resourceElement, patternElement, originPattern 
interface{}, path string, ac *anchor.AnchorMap) (string, error) {\n\tswitch typedPatternElement := patternElement.(type) {\n\t// map\n\tcase map[string]interface{}:\n\t\ttypedResourceElement, ok := resourceElement.(map[string]interface{})\n\t\tif !ok {\n\t\t\tlog.V(4).Info(\"Pattern and resource have different structures.\", \"path\", path, \"expected\", fmt.Sprintf(\"%T\", patternElement), \"current\", fmt.Sprintf(\"%T\", resourceElement))\n\t\t\treturn path, fmt.Errorf(\"pattern and resource have different structures. Path: %s. Expected %T, found %T\", path, patternElement, resourceElement)\n\t\t}\n\t\t// CheckAnchorInResource - check anchor key exists in resource and update the AnchorKey fields.\n\t\tac.CheckAnchorInResource(typedPatternElement, typedResourceElement)\n\t\treturn validateMap(log, typedResourceElement, typedPatternElement, originPattern, path, ac)\n\t// array\n\tcase []interface{}:\n\t\ttypedResourceElement, ok := resourceElement.([]interface{})\n\t\tif !ok {\n\t\t\tlog.V(4).Info(\"Pattern and resource have different structures.\", \"path\", path, \"expected\", fmt.Sprintf(\"%T\", patternElement), \"current\", fmt.Sprintf(\"%T\", resourceElement))\n\t\t\treturn path, fmt.Errorf(\"validation rule failed at path %s, resource does not satisfy the expected overlay pattern\", path)\n\t\t}\n\t\treturn validateArray(log, typedResourceElement, typedPatternElement, originPattern, path, ac)\n\t// elementary values\n\tcase string, float64, int, int64, bool, nil:\n\t\t/*Analyze pattern */\n\n\t\tswitch resource := resourceElement.(type) {\n\t\tcase []interface{}:\n\t\t\tfor _, res := range resource {\n\t\t\t\tif !pattern.Validate(log, res, patternElement) {\n\t\t\t\t\treturn path, fmt.Errorf(\"resource value '%v' does not match '%v' at path %s\", resourceElement, patternElement, path)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn \"\", nil\n\t\tdefault:\n\t\t\tif !pattern.Validate(log, resourceElement, patternElement) {\n\t\t\t\treturn path, fmt.Errorf(\"resource value 
'%v' does not match '%v' at path %s\", resourceElement, patternElement, path)\n\t\t\t}\n\t\t}\n\n\tdefault:\n\t\tlog.V(4).Info(\"Pattern contains unknown type\", \"path\", path, \"current\", fmt.Sprintf(\"%T\", patternElement))\n\t\treturn path, fmt.Errorf(\"failed at '%s', pattern contains unknown type\", path)\n\t}\n\treturn \"\", nil\n}", "func Use(ro ResourceOpener, input string) (err error) {\n\n\tvar r Resource\n\n\tfor {\n\t\tr, err = ro()\n\n\t\t// we should retry on transient errors\n\t\tif _, ok := err.(TransientError); ok == true {\n\t\t\tcontinue\n\t\t}\n\n\t\t// all other errors should be returned\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// if we don't have an error, break out of the\n\t\t// retry loop\n\t\tbreak\n\t}\n\n\t// if we've opened the resource, close it\n\tdefer r.Close()\n\n\t// if something triggers a panic, handle it\n\tdefer func(r Resource) {\n\t\tif pr := recover(); pr != nil {\n\n\t\t\t// we need to Defrob on FrobErrors, whatever frobbing may be\n\t\t\tif e, ok := pr.(FrobError); ok == true {\n\t\t\t\tr.Defrob(e.defrobTag)\n\t\t\t\terr = e\n\t\t\t}\n\n\t\t\t// overwite the return value\n\t\t\terr = errors.New(\"meh\")\n\t\t}\n\t}(r)\n\n\tr.Frob(input)\n\treturn err\n}", "func registerEndpoint(pattern string, methods []string, fn unboundEndpoint) {\n\tif endpoints == nil {\n\t\tendpoints = make(map[string]unboundEndpoint)\n\t}\n\tif endpoints[pattern] != nil || allowedMethods[pattern] != nil {\n\t\tpanic(fmt.Errorf(\"Pattern %q is already registered\", pattern))\n\t}\n\n\tendpoints[pattern] = fn\n\tallowedMethods[pattern] = methods\n}", "func TestAuthResource_WithInvalidRID_CausesPanic(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.Auth(\"method\", func(r res.AuthRequest) {\n\t\t\trestest.AssertPanicNoRecover(t, func() {\n\t\t\t\tr.Resource(\"test..foo\")\n\t\t\t})\n\t\t}))\n\t}, func(s *restest.Session) {\n\t\ts.Auth(\"test.model\", \"method\", 
nil).\n\t\t\tResponse().\n\t\t\tAssertErrorCode(res.CodeInternalError)\n\t})\n}", "func TestLookupHostMetaPanicsOnInvalidType(t *testing.T) {\n\tdefer func() {\n\t\tif r := recover(); r == nil {\n\t\t\tt.Error(\"lookupHostMeta should panic if an invalid conntype is specified.\")\n\t\t}\n\t}()\n\tlookupHostMeta(context.Background(), nil, \"name\", \"wssorbashorsomething\")\n}", "func (obj *match) HasPattern() bool {\n\treturn obj.pattern != \"\"\n}", "func TestResourceReqFailures(t *testing.T) {\n\t// create a resource manager instance\n\trm := newRsrcmgr(t, t.Name())\n\n\trsrc := &rproto.Resource{\n\t\tResourceType: \"type1\",\n\t\tResourceKind: rproto.ResourceKind_Scalar,\n\t\tScalar: &rproto.ScalarResource{\n\t\t\tTotalResource: 10,\n\t\t\tAvailableResource: 10,\n\t\t},\n\t}\n\n\t// add some providers\n\terr := rm.AddProvider(&rproto.ResourceProvide{Resource: rsrc, ProviderID: \"prov1\"})\n\tAssertOk(t, err, \"AddProvider failed\")\n\terr = rm.AddProvider(&rproto.ResourceProvide{Resource: rsrc, ProviderID: \"prov2\"})\n\tAssertOk(t, err, \"AddProvider failed\")\n\n\t// try to allocate more resource than any provider can provide\n\treq := &rproto.ResourceRequest{\n\t\tResourceType: \"type1\",\n\t\tAllocType: rproto.AllocType_Any,\n\t\tScheduler: \"leastUsed\",\n\t\tQuantity: 11,\n\t\tConsumerID: \"consumer1\",\n\t\tConstraints: &rproto.ResourceConstraints{},\n\t}\n\t_, err = rm.RequestResource(req)\n\tAssert(t, (err != nil), \"resource req succeeded while expecting to fail\", err)\n\treq.Quantity = 4\n\n\t// try to allocate an invalid resource type\n\treq.ResourceType = \"type2\"\n\t_, err = rm.RequestResource(req)\n\tAssert(t, (err != nil), \"invalid resource type resource req succeeded while expecting to fail\", err)\n\treq.ResourceType = \"type1\"\n\n\tconsumer := rproto.ResourceConsumer{\n\t\tConsumerID: \"consumer1\",\n\t\tProviderID: \"prov1\",\n\t\tResourceType: \"type1\",\n\t\tValues: []uint64{1},\n\t}\n\t// try some invalid resource releases\n\terr = 
rm.ReleaseResource(nil)\n\tAssert(t, (err != nil), \"resource release succeeded while expecting to fail\", err)\n\tconsumer.ResourceType = \"type2\"\n\terr = rm.ReleaseResource(&consumer)\n\tAssert(t, (err != nil), \"resource release succeeded while expecting to fail\", err)\n\tconsumer.ResourceType = \"type1\"\n\tconsumer.ProviderID = \"prov5\"\n\terr = rm.ReleaseResource(&consumer)\n\tAssert(t, (err != nil), \"resource release succeeded while expecting to fail\", err)\n\tconsumer.ProviderID = \"prov1\"\n\n\t// test idempotent release requests\n\terr = rm.ReleaseResource(&consumer)\n\tAssertOk(t, err, \"Idempotent release request failed\")\n\n\t// test a request with empty constraints\n\t_, err = rm.RequestResource(req)\n\tAssertOk(t, err, \"request with empty constraints failed\")\n\n}", "func TestManifestAPI_Get_UnknownSchema(t *testing.T) {}", "func match(r *manifest.RawResource) bool {\n\treturn (r.Kind == Kind && r.Type == Type) ||\n\t\t(r.Kind == Kind && r.Type == \"\")\n}", "func ResourceExists(dc discovery.DiscoveryInterface, apiGroupVersion, kind string) (bool, error) {\n\n\t_, apiLists, err := dc.ServerGroupsAndResources()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tfor _, apiList := range apiLists {\n\t\tif apiList.GroupVersion == apiGroupVersion {\n\t\t\tfor _, r := range apiList.APIResources {\n\t\t\t\tif r.Kind == kind {\n\t\t\t\t\treturn true, nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn false, nil\n}", "func ResourceExists(dc discovery.DiscoveryInterface, apiGroupVersion, kind string) (bool, error) {\n\n\t_, apiLists, err := dc.ServerGroupsAndResources()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tfor _, apiList := range apiLists {\n\t\tif apiList.GroupVersion == apiGroupVersion {\n\t\t\tfor _, r := range apiList.APIResources {\n\t\t\t\tif r.Kind == kind {\n\t\t\t\t\treturn true, nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn false, nil\n}", "func init() {\n\tvar matcher defaultMatcher\n\tRegister(\"default\", matcher)\n}", "func (err 
*RuleNotSatisfied) RuleNotSatisfied() {}", "func (w *Strap) checkResource(req *Request) (*Resource, error) {\n\n\tfor key, r := range w.resources {\n\t\tfor _, rr := range *r {\n\t\t\tif key == req.Name && rr.Method == strings.ToUpper(req.Method) {\n\t\t\t\treturn rr, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn nil, errors.New(\"Invalid resource or method\")\n}", "func newPattern(s string, h Handler) (*pattern, error) {\n\n\tslice := strings.Split(s, \"/\")\n\tp := pattern{\n\t\tsource: s,\n\t\thandler: h,\n\t\tslice: slice,\n\t\tvalMap: nil,\n\t}\n\n\tif err := p.check(); err != nil {\n\t\treturn nil, xerrors.Errorf(\"check error: %w\", err)\n\t}\n\n\treturn &p, nil\n}", "func ServeHandlePatternNotMatch(w http.ResponseWriter, r *http.Request) {\n\thttp.Error(w, \"Please, use 0-9 A-Z a-z _ for handle.\", http.StatusBadRequest)\n}", "func testResource(t *testing.T, s *Service) {\n\tp := &model.ArgRes{\n\t\tResID: 1233,\n\t}\n\tres, err := s.Resource(context.TODO(), p)\n\tif err != nil {\n\t\tt.Logf(\"testResource error(%v) \\n\", err)\n\t\treturn\n\t}\n\tt.Logf(\"testResource res: %+v \\n\", res)\n}", "func notSupported(w http.ResponseWriter, r *http.Request, body []byte, creds auth.Creds, vars map[string]string, req_id string) {\n\tglog.Warningf(\"Docker pattern not accepted, URI=%s\", r.RequestURI)\n\tNoEndpointHandler(w, r)\n}", "func TestAllSchemesMet(t *testing.T) {\n\t// Create of a map of scheme names and whether they are met\n\tschemes := map[string]bool{\n\t\t\"a\": true,\n\t\t\"b\": true,\n\t\t\"c\": false,\n\t}\n\n\t// Create the test cases\n\ttc := []struct {\n\t\tname string\n\t\terror bool\n\t}{\n\t\t{\n\t\t\tname: \"/ok\",\n\t\t\terror: false,\n\t\t},\n\t\t{\n\t\t\tname: \"/error\",\n\t\t\terror: true,\n\t\t},\n\t}\n\n\tdoc := openapi3.T{\n\t\tOpenAPI: \"3.0.0\",\n\t\tInfo: &openapi3.Info{\n\t\t\tTitle: \"MyAPI\",\n\t\t\tVersion: \"0.1\",\n\t\t},\n\t\tPaths: map[string]*openapi3.PathItem{},\n\t\tComponents: &openapi3.Components{\n\t\t\tSecuritySchemes: 
map[string]*openapi3.SecuritySchemeRef{},\n\t\t},\n\t}\n\n\t// Add the security schemes to the spec's components\n\tfor schemeName := range schemes {\n\t\tdoc.Components.SecuritySchemes[schemeName] = &openapi3.SecuritySchemeRef{\n\t\t\tValue: &openapi3.SecurityScheme{\n\t\t\t\tType: \"http\",\n\t\t\t\tScheme: \"basic\",\n\t\t\t},\n\t\t}\n\t}\n\n\t// Add the paths to the spec\n\tfor _, tc := range tc {\n\t\t// Create the security requirement for the path\n\t\tsecurityRequirement := openapi3.SecurityRequirement{}\n\t\tfor scheme, valid := range schemes {\n\t\t\t// If the scheme is valid or the test case is meant to return an error\n\t\t\tif valid || tc.error {\n\t\t\t\t// Add the scheme to the security requirement\n\t\t\t\tsecurityRequirement[scheme] = []string{}\n\t\t\t}\n\t\t}\n\n\t\tdoc.Paths[tc.name] = &openapi3.PathItem{\n\t\t\tGet: &openapi3.Operation{\n\t\t\t\tSecurity: &openapi3.SecurityRequirements{\n\t\t\t\t\tsecurityRequirement,\n\t\t\t\t},\n\t\t\t\tResponses: openapi3.NewResponses(),\n\t\t\t},\n\t\t}\n\t}\n\n\terr := doc.Validate(context.Background())\n\trequire.NoError(t, err)\n\trouter, err := legacyrouter.NewRouter(&doc)\n\trequire.NoError(t, err)\n\n\t// Create the authentication function\n\tauthFunc := makeAuthFunc(schemes)\n\n\tfor _, tc := range tc {\n\t\t// Create the request input for the path\n\t\ttcURL, err := url.Parse(tc.name)\n\t\trequire.NoError(t, err)\n\t\thttpReq := httptest.NewRequest(http.MethodGet, tcURL.String(), nil)\n\t\troute, _, err := router.FindRoute(httpReq)\n\t\trequire.NoError(t, err)\n\t\treq := RequestValidationInput{\n\t\t\tRoute: route,\n\t\t\tOptions: &Options{\n\t\t\t\tAuthenticationFunc: authFunc,\n\t\t\t},\n\t\t}\n\n\t\t// Validate the security requirements\n\t\terr = ValidateSecurityRequirements(context.Background(), &req, *route.Operation.Security)\n\n\t\t// If there should have been an error\n\t\tif tc.error {\n\t\t\trequire.Error(t, err)\n\t\t} else {\n\t\t\trequire.NoError(t, err)\n\t\t}\n\t}\n}", "func 
abortOnMatch(*rawDescriptor) error { return errAbort }", "func testGroupResourceExists(n string, group *ScimGroup, t *testing.T) resource.TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\t// find the corresponding state object\n\t\trs, ok := s.RootModule().Resources[n]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Not found: %s\", n)\n\t\t}\n\n\t\t// retrieve the configured client from the test setup\n\t\tconn := common.CommonEnvironmentClient()\n\t\tresp, err := NewGroupsAPI(conn).Read(rs.Primary.ID)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// If no error, assign the response Widget attribute to the widget pointer\n\t\t*group = resp\n\t\treturn nil\n\t}\n}", "func TestAuthResource_WithValidRID_SendsResourceResponse(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.Auth(\"method\", func(r res.AuthRequest) {\n\t\t\tr.Resource(\"test.foo\")\n\t\t}))\n\t}, func(s *restest.Session) {\n\t\ts.Auth(\"test.model\", \"method\", nil).\n\t\t\tResponse().\n\t\t\tAssertResource(\"test.foo\")\n\t})\n}", "func (s *RateLimitSuite) TestSpecIsOK(c *C) {\n\tc.Assert(plugin.NewRegistry().AddSpec(GetSpec()), IsNil)\n}", "func (s *RateLimitSuite) TestSpecIsOK(c *C) {\n\tc.Assert(plugin.NewRegistry().AddSpec(GetSpec()), IsNil)\n}", "func (m *Resource) IsOK() bool {\n\tswitch {\n\tcase len(m.name) == 0:\n\t\treturn false\n\tcase len(m.description) == 0:\n\t\treturn false\n\tcase m.schema == nil:\n\t\treturn false\n\tcase m.model == nil:\n\t\treturn false\n\tcase m.store == nil:\n\t\treturn false\n\tcase len(m.methods) == 0:\n\t\treturn false\n\tdefault:\n\t\treturn true\n\t}\n}", "func Use(o ResourceOpener, s string) (err error) {\n\tres, err := o()\n\tfor err != nil {\n\t\tif _, ok := err.(TransientError); ok {\n\t\t\tres, err = o()\n\t\t\tcontinue\n\t\t}\n\t\treturn err\n\t}\n\tdefer res.Close()\n\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tswitch e := r.(type) {\n\t\t\tcase 
FrobError:\n\t\t\t\tres.Defrob(e.defrobTag)\n\t\t\t\terr = e\n\t\t\tcase error:\n\t\t\t\terr = e\n\t\t\tdefault:\n\t\t\t\terr = fmt.Errorf(\"%v\", r)\n\t\t\t}\n\t\t}\n\t}()\n\n\tres.Frob(s)\n\treturn nil\n}", "func (m *Builder) MustAddPattern(name, pattern string) {\n\terr := m.AddPattern(name, pattern)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func checkResourceCreate(ctx context.Context, payloadResourceName string, get getResource) error {\n\t// Try to retrieve the resource by name.\n\t_, err := get(ctx, payloadResourceName)\n\n\t// If no error, then the resource already exists and cannot be created.\n\tif err == nil {\n\t\treturn trace.AlreadyExists(\"resource with name %q already exists\", payloadResourceName)\n\t}\n\n\t// If the error is not found, then the resource does not exist and can be created.\n\tif trace.IsNotFound(err) {\n\t\treturn nil\n\t}\n\n\treturn trace.Wrap(err)\n}", "func (eg *exampleGenerator) generateValidatedPatternExample() interface{} {\n\tif !eg.hasPatternValidation() {\n\t\treturn false\n\t}\n\tpattern := eg.a.Validation.Pattern\n\texample, err := regen.Generate(pattern)\n\tif err != nil {\n\t\treturn eg.r.faker.Name()\n\t}\n\treturn example\n}", "func isResourceNotFound(err error) bool {\n\tif err == nil {\n\t\treturn false\n\t}\n\tif strings.Contains(err.Error(), \"not found\") {\n\t\treturn true\n\t}\n\treturn false\n}", "func buildResourceRegexp(s string) (*regexp.Regexp, error) {\n\thash := strings.Split(s, \":\")\n\tfor i, v := range hash {\n\t\tif v == \"\" || v == \"*\" {\n\t\t\thash[i] = \".*\"\n\t\t}\n\t}\n\treturn regexp.Compile(strings.Join(hash, \":\"))\n}", "func TestRegisterRoute(t *testing.T) {\n\t// test cases\n\ttests := []struct {\n\t\tmethod string\n\t\tpath string\n\t\thandler func(*fasthttp.RequestCtx)\n\t\tisErr bool\n\t}{\n\t\t{method: MethodPut, path: \"/admin/welcome\", handler: emptyHandler, isErr: false},\n\t\t{method: MethodPost, path: \"/user/add\", handler: emptyHandler, isErr: false},\n\t\t{method: 
MethodGet, path: \"/account/get\", handler: emptyHandler, isErr: false},\n\t\t{method: MethodGet, path: \"/account/*\", handler: emptyHandler, isErr: false},\n\t\t{method: MethodDelete, path: \"/account/delete\", handler: emptyHandler, isErr: false},\n\t\t{method: MethodDelete, path: \"/account/delete\", handler: nil, isErr: true},\n\t\t{method: MethodGet, path: \"/account/*/getAccount\", handler: nil, isErr: true},\n\t}\n\n\t// create gearbox instance\n\tgb := new(gearbox)\n\tgb.registeredRoutes = make([]*routeInfo, 0)\n\n\t// counter for valid routes\n\tvalidCounter := 0\n\n\tfor _, tt := range tests {\n\t\terr := gb.registerRoute(tt.method, tt.path, tt.handler)\n\t\tif (err != nil && !tt.isErr) || (err == nil && tt.isErr) {\n\t\t\terrMsg := \"\"\n\n\t\t\t// get error message if there is\n\t\t\tif err != nil {\n\t\t\t\terrMsg = err.Error()\n\t\t\t}\n\n\t\t\tt.Errorf(\"input %v find error %t %s expecting error %t\", tt, err == nil, errMsg, tt.isErr)\n\t\t}\n\n\t\tif !tt.isErr {\n\t\t\tvalidCounter++\n\t\t}\n\t}\n\n\t// check valid counter is the same as count of registered routes\n\tcurrentCount := len(gb.registeredRoutes)\n\tif validCounter != currentCount {\n\t\tt.Errorf(\"input %d find %d expecting %d\", validCounter, currentCount, validCounter)\n\t}\n}", "func Test_pattern_manager_setpatterns3(t *testing.T) {\n\n\tpolicyPath := \"/tmp/servedpatterntest/\"\n\tmyorg1 := \"myorg1\"\n\tmyorg2 := \"myorg2\"\n\tpattern1 := \"pattern1\"\n\tpattern2 := \"pattern2\"\n\n\tservedPatterns1 := map[string]exchange.ServedPattern{\n\t\t\"myorg1_pattern1\": {\n\t\t\tOrg: myorg1,\n\t\t\tPattern: pattern1,\n\t\t},\n\t\t\"myorg1_pattern2\": {\n\t\t\tOrg: myorg1,\n\t\t\tPattern: pattern2,\n\t\t},\n\t\t\"myorg2_pattern2\": {\n\t\t\tOrg: myorg2,\n\t\t\tPattern: pattern2,\n\t\t},\n\t}\n\n\tservedPatterns2 := map[string]exchange.ServedPattern{\n\t\t\"myorg2_pattern1\": {\n\t\t\tOrg: myorg2,\n\t\t\tPattern: pattern1,\n\t\t},\n\t\t\"myorg2_pattern2\": {\n\t\t\tOrg: 
myorg2,\n\t\t\tPattern: pattern2,\n\t\t},\n\t}\n\n\tdefinedPatterns1 := map[string]exchange.Pattern{\n\t\t\"myorg1/pattern1\": exchange.Pattern{\n\t\t\tLabel: \"label\",\n\t\t\tDescription: \"description\",\n\t\t\tPublic: false,\n\t\t\tWorkloads: []exchange.WorkloadReference{\n\t\t\t\t{\n\t\t\t\t\tWorkloadURL: \"http://mydomain.com/workload/test1\",\n\t\t\t\t\tWorkloadOrg: \"testorg\",\n\t\t\t\t\tWorkloadArch: \"amd64\",\n\t\t\t\t\tWorkloadVersions: []exchange.WorkloadChoice{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tVersion: \"1.0.0\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tAgreementProtocols: []exchange.AgreementProtocol{\n\t\t\t\t{Name: \"Basic\"},\n\t\t\t},\n\t\t},\n\t\t\"myorg1/pattern2\": exchange.Pattern{\n\t\t\tLabel: \"label\",\n\t\t\tDescription: \"description\",\n\t\t\tPublic: false,\n\t\t\tWorkloads: []exchange.WorkloadReference{\n\t\t\t\t{\n\t\t\t\t\tWorkloadURL: \"http://mydomain.com/workload/test1\",\n\t\t\t\t\tWorkloadOrg: \"testorg\",\n\t\t\t\t\tWorkloadArch: \"amd64\",\n\t\t\t\t\tWorkloadVersions: []exchange.WorkloadChoice{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tVersion: \"2.0.0\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tAgreementProtocols: []exchange.AgreementProtocol{\n\t\t\t\t{Name: \"Basic\"},\n\t\t\t},\n\t\t},\n\t}\n\n\tdefinedPatterns2 := map[string]exchange.Pattern{\n\t\t\"myorg2/pattern1\": exchange.Pattern{\n\t\t\tLabel: \"label\",\n\t\t\tDescription: \"description\",\n\t\t\tPublic: false,\n\t\t\tWorkloads: []exchange.WorkloadReference{\n\t\t\t\t{\n\t\t\t\t\tWorkloadURL: \"http://mydomain.com/workload/test2\",\n\t\t\t\t\tWorkloadOrg: \"testorg\",\n\t\t\t\t\tWorkloadArch: \"amd64\",\n\t\t\t\t\tWorkloadVersions: []exchange.WorkloadChoice{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tVersion: \"1.4.0\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tAgreementProtocols: []exchange.AgreementProtocol{\n\t\t\t\t{Name: \"Basic\"},\n\t\t\t},\n\t\t},\n\t\t\"myorg2/pattern2\": exchange.Pattern{\n\t\t\tLabel: 
\"label\",\n\t\t\tDescription: \"description\",\n\t\t\tPublic: false,\n\t\t\tWorkloads: []exchange.WorkloadReference{\n\t\t\t\t{\n\t\t\t\t\tWorkloadURL: \"http://mydomain.com/workload/test2\",\n\t\t\t\t\tWorkloadOrg: \"testorg\",\n\t\t\t\t\tWorkloadArch: \"amd64\",\n\t\t\t\t\tWorkloadVersions: []exchange.WorkloadChoice{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tVersion: \"1.5.0\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tAgreementProtocols: []exchange.AgreementProtocol{\n\t\t\t\t{Name: \"Basic\"},\n\t\t\t},\n\t\t},\n\t}\n\n\t// setup test\n\tif err := cleanTestDir(policyPath); err != nil {\n\t\tt.Errorf(err.Error())\n\t}\n\n\t// run test\n\tif np := NewPatternManager(); np == nil {\n\t\tt.Errorf(\"Error: pattern manager not created\")\n\t} else if err := np.SetCurrentPatterns(servedPatterns1, policyPath); err != nil {\n\t\tt.Errorf(\"Error %v consuming served patterns %v\", err, servedPatterns1)\n\t} else if err := np.UpdatePatternPolicies(myorg1, definedPatterns1, policyPath); err != nil {\n\t\tt.Errorf(\"Error: error updating pattern policies, %v\", err)\n\t} else if err := np.UpdatePatternPolicies(myorg2, definedPatterns2, policyPath); err != nil {\n\t\tt.Errorf(\"Error: error updating pattern policies, %v\", err)\n\t} else if len(np.OrgPatterns) != 2 {\n\t\tt.Errorf(\"Error: should have 2 orgs in the PatternManager, have %v\", len(np.OrgPatterns))\n\t} else if !np.hasOrg(myorg1) {\n\t\tt.Errorf(\"Error: PM should have org %v but doesnt, has %v\", myorg1, np)\n\t} else if err := getPatternEntryFiles(np.OrgPatterns[myorg1][pattern1].PolicyFileNames); err != nil {\n\t\tt.Errorf(\"Error getting pattern entry files for %v %v, %v\", myorg1, pattern1, err)\n\t} else if err := getPatternEntryFiles(np.OrgPatterns[myorg1][pattern2].PolicyFileNames); err != nil {\n\t\tt.Errorf(\"Error getting pattern entry files for %v %v, %v\", myorg1, pattern2, err)\n\t} else if err := getPatternEntryFiles(np.OrgPatterns[myorg2][pattern2].PolicyFileNames); err != nil 
{\n\t\tt.Errorf(\"Error getting pattern entry files for %v %v, %v\", myorg2, pattern2, err)\n\t} else if err := np.SetCurrentPatterns(servedPatterns2, policyPath); err != nil {\n\t\tt.Errorf(\"Error %v consuming served patterns %v\", err, servedPatterns2)\n\t} else if err := np.UpdatePatternPolicies(myorg2, definedPatterns2, policyPath); err != nil {\n\t\tt.Errorf(\"Error: error updating pattern policies, %v\", err)\n\t} else if len(np.OrgPatterns) != 1 {\n\t\tt.Errorf(\"Error: should have 1 org in the PatternManager, have %v\", len(np.OrgPatterns))\n\t} else if !np.hasOrg(myorg2) {\n\t\tt.Errorf(\"Error: PM should have org %v but doesnt, has %v\", myorg2, np)\n\t} else if np.hasOrg(myorg1) {\n\t\tt.Errorf(\"Error: PM should NOT have org %v but does %v\", myorg1, np)\n\t} else if err := getPatternEntryFiles(np.OrgPatterns[myorg2][pattern1].PolicyFileNames); err != nil {\n\t\tt.Errorf(\"Error getting pattern entry files for %v %v, %v\", myorg2, pattern1, err)\n\t} else if err := getPatternEntryFiles(np.OrgPatterns[myorg2][pattern2].PolicyFileNames); err != nil {\n\t\tt.Errorf(\"Error getting pattern entry files for %v %v, %v\", myorg2, pattern2, err)\n\t} else if files, err := getPolicyFiles(policyPath + myorg1); err != nil {\n\t\tt.Errorf(err.Error())\n\t} else if len(files) != 0 {\n\t\tt.Errorf(\"Error: found policy files for %v, %v\", myorg1, files)\n\t} else {\n\t\tt.Log(np)\n\t}\n\n}", "func (p *fsPath) AsPattern() *Pattern {\n\tif anew, ok := p.TryPattern(); !ok {\n\t\tpanic(\"newPattern: Match returned an ErrBadPattern-error!\")\n\t} else {\n\t\treturn anew\n\t}\n}", "func Test_NotFound(t *testing.T) {\n\tvar (\n\t\tnotFoundMsg ErrorMessage\n\t\tresp *http.Response\n\t)\n\n\tsvc := NewService()\n\tts := httptest.NewServer(svc.NewRouter(\"*\"))\n\tdefer ts.Close()\n\n\treq, _ := http.NewRequest(\"GET\", ts.URL+\"/not_found\", nil)\n\n\toutputLog := helpers.CaptureOutput(func() {\n\t\tresp, _ = http.DefaultClient.Do(req)\n\t})\n\n\tif got, want := 
resp.StatusCode, http.StatusNotFound; got != want {\n\t\tt.Fatalf(\"Invalid status code, got %d but want %d\", got, want)\n\t}\n\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tt.Fatalf(\"Got an error when reading body: %s\", err.Error())\n\t}\n\terr = json.Unmarshal(data, &notFoundMsg)\n\tif err != nil {\n\t\tt.Fatalf(\"Got an error when parsing json: %s\", err.Error())\n\t}\n\tif got, want := notFoundMsg.Code, http.StatusNotFound; got != want {\n\t\tt.Fatalf(\"Wrong code return, got %d but want %d\", got, want)\n\t}\n\tif got, want := notFoundMsg.Message, \"Not Found\"; got != want {\n\t\tt.Fatalf(\"Wrong message return, got %s but want %s\", got, want)\n\t}\n\n\tmatched, err := regexp.MatchString(`uri=/not_found `, outputLog)\n\tif matched != true || err != nil {\n\t\tt.Fatalf(\"request is not logged :\\n%s\", outputLog)\n\t}\n}", "func (nse ErrNoSuchEndpoint) NotFound() {}", "func (uee *UnknownEndpointError) NotFound() {}", "func getSpecRefsForPattern(patName string,\n\tpatOrg string,\n\tdevId string,\n\tdevToken string,\n\tgetPatterns exchange.PatternHandler,\n\tresolveWorkload exchange.WorkloadResolverHandler,\n\tdb *bolt.DB,\n\tconfig *config.HorizonConfig, checkWorkloadConfig bool) (*policy.APISpecList, error) {\n\n\tglog.V(5).Infof(apiLogString(fmt.Sprintf(\"getSpecRefsForPattern %v org %v. Check workload config: %v\", patName, patOrg, checkWorkloadConfig)))\n\n\t// Get the pattern definition from the exchange. 
There should only be one pattern returned in the map.\n\tpattern, err := getPatterns(patOrg, patName, devId, devToken)\n\tif err != nil {\n\t\treturn nil, NewSystemError(fmt.Sprintf(\"Unable to read pattern object %v from exchange, error %v\", patName, err))\n\t} else if len(pattern) != 1 {\n\t\treturn nil, NewSystemError(fmt.Sprintf(\"Expected only 1 pattern from exchange, received %v\", len(pattern)))\n\t}\n\n\t// Get the pattern definition that we need to analyze.\n\tpatId := fmt.Sprintf(\"%v/%v\", patOrg, patName)\n\tpatternDef, ok := pattern[patId]\n\tif !ok {\n\t\treturn nil, NewSystemError(fmt.Sprintf(\"Expected pattern id not found in GET pattern response: %v\", pattern))\n\t}\n\n\tglog.V(5).Infof(apiLogString(fmt.Sprintf(\"working with pattern definition %v\", patternDef)))\n\n\t// For each workload in the pattern, resolve the workload to a list of required microservices.\n\tcompleteAPISpecList := new(policy.APISpecList)\n\tthisArch := cutil.ArchString()\n\tfor _, workload := range patternDef.Workloads {\n\n\t\t// Ignore workloads that don't match this node's hardware architecture.\n\t\tif workload.WorkloadArch != thisArch && config.ArchSynonyms.GetCanonicalArch(workload.WorkloadArch) != thisArch {\n\t\t\tglog.Infof(apiLogString(fmt.Sprintf(\"skipping workload because it is for a different hardware architecture, this node is %v. 
Skipped workload is: %v\", thisArch, workload)))\n\t\t\tcontinue\n\t\t}\n\n\t\t// Each workload in the pattern can specify rollback workload versions, so to get a fully qualified workload URL,\n\t\t// we need to iterate each workload choice to grab the version.\n\t\tfor _, workloadChoice := range workload.WorkloadVersions {\n\t\t\t_, workloadDef, err := resolveWorkload(workload.WorkloadURL, workload.WorkloadOrg, workloadChoice.Version, workload.WorkloadArch, devId, devToken)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, NewSystemError(fmt.Sprintf(\"Error resolving workload %v %v %v %v, error %v\", workload.WorkloadURL, workload.WorkloadOrg, workloadChoice.Version, thisArch, err))\n\t\t\t}\n\n\t\t\tif checkWorkloadConfig {\n\t\t\t\t// The workload might have variables that need to be configured. If so, find all relevant workloadconfig objects to make sure\n\t\t\t\t// there is a workload config available.\n\t\t\t\tif present, err := workloadConfigPresent(workloadDef, workload.WorkloadURL, workloadChoice.Version, db); err != nil {\n\t\t\t\t\treturn nil, NewSystemError(fmt.Sprintf(\"Error checking workload config, error %v\", err))\n\t\t\t\t} else if !present {\n\t\t\t\t\treturn nil, NewMSMissingVariableConfigError(fmt.Sprintf(\"Workload config for %v %v is missing\", workload.WorkloadURL, workloadChoice.Version), \"configstate.state\")\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// get the ms references from the workload, the version here is a version range.\n\t\t\tapiSpecList := new(policy.APISpecList)\n\t\t\tfor _, apiSpec := range workloadDef.APISpecs {\n\t\t\t\tif apiSpec.Arch != thisArch && config.ArchSynonyms.GetCanonicalArch(apiSpec.Arch) != thisArch {\n\t\t\t\t\treturn nil, NewSystemError(fmt.Sprintf(\"The referenced microservice %v by workload %v has the hardware architecture that is not supported by this node.\", apiSpec, workload.WorkloadURL))\n\t\t\t\t}\n\t\t\t\tnewAPISpec := policy.APISpecification_Factory(apiSpec.SpecRef, apiSpec.Org, apiSpec.Version, 
apiSpec.Arch)\n\t\t\t\t(*apiSpecList) = append((*apiSpecList), (*newAPISpec))\n\t\t\t}\n\n\t\t\t// MergeWith will omit exact duplicates when merging the 2 lists.\n\t\t\t(*completeAPISpecList) = completeAPISpecList.MergeWith(apiSpecList)\n\t\t}\n\n\t}\n\n\t// If the pattern search doesnt find any microservices then there is a problem.\n\tif len(*completeAPISpecList) == 0 {\n\t\treturn nil, nil\n\t}\n\n\t// for now, anax only allow one microservice version, so we need to get the common version range for each microservice.\n\tcommon_apispec_list, err := completeAPISpecList.GetCommonVersionRanges()\n\tif err != nil {\n\t\treturn nil, NewAPIUserInputError(fmt.Sprintf(\"Error resolving the common version ranges for the referenced microservices for %v %v. %v\", patId, thisArch, err), \"configstate.state\")\n\t}\n\tglog.V(5).Infof(apiLogString(fmt.Sprintf(\"getSpecRefsForPattern resolved microservice version ranges to %v\", *common_apispec_list)))\n\n\treturn common_apispec_list, nil\n}", "func TestValidation(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\tinput string\n\t\texpectedError error\n\t}{\n\t\t{\n\t\t\t\"when no OpenAPI property is supplied\",\n\t\t\t`\ninfo:\n title: \"Hello World REST APIs\"\n version: \"1.0\"\npaths:\n \"/api/v2/greetings.json\":\n get:\n operationId: listGreetings\n responses:\n 200:\n description: \"List different greetings\"\n \"/api/v2/greetings/{id}.json\":\n parameters:\n - name: id\n in: path\n required: true\n schema:\n type: string\n example: \"greeting\"\n get:\n operationId: showGreeting\n responses:\n 200:\n description: \"Get a single greeting object\"\n`,\n\t\t\terrors.New(\"value of openapi must be a non-empty JSON string\"),\n\t\t},\n\t\t{\n\t\t\t\"when an empty OpenAPI property is supplied\",\n\t\t\t`\nopenapi: ''\ninfo:\n title: \"Hello World REST APIs\"\n version: \"1.0\"\npaths:\n \"/api/v2/greetings.json\":\n get:\n operationId: listGreetings\n responses:\n 200:\n description: \"List different greetings\"\n 
\"/api/v2/greetings/{id}.json\":\n parameters:\n - name: id\n in: path\n required: true\n schema:\n type: string\n example: \"greeting\"\n get:\n operationId: showGreeting\n responses:\n 200:\n description: \"Get a single greeting object\"\n`,\n\t\t\terrors.New(\"value of openapi must be a non-empty JSON string\"),\n\t\t},\n\t\t{\n\t\t\t\"when the Info property is not supplied\",\n\t\t\t`\nopenapi: '1.0'\npaths:\n \"/api/v2/greetings.json\":\n get:\n operationId: listGreetings\n responses:\n 200:\n description: \"List different greetings\"\n \"/api/v2/greetings/{id}.json\":\n parameters:\n - name: id\n in: path\n required: true\n schema:\n type: string\n example: \"greeting\"\n get:\n operationId: showGreeting\n responses:\n 200:\n description: \"Get a single greeting object\"\n`,\n\t\t\terrors.New(\"invalid info: must be a JSON object\"),\n\t\t},\n\t\t{\n\t\t\t\"when the Paths property is not supplied\",\n\t\t\t`\nopenapi: '1.0'\ninfo:\n title: \"Hello World REST APIs\"\n version: \"1.0\"\n`,\n\t\t\terrors.New(\"invalid paths: must be a JSON object\"),\n\t\t},\n\t\t{\n\t\t\t\"when a valid spec is supplied\",\n\t\t\t`\nopenapi: 3.0.2\ninfo:\n title: \"Hello World REST APIs\"\n version: \"1.0\"\npaths:\n \"/api/v2/greetings.json\":\n get:\n operationId: listGreetings\n responses:\n 200:\n description: \"List different greetings\"\n \"/api/v2/greetings/{id}.json\":\n parameters:\n - name: id\n in: path\n required: true\n schema:\n type: string\n example: \"greeting\"\n get:\n operationId: showGreeting\n responses:\n 200:\n description: \"Get a single greeting object\"\ncomponents:\n schemas:\n GreetingObject:\n properties:\n id:\n type: string\n type:\n type: string\n default: \"greeting\"\n attributes:\n properties:\n description:\n type: string\n`,\n\t\t\tnil,\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tdoc := &openapi3.Swagger{}\n\t\t\terr := yaml.Unmarshal([]byte(test.input), &doc)\n\t\t\trequire.NoError(t, 
err)\n\n\t\t\tc := context.Background()\n\t\t\tvalidationErr := doc.Validate(c)\n\n\t\t\trequire.Equal(t, test.expectedError, validationErr, \"expected errors (or lack of) to match\")\n\t\t})\n\t}\n}", "func (e NotFound) IsNotFound() {}", "func Test_pattern_manager_setpatterns0(t *testing.T) {\n\n\tpolicyPath := \"/tmp/servedpatterntest/\"\n\tservedPatterns := map[string]exchange.ServedPattern{}\n\n\tif np := NewPatternManager(); np == nil {\n\t\tt.Errorf(\"Error: pattern manager not created\")\n\t} else if err := np.SetCurrentPatterns(servedPatterns, policyPath); err != nil {\n\t\tt.Errorf(\"Error %v consuming served patterns %v\", err, servedPatterns)\n\t} else if len(np.OrgPatterns) != 0 {\n\t\tt.Errorf(\"Error: should have 0 org in the PatternManager, have %v\", len(np.OrgPatterns))\n\t} else {\n\t\tt.Log(np)\n\t}\n\n}", "func TestResourcesCtrlCreate(t *testing.T) {\n\ta := assert.New(t)\n\tr := require.New(t)\n\tparams := utils.NewFakeParamsGetter()\n\trender := utils.NewFakeRender()\n\tinter := &resourcesCtrlResourcesInter{}\n\tvalid := &resourcesCtrlResourcesValid{}\n\trecorder := httptest.NewRecorder()\n\tctrl := NewResourcesCtrl(inter, render, params, valid)\n\tresourceIn := &models.Resource{}\n\tresourceOut := &models.Resource{}\n\n\tvalid.errValid = true\n\n\t// Validation error\n\tctrl.Create(recorder, utils.FakeRequest(\"POST\", \"http://foo.bar/resources\", resourceIn))\n\tr.Equal(422, render.Status)\n\tr.NotEmpty(recorder.Body.Bytes())\n\tr.NotNil(render.APIError)\n\ta.IsType(errs.API.Validation, render.APIError)\n\tutils.Clear(params, render, recorder)\n\n\tvalid.errValid = false\n\n\t// No error, one resource is created\n\tctrl.Create(recorder, utils.FakeRequest(\"POST\", \"http://foo.bar/resources\", resourceIn))\n\tr.Equal(201, render.Status)\n\terr := json.NewDecoder(recorder.Body).Decode(resourceOut)\n\tr.NoError(err)\n\ta.NotNil(resourceOut)\n\tutils.Clear(params, render, recorder)\n\n\t// Null body decoding error\n\tctrl.Create(recorder, 
utils.FakeRequestRaw(\"POST\", \"http://foo.bar/resources\", nil))\n\tr.Equal(400, render.Status)\n\tr.NotEmpty(recorder.Body.Bytes())\n\tr.NotNil(render.APIError)\n\ta.IsType(errs.API.BodyDecoding, render.APIError)\n\tutils.Clear(params, render, recorder)\n\n\t// Body decoding error\n\tctrl.Create(recorder, utils.FakeRequestRaw(\"POST\", \"http://foo.bar/resources\", []byte{'{'}))\n\tr.Equal(400, render.Status)\n\tr.NotEmpty(recorder.Body.Bytes())\n\tr.NotNil(render.APIError)\n\ta.IsType(errs.API.BodyDecoding, render.APIError)\n\tutils.Clear(params, render, recorder)\n\n\t// The interactor returns a database error\n\tinter.errDB = true\n\tctrl.Create(recorder, utils.FakeRequest(\"POST\", \"http://foo.bar/resources\", resourceIn))\n\tr.Equal(500, render.Status)\n\tr.NotEmpty(recorder.Body.Bytes())\n\tr.NotNil(render.APIError)\n\ta.IsType(errs.API.Internal, render.APIError)\n\tutils.Clear(params, render, recorder)\n}", "func registerResourceMitigation(request *libcoap.Pdu, typ reflect.Type, controller controllers.ControllerInterface, session *libcoap.Session,\n context *libcoap.Context, is_unknown bool) (interface{}, string, error) {\n\n hex := hex.Dump(request.Data)\n if request.Code == libcoap.RequestPut && !strings.Contains(hex, string(libcoap.IETF_MITIGATION_SCOPE_HEX)) {\n return nil, \"\", errors.New(\"Body data MUST be mitigation request\")\n }\n body, err := messages.UnmarshalCbor(request, reflect.TypeOf(messages.MitigationRequest{}))\n if err != nil {\n return nil, \"\", err\n }\n\n var resourcePath string\n\n // Create sub resource to handle observation on behalf of Unknown resource in case of mitigation PUT\n if is_unknown && request.Code == libcoap.RequestPut {\n p := request.PathString()\n resourcePath = p\n r := libcoap.ResourceInit(&p, 0)\n r.TurnOnResourceObservable()\n r.RegisterHandler(libcoap.RequestGet, toMethodHandler(controller.HandleGet, typ, controller, !is_unknown))\n r.RegisterHandler(libcoap.RequestPut, 
toMethodHandler(controller.HandlePut, typ, controller, !is_unknown))\n r.RegisterHandler(libcoap.RequestPost, toMethodHandler(controller.HandlePost, typ, controller, !is_unknown))\n r.RegisterHandler(libcoap.RequestDelete, toMethodHandler(controller.HandleDelete, typ, controller, !is_unknown))\n context.AddResource(r)\n log.Debugf(\"Create sub resource to handle observation later : uri-path=%+v\", p)\n // Create sub resource for handle get all with observe option\n pa := strings.Split(p, \"/mid\")\n if len(pa) > 1 {\n resourceAll := context.GetResourceByQuery(&pa[0])\n if resourceAll == nil {\n ra := libcoap.ResourceInit(&pa[0], 0)\n ra.TurnOnResourceObservable()\n ra.RegisterHandler(libcoap.RequestGet, toMethodHandler(controller.HandleGet, typ, controller, !is_unknown))\n context.AddResource(ra)\n log.Debugf(\"Create observer in sub-resource with query: %+v\", pa[0])\n }\n }\n }\n return body, resourcePath, nil\n}", "func Test_pattern_manager_setpatterns2(t *testing.T) {\n\n\tpolicyPath := \"/tmp/servedpatterntest/\"\n\tmyorg1 := \"myorg1\"\n\tmyorg2 := \"myorg2\"\n\tpattern1 := \"pattern1\"\n\tpattern2 := \"pattern2\"\n\n\tservedPatterns1 := map[string]exchange.ServedPattern{\n\t\t\"myorg1_pattern1\": {\n\t\t\tOrg: myorg1,\n\t\t\tPattern: pattern1,\n\t\t},\n\t}\n\n\tservedPatterns2 := map[string]exchange.ServedPattern{\n\t\t\"myorg2_pattern2\": {\n\t\t\tOrg: myorg2,\n\t\t\tPattern: pattern2,\n\t\t},\n\t}\n\n\tdefinedPatterns1 := map[string]exchange.Pattern{\n\t\t\"myorg1/pattern1\": exchange.Pattern{\n\t\t\tLabel: \"label\",\n\t\t\tDescription: \"description\",\n\t\t\tPublic: false,\n\t\t\tWorkloads: []exchange.WorkloadReference{\n\t\t\t\t{\n\t\t\t\t\tWorkloadURL: \"http://mydomain.com/workload/test1\",\n\t\t\t\t\tWorkloadOrg: \"testorg\",\n\t\t\t\t\tWorkloadArch: \"amd64\",\n\t\t\t\t\tWorkloadVersions: []exchange.WorkloadChoice{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tVersion: \"1.0.0\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tAgreementProtocols: 
[]exchange.AgreementProtocol{\n\t\t\t\t{Name: \"Basic\"},\n\t\t\t},\n\t\t},\n\t}\n\n\tdefinedPatterns2 := map[string]exchange.Pattern{\n\t\t\"myorg2/pattern2\": exchange.Pattern{\n\t\t\tLabel: \"label\",\n\t\t\tDescription: \"description\",\n\t\t\tPublic: false,\n\t\t\tWorkloads: []exchange.WorkloadReference{\n\t\t\t\t{\n\t\t\t\t\tWorkloadURL: \"http://mydomain.com/workload/test2\",\n\t\t\t\t\tWorkloadOrg: \"testorg\",\n\t\t\t\t\tWorkloadArch: \"amd64\",\n\t\t\t\t\tWorkloadVersions: []exchange.WorkloadChoice{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tVersion: \"1.5.0\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tAgreementProtocols: []exchange.AgreementProtocol{\n\t\t\t\t{Name: \"Basic\"},\n\t\t\t},\n\t\t},\n\t}\n\n\t// setup test\n\tif err := cleanTestDir(policyPath); err != nil {\n\t\tt.Errorf(err.Error())\n\t}\n\n\t// run test\n\tif np := NewPatternManager(); np == nil {\n\t\tt.Errorf(\"Error: pattern manager not created\")\n\t} else if err := np.SetCurrentPatterns(servedPatterns1, policyPath); err != nil {\n\t\tt.Errorf(\"Error %v consuming served patterns %v\", err, servedPatterns1)\n\t} else if err := np.UpdatePatternPolicies(myorg1, definedPatterns1, policyPath); err != nil {\n\t\tt.Errorf(\"Error: error updating pattern policies, %v\", err)\n\t} else if len(np.OrgPatterns) != 1 {\n\t\tt.Errorf(\"Error: should have 1 org in the PatternManager, have %v\", len(np.OrgPatterns))\n\t} else if !np.hasOrg(myorg1) {\n\t\tt.Errorf(\"Error: PM should have org %v but doesnt, has %v\", myorg1, np)\n\t} else if err := getPatternEntryFiles(np.OrgPatterns[myorg1][pattern1].PolicyFileNames); err != nil {\n\t\tt.Errorf(\"Error getting pattern entry files for %v %v, %v\", myorg1, pattern1, err)\n\t} else if err := np.SetCurrentPatterns(servedPatterns2, policyPath); err != nil {\n\t\tt.Errorf(\"Error %v consuming served patterns %v\", err, servedPatterns2)\n\t} else if err := np.UpdatePatternPolicies(myorg2, definedPatterns2, policyPath); err != nil {\n\t\tt.Errorf(\"Error: 
error updating pattern policies, %v\", err)\n\t} else if len(np.OrgPatterns) != 1 {\n\t\tt.Errorf(\"Error: should have 1 org in the PatternManager, have %v\", len(np.OrgPatterns))\n\t} else if !np.hasOrg(myorg2) {\n\t\tt.Errorf(\"Error: PM should have org %v but doesnt, has %v\", myorg2, np)\n\t} else if np.hasOrg(myorg1) {\n\t\tt.Errorf(\"Error: PM should NOT have org %v but does %v\", myorg1, np)\n\t} else if err := getPatternEntryFiles(np.OrgPatterns[myorg2][pattern2].PolicyFileNames); err != nil {\n\t\tt.Errorf(\"Error getting pattern entry files for %v %v, %v\", myorg2, pattern2, err)\n\t} else if files, err := getPolicyFiles(policyPath + myorg1); err != nil {\n\t\tt.Errorf(err.Error())\n\t} else if len(files) != 0 {\n\t\tt.Errorf(\"Error: found policy files for %v, %v\", myorg1, files)\n\t} else {\n\t\tt.Log(np)\n\t}\n\n}", "func checkAPIResourceIsPresent(available []*meta_v1.APIResourceList, resource meta_v1_unstruct.Unstructured) (*meta_v1.APIResource, bool) {\n\tfor _, rList := range available {\n\t\tif rList == nil {\n\t\t\tcontinue\n\t\t}\n\t\tgroup := rList.GroupVersion\n\t\tfor _, r := range rList.APIResources {\n\t\t\tif group == resource.GroupVersionKind().GroupVersion().String() && r.Kind == resource.GetKind() {\n\t\t\t\tr.Group = rList.GroupVersion\n\t\t\t\tr.Kind = rList.Kind\n\t\t\t\treturn &r, true\n\t\t\t}\n\t\t}\n\t}\n\treturn nil, false\n}", "func NewNotFound(name, group, resource string) error {\n\treturn errors.NewNotFound(schema.GroupResource{Group: group, Resource: resource}, name)\n}", "func FailedPattern(name, in, pattern string, value interface{}) *Validation {\n\tvar msg string\n\tif in == \"\" {\n\t\tmsg = fmt.Sprintf(patternFailNoIn, name, pattern)\n\t} else {\n\t\tmsg = fmt.Sprintf(patternFail, name, in, pattern)\n\t}\n\n\treturn &Validation{\n\t\tcode: PatternFailCode,\n\t\tName: name,\n\t\tIn: in,\n\t\tValue: value,\n\t\tmessage: msg,\n\t}\n}", "func testResourceAll(t *testing.T, s *Service) {\n\tres, err := 
s.ResourceAll(context.TODO())\n\tif err != nil {\n\t\tt.Logf(\"testResourceAll error(%v) \\n\", err)\n\t\treturn\n\t}\n\tt.Logf(\"testResourceAll res: %+v \\n\", res)\n}", "func TestRejectShortName(t *testing.T) {\n if err := Create(\"short\", \"\", \"\", nil); err == nil {\n t.Error(\"Short handle worked\")\n }\n}", "func Test_pattern_manager_setpatterns4(t *testing.T) {\n\n\tpolicyPath := \"/tmp/servedpatterntest/\"\n\tmyorg1 := \"myorg1\"\n\tmyorg2 := \"myorg2\"\n\tpattern1 := \"pattern1\"\n\tpattern2 := \"pattern2\"\n\n\tservedPatterns1 := map[string]exchange.ServedPattern{\n\t\t\"myorg1_pattern1\": {\n\t\t\tOrg: myorg1,\n\t\t\tPattern: pattern1,\n\t\t},\n\t\t\"myorg1_pattern2\": {\n\t\t\tOrg: myorg1,\n\t\t\tPattern: pattern2,\n\t\t},\n\t\t\"myorg2_pattern2\": {\n\t\t\tOrg: myorg2,\n\t\t\tPattern: pattern2,\n\t\t},\n\t}\n\n\tservedPatterns2 := map[string]exchange.ServedPattern{\n\t\t\"myorg1_pattern1\": {\n\t\t\tOrg: myorg1,\n\t\t\tPattern: pattern1,\n\t\t},\n\t\t\"myorg2_pattern1\": {\n\t\t\tOrg: myorg2,\n\t\t\tPattern: pattern1,\n\t\t},\n\t\t\"myorg2_pattern2\": {\n\t\t\tOrg: myorg2,\n\t\t\tPattern: pattern2,\n\t\t},\n\t}\n\n\tdefinedPatterns1 := map[string]exchange.Pattern{\n\t\t\"myorg1/pattern1\": exchange.Pattern{\n\t\t\tLabel: \"label\",\n\t\t\tDescription: \"description\",\n\t\t\tPublic: false,\n\t\t\tWorkloads: []exchange.WorkloadReference{\n\t\t\t\t{\n\t\t\t\t\tWorkloadURL: \"http://mydomain.com/workload/test1\",\n\t\t\t\t\tWorkloadOrg: \"testorg\",\n\t\t\t\t\tWorkloadArch: \"amd64\",\n\t\t\t\t\tWorkloadVersions: []exchange.WorkloadChoice{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tVersion: \"1.0.0\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tAgreementProtocols: []exchange.AgreementProtocol{\n\t\t\t\t{Name: \"Basic\"},\n\t\t\t},\n\t\t},\n\t\t\"myorg1/pattern2\": exchange.Pattern{\n\t\t\tLabel: \"label\",\n\t\t\tDescription: \"description\",\n\t\t\tPublic: false,\n\t\t\tWorkloads: []exchange.WorkloadReference{\n\t\t\t\t{\n\t\t\t\t\tWorkloadURL: 
\"http://mydomain.com/workload/test1\",\n\t\t\t\t\tWorkloadOrg: \"testorg\",\n\t\t\t\t\tWorkloadArch: \"amd64\",\n\t\t\t\t\tWorkloadVersions: []exchange.WorkloadChoice{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tVersion: \"2.0.0\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tAgreementProtocols: []exchange.AgreementProtocol{\n\t\t\t\t{Name: \"Basic\"},\n\t\t\t},\n\t\t},\n\t}\n\n\tdefinedPatterns2 := map[string]exchange.Pattern{\n\t\t\"myorg2/pattern1\": exchange.Pattern{\n\t\t\tLabel: \"label\",\n\t\t\tDescription: \"description\",\n\t\t\tPublic: false,\n\t\t\tWorkloads: []exchange.WorkloadReference{\n\t\t\t\t{\n\t\t\t\t\tWorkloadURL: \"http://mydomain.com/workload/test2\",\n\t\t\t\t\tWorkloadOrg: \"testorg\",\n\t\t\t\t\tWorkloadArch: \"amd64\",\n\t\t\t\t\tWorkloadVersions: []exchange.WorkloadChoice{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tVersion: \"1.4.0\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tAgreementProtocols: []exchange.AgreementProtocol{\n\t\t\t\t{Name: \"Basic\"},\n\t\t\t},\n\t\t},\n\t\t\"myorg2/pattern2\": exchange.Pattern{\n\t\t\tLabel: \"label\",\n\t\t\tDescription: \"description\",\n\t\t\tPublic: false,\n\t\t\tWorkloads: []exchange.WorkloadReference{\n\t\t\t\t{\n\t\t\t\t\tWorkloadURL: \"http://mydomain.com/workload/test2\",\n\t\t\t\t\tWorkloadOrg: \"testorg\",\n\t\t\t\t\tWorkloadArch: \"amd64\",\n\t\t\t\t\tWorkloadVersions: []exchange.WorkloadChoice{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tVersion: \"1.5.0\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tAgreementProtocols: []exchange.AgreementProtocol{\n\t\t\t\t{Name: \"Basic\"},\n\t\t\t},\n\t\t},\n\t}\n\n\t// setup the test\n\tif err := cleanTestDir(policyPath); err != nil {\n\t\tt.Errorf(err.Error())\n\t}\n\n\t// run the test\n\tif np := NewPatternManager(); np == nil {\n\t\tt.Errorf(\"Error: pattern manager not created\")\n\t} else if err := np.SetCurrentPatterns(servedPatterns1, policyPath); err != nil {\n\t\tt.Errorf(\"Error %v consuming served patterns %v\", err, 
servedPatterns1)\n\t} else if err := np.UpdatePatternPolicies(myorg1, definedPatterns1, policyPath); err != nil {\n\t\tt.Errorf(\"Error: error updating pattern policies, %v\", err)\n\t} else if err := np.UpdatePatternPolicies(myorg2, definedPatterns2, policyPath); err != nil {\n\t\tt.Errorf(\"Error: error updating pattern policies, %v\", err)\n\t} else if len(np.OrgPatterns) != 2 {\n\t\tt.Errorf(\"Error: should have 2 orgs in the PatternManager, have %v\", len(np.OrgPatterns))\n\t} else if !np.hasOrg(myorg1) {\n\t\tt.Errorf(\"Error: PM should have org %v but doesnt, has %v\", myorg1, np)\n\t} else if !np.hasOrg(myorg2) {\n\t\tt.Errorf(\"Error: PM should have org %v but doesnt, has %v\", myorg2, np)\n\t} else if err := getPatternEntryFiles(np.OrgPatterns[myorg1][pattern1].PolicyFileNames); err != nil {\n\t\tt.Errorf(\"Error getting pattern entry files for %v %v, %v\", myorg1, pattern1, err)\n\t} else if err := getPatternEntryFiles(np.OrgPatterns[myorg1][pattern2].PolicyFileNames); err != nil {\n\t\tt.Errorf(\"Error getting pattern entry files for %v %v, %v\", myorg1, pattern2, err)\n\t} else if err := getPatternEntryFiles(np.OrgPatterns[myorg2][pattern2].PolicyFileNames); err != nil {\n\t\tt.Errorf(\"Error getting pattern entry files for %v %v, %v\", myorg2, pattern2, err)\n\t} else if err := np.SetCurrentPatterns(servedPatterns2, policyPath); err != nil {\n\t\tt.Errorf(\"Error %v consuming served patterns %v\", err, servedPatterns2)\n\t} else if err := np.UpdatePatternPolicies(myorg1, definedPatterns1, policyPath); err != nil {\n\t\tt.Errorf(\"Error: error updating pattern policies, %v\", err)\n\t} else if err := np.UpdatePatternPolicies(myorg2, definedPatterns2, policyPath); err != nil {\n\t\tt.Errorf(\"Error: error updating pattern policies, %v\", err)\n\t} else if len(np.OrgPatterns) != 2 {\n\t\tt.Errorf(\"Error: should have 2 org in the PatternManager, have %v\", len(np.OrgPatterns))\n\t} else if !np.hasOrg(myorg2) {\n\t\tt.Errorf(\"Error: PM should have org %v 
but doesnt, has %v\", myorg2, np)\n\t} else if !np.hasOrg(myorg1) {\n\t\tt.Errorf(\"Error: PM should have org %v but doesnt, has %v\", myorg1, np)\n\t} else if err := getPatternEntryFiles(np.OrgPatterns[myorg1][pattern1].PolicyFileNames); err != nil {\n\t\tt.Errorf(\"Error getting pattern entry files for %v %v, %v\", myorg1, pattern1, err)\n\t} else if err := getPatternEntryFiles(np.OrgPatterns[myorg2][pattern1].PolicyFileNames); err != nil {\n\t\tt.Errorf(\"Error getting pattern entry files for %v %v, %v\", myorg2, pattern1, err)\n\t} else if err := getPatternEntryFiles(np.OrgPatterns[myorg2][pattern2].PolicyFileNames); err != nil {\n\t\tt.Errorf(\"Error getting pattern entry files for %v %v, %v\", myorg2, pattern2, err)\n\t} else {\n\t\tt.Log(np)\n\t}\n\n}", "func Has(path string) bool { return mustGetDefaultProvider().Has(path) }", "func TestListResources_DuplicateResourceFilterByLabel(t *testing.T) {\n\tt.Parallel()\n\tctx := context.Background()\n\n\tbackend, err := lite.NewWithConfig(ctx, lite.Config{\n\t\tPath: t.TempDir(),\n\t\tClock: clockwork.NewFakeClock(),\n\t})\n\trequire.NoError(t, err)\n\n\tpresence := NewPresenceService(backend)\n\n\t// Same resource name, but have different labels.\n\tnames := []string{\"a\", \"a\", \"a\", \"a\"}\n\tlabels := []map[string]string{\n\t\t{\"env\": \"prod\"},\n\t\t{\"env\": \"dev\"},\n\t\t{\"env\": \"qa\"},\n\t\t{\"env\": \"dev\"},\n\t}\n\n\ttests := []struct {\n\t\tname string\n\t\tkind string\n\t\tinsertResources func()\n\t\twantNames []string\n\t}{\n\t\t{\n\t\t\tname: \"KindDatabaseServer\",\n\t\t\tkind: types.KindDatabaseServer,\n\t\t\tinsertResources: func() {\n\t\t\t\tfor i := 0; i < len(names); i++ {\n\t\t\t\t\tdb, err := types.NewDatabaseServerV3(types.Metadata{\n\t\t\t\t\t\tName: fmt.Sprintf(\"name-%v\", i),\n\t\t\t\t\t}, types.DatabaseServerSpecV3{\n\t\t\t\t\t\tHostID: \"_\",\n\t\t\t\t\t\tHostname: \"_\",\n\t\t\t\t\t\tDatabase: &types.DatabaseV3{\n\t\t\t\t\t\t\tMetadata: types.Metadata{\n\t\t\t\t\t\t\t\tName: 
names[i],\n\t\t\t\t\t\t\t\tLabels: labels[i],\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSpec: types.DatabaseSpecV3{\n\t\t\t\t\t\t\t\tProtocol: \"_\",\n\t\t\t\t\t\t\t\tURI: \"_\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t})\n\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t\t_, err = presence.UpsertDatabaseServer(ctx, db)\n\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"KindAppServer\",\n\t\t\tkind: types.KindAppServer,\n\t\t\tinsertResources: func() {\n\t\t\t\tfor i := 0; i < len(names); i++ {\n\t\t\t\t\tserver, err := types.NewAppServerV3(types.Metadata{\n\t\t\t\t\t\tName: fmt.Sprintf(\"name-%v\", i),\n\t\t\t\t\t}, types.AppServerSpecV3{\n\t\t\t\t\t\tHostID: \"_\",\n\t\t\t\t\t\tApp: &types.AppV3{\n\t\t\t\t\t\t\tMetadata: types.Metadata{\n\t\t\t\t\t\t\t\tName: names[i],\n\t\t\t\t\t\t\t\tLabels: labels[i],\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSpec: types.AppSpecV3{URI: \"_\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t})\n\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t\t_, err = presence.UpsertApplicationServer(ctx, server)\n\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"KindKubernetesCluster\",\n\t\t\tkind: types.KindKubernetesCluster,\n\t\t\tinsertResources: func() {\n\t\t\t\tfor i := 0; i < len(names); i++ {\n\n\t\t\t\t\tkube, err := types.NewKubernetesClusterV3(\n\t\t\t\t\t\ttypes.Metadata{\n\t\t\t\t\t\t\tName: names[i],\n\t\t\t\t\t\t\tLabels: labels[i],\n\t\t\t\t\t\t},\n\t\t\t\t\t\ttypes.KubernetesClusterSpecV3{},\n\t\t\t\t\t)\n\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t\tkubeServer, err := types.NewKubernetesServerV3FromCluster(\n\t\t\t\t\t\tkube,\n\t\t\t\t\t\tfmt.Sprintf(\"host-%v\", i),\n\t\t\t\t\t\tfmt.Sprintf(\"hostID-%v\", i),\n\t\t\t\t\t)\n\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t\t// Upsert server.\n\t\t\t\t\t_, err = presence.UpsertKubernetesServer(ctx, kubeServer)\n\t\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tc := range tests {\n\t\tt.Run(tc.name, func(t *testing.T) 
{\n\t\t\ttc.insertResources()\n\n\t\t\t// Look among the duplicated resource by label\n\t\t\tresp, err := presence.ListResources(ctx, proto.ListResourcesRequest{\n\t\t\t\tResourceType: tc.kind,\n\t\t\t\tNeedTotalCount: true,\n\t\t\t\tLimit: 5,\n\t\t\t\tSearchKeywords: []string{\"dev\"},\n\t\t\t})\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Len(t, resp.Resources, 1)\n\t\t\trequire.Equal(t, 1, resp.TotalCount)\n\t\t\trequire.Equal(t, map[string]string{\"env\": \"dev\"}, resp.Resources[0].GetAllLabels())\n\t\t})\n\t}\n}", "func TestResourcesCtrlFind(t *testing.T) {\n\ta := assert.New(t)\n\tr := require.New(t)\n\tparams := utils.NewFakeParamsGetter()\n\trender := utils.NewFakeRender()\n\tinter := &resourcesCtrlResourcesInter{}\n\trecorder := httptest.NewRecorder()\n\tctrl := NewResourcesCtrl(inter, render, params, nil)\n\tresourcesOut := []models.Resource{}\n\n\t// No error, 3 resources are returned\n\tctrl.Find(recorder, utils.FakeRequest(\"GET\", \"http://foo.bar/resources\", nil))\n\tr.Equal(200, render.Status)\n\terr := json.NewDecoder(recorder.Body).Decode(&resourcesOut)\n\tr.NoError(err)\n\ta.Len(resourcesOut, 3)\n\tutils.Clear(params, render, recorder)\n\n\t// The interactor returns a database error\n\tinter.errDB = true\n\tctrl.Find(recorder, utils.FakeRequest(\"GET\", \"http://foo.bar/resources\", nil))\n\tr.Equal(500, render.Status)\n\tr.NotEmpty(recorder.Body.Bytes())\n\tr.NotNil(render.APIError)\n\ta.IsType(errs.API.Internal, render.APIError)\n\tutils.Clear(params, render, recorder)\n}", "func testOpSrcWithNonexistentRegistryNamespace(t *testing.T) {\n\topSrcName := \"nonexistent-namespace-opsrc\"\n\t// validURL is a valid endpoint for the OperatorSource\n\tvalidURL := \"https://quay.io/cnr\"\n\n\t// nonexistentRegistryNamespace is a namespace that does not exist\n\t// on the app registry\n\tnonexistentRegistryNamespace := \"not-existent-namespace\"\n\n\tctx := test.NewTestCtx(t)\n\tdefer ctx.Cleanup()\n\n\t// Get global framework variables\n\tclient := 
test.Global.Client\n\n\t// Get test namespace\n\tnamespace, err := ctx.GetNamespace()\n\trequire.NoError(t, err, \"Could not get namespace\")\n\tnonexistentRegistryNamespaceOperatorSource := &operator.OperatorSource{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: operator.OperatorSourceKind,\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: opSrcName,\n\t\t\tNamespace: namespace,\n\t\t},\n\t\tSpec: operator.OperatorSourceSpec{\n\t\t\tType: endpointType,\n\t\t\tEndpoint: validURL,\n\t\t\tRegistryNamespace: nonexistentRegistryNamespace,\n\t\t},\n\t}\n\terr = helpers.CreateRuntimeObject(client, ctx, nonexistentRegistryNamespaceOperatorSource)\n\trequire.NoError(t, err, \"Could not create OperatorSource\")\n\n\t// Check that OperatorSource reaches \"Failed\" state eventually\n\tresultOperatorSource := &operator.OperatorSource{}\n\texpectedPhase := \"Failed\"\n\terr = wait.Poll(helpers.RetryInterval, helpers.Timeout, func() (bool, error) {\n\t\terr = helpers.WaitForResult(client, resultOperatorSource, namespace, opSrcName)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif resultOperatorSource.Status.CurrentPhase.Name == expectedPhase &&\n\t\t\tstrings.Contains(resultOperatorSource.Status.CurrentPhase.Message, \"The OperatorSource endpoint returned an empty manifest list\") {\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\t})\n\tassert.NoError(t, err, fmt.Sprintf(\"OperatorSource never reached expected phase/message, expected %v\", expectedPhase))\n}", "func (r *HijackRouter) MustAdd(pattern string, handler func(*Hijack)) *HijackRouter {\n\tr.browser.e(r.Add(pattern, \"\", handler))\n\treturn r\n}", "func (d *Definition) Search(pattern string) Resource {\n\tresource := make(chan Resource)\n\ttree := d.ResourceTree\n\n\tgo func() {\n\t\tdefer close(resource)\n\t\ttree.Traverse(func(r Resource) {\n\t\t\tresource <- r\n\t\t})\n\t}()\n\n\tfor resourceWanted := range resource {\n\t\tpattern := fmt.Sprint(d.Context, \"/\", pattern)\n\t\tif 
resourceWanted.ID() == pattern {\n\t\t\treturn resourceWanted\n\t\t}\n\t}\n\n\treturn nil\n}", "func TestResourcesCtrlUpdateByHostname(t *testing.T) {\n\ta := assert.New(t)\n\tr := require.New(t)\n\tparams := utils.NewFakeParamsGetter()\n\trender := utils.NewFakeRender()\n\tinter := &resourcesCtrlResourcesInter{}\n\tvalid := &resourcesCtrlResourcesValid{}\n\trecorder := httptest.NewRecorder()\n\tctrl := NewResourcesCtrl(inter, render, params, valid)\n\tresourceIn := &models.Resource{\n\t\tHostname: utils.StrCpy(\"foo.bar.com\"),\n\t}\n\tresourceOut := &models.Resource{}\n\n\tvalid.errValid = true\n\n\t// Validation error\n\tctrl.UpdateByHostname(recorder, utils.FakeRequest(\"PUT\", \"http://foo.bar/resources/1\", resourceIn))\n\tr.Equal(422, render.Status)\n\tr.NotEmpty(recorder.Body.Bytes())\n\tr.NotNil(render.APIError)\n\ta.IsType(errs.API.Validation, render.APIError)\n\tutils.Clear(params, render, recorder)\n\n\tvalid.errValid = false\n\n\t// No error, a resource is returned\n\tctrl.UpdateByHostname(recorder, utils.FakeRequest(\"PUT\", \"http://foo.bar/resources/1\", resourceIn))\n\tr.Equal(200, render.Status)\n\terr := json.NewDecoder(recorder.Body).Decode(resourceOut)\n\tr.NoError(err)\n\ta.NotNil(resourceOut)\n\ta.Nil(resourceOut.Hostname)\n\tutils.Clear(params, render, recorder)\n\n\t// Null body decoding error\n\tctrl.UpdateByHostname(recorder, utils.FakeRequestRaw(\"PUT\", \"http://foo.bar/resources/1\", nil))\n\tr.Equal(400, render.Status)\n\tr.NotEmpty(recorder.Body.Bytes())\n\tr.NotNil(render.APIError)\n\ta.IsType(errs.API.BodyDecoding, render.APIError)\n\tutils.Clear(params, render, recorder)\n\n\t// The interactor returns a database error\n\tinter.errDB = true\n\tctrl.UpdateByHostname(recorder, utils.FakeRequest(\"PUT\", \"http://foo.bar/resources/1\", resourceIn))\n\tr.Equal(500, render.Status)\n\tr.NotEmpty(recorder.Body.Bytes())\n\tr.NotNil(render.APIError)\n\ta.IsType(errs.API.Internal, render.APIError)\n\tutils.Clear(params, render, 
recorder)\n\n\t// Resource not found\n\tinter.errDB = false\n\tinter.errNotFound = true\n\tctrl.UpdateByHostname(recorder, utils.FakeRequest(\"PUT\", \"http://foo.bar/resources/1\", resourceIn))\n\tr.Equal(404, render.Status)\n\tr.NotEmpty(recorder.Body.Bytes())\n\tr.NotNil(render.APIError)\n\ta.IsType(errs.API.NotFound, render.APIError)\n\tutils.Clear(params, render, recorder)\n}", "func keyMissing(t *testing.T, f func(inputs yt_stats.Inputs) http.Handler, url string) {\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\trr := httptest.NewRecorder()\n\thandler := f(getInputs())\n\thandler.ServeHTTP(rr, req)\n\tif status := rr.Code; status != http.StatusBadRequest {\n\t\tt.Errorf(\"handler returned wrong status code: expected %v actually %v\", http.StatusBadRequest, status)\n\t}\n\texpected := fmt.Sprintf(`{\"quota_usage\":0,\"status_code\":%d,\"status_message\":\"keyMissing\"}`, http.StatusBadRequest)\n\tif strings.Trim(rr.Body.String(), \"\\n\") != expected {\n\t\tt.Errorf(\"handler returned wrong body: expected %v actually %v\", expected, rr.Body.String())\n\t}\n}", "func (p *fsPath) TryPattern() (*Pattern, bool) {\n\tpattern := &Pattern{*p}\n\t_, err := p.Match(pattern)\n\treturn &Pattern{*p}, (err == nil)\n}", "func Test_NewResources(t *testing.T) {\n\ttype args struct {\n\t\tbody []byte\n\t\theader map[string][]string\n\t\tbinding ResolvedTrigger\n\t}\n\ttests := []struct {\n\t\tname string\n\t\targs args\n\t\twant []json.RawMessage\n\t}{{\n\t\tname: \"empty\",\n\t\targs: args{\n\t\t\tbody: json.RawMessage{},\n\t\t\theader: map[string][]string{},\n\t\t\tbinding: ResolvedTrigger{\n\t\t\t\tTriggerTemplate: bldr.TriggerTemplate(\"tt\", \"namespace\"),\n\t\t\t\tTriggerBindings: []*triggersv1.TriggerBinding{bldr.TriggerBinding(\"tb\", \"namespace\")},\n\t\t\t},\n\t\t},\n\t\twant: []json.RawMessage{},\n\t}, {\n\t\tname: \"one resource template\",\n\t\targs: args{\n\t\t\tbody: json.RawMessage(`{\"foo\": 
\"bar\"}`),\n\t\t\theader: map[string][]string{\"one\": {\"1\"}},\n\t\t\tbinding: ResolvedTrigger{\n\t\t\t\tTriggerTemplate: bldr.TriggerTemplate(\"tt\", \"namespace\",\n\t\t\t\t\tbldr.TriggerTemplateSpec(\n\t\t\t\t\t\tbldr.TriggerTemplateParam(\"param1\", \"description\", \"\"),\n\t\t\t\t\t\tbldr.TriggerTemplateParam(\"param2\", \"description\", \"\"),\n\t\t\t\t\t\tbldr.TriggerResourceTemplate(json.RawMessage(`{\"rt1\": \"$(params.param1)-$(params.param2)\"}`)),\n\t\t\t\t\t),\n\t\t\t\t),\n\t\t\t\tTriggerBindings: []*triggersv1.TriggerBinding{\n\t\t\t\t\tbldr.TriggerBinding(\"tb\", \"namespace\",\n\t\t\t\t\t\tbldr.TriggerBindingSpec(\n\t\t\t\t\t\t\tbldr.TriggerBindingParam(\"param1\", \"$(body.foo)\"),\n\t\t\t\t\t\t\tbldr.TriggerBindingParam(\"param2\", \"$(header.one)\"),\n\t\t\t\t\t\t),\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\twant: []json.RawMessage{\n\t\t\tjson.RawMessage(`{\"rt1\": \"bar-1\"}`),\n\t\t},\n\t}, {\n\t\tname: \"multiple resource templates\",\n\t\targs: args{\n\t\t\tbody: json.RawMessage(`{\"foo\": \"bar\"}`),\n\t\t\theader: map[string][]string{\"one\": {\"1\"}},\n\t\t\tbinding: ResolvedTrigger{\n\t\t\t\tTriggerTemplate: bldr.TriggerTemplate(\"tt\", \"namespace\",\n\t\t\t\t\tbldr.TriggerTemplateSpec(\n\t\t\t\t\t\tbldr.TriggerTemplateParam(\"param1\", \"description\", \"\"),\n\t\t\t\t\t\tbldr.TriggerTemplateParam(\"param2\", \"description\", \"\"),\n\t\t\t\t\t\tbldr.TriggerTemplateParam(\"param3\", \"description\", \"default2\"),\n\t\t\t\t\t\tbldr.TriggerResourceTemplate(json.RawMessage(`{\"rt1\": \"$(params.param1)-$(params.param2)\"}`)),\n\t\t\t\t\t\tbldr.TriggerResourceTemplate(json.RawMessage(`{\"rt2\": \"$(params.param3)\"}`)),\n\t\t\t\t\t\tbldr.TriggerResourceTemplate(json.RawMessage(`{\"rt3\": \"rt3\"}`)),\n\t\t\t\t\t),\n\t\t\t\t),\n\t\t\t\tTriggerBindings: []*triggersv1.TriggerBinding{\n\t\t\t\t\tbldr.TriggerBinding(\"tb\", \"namespace\",\n\t\t\t\t\t\tbldr.TriggerBindingSpec(\n\t\t\t\t\t\t\tbldr.TriggerBindingParam(\"param1\", 
\"$(body.foo)\"),\n\t\t\t\t\t\t\tbldr.TriggerBindingParam(\"param2\", \"$(header.one)\"),\n\t\t\t\t\t\t),\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\twant: []json.RawMessage{\n\t\t\tjson.RawMessage(`{\"rt1\": \"bar-1\"}`),\n\t\t\tjson.RawMessage(`{\"rt2\": \"default2\"}`),\n\t\t\tjson.RawMessage(`{\"rt3\": \"rt3\"}`),\n\t\t},\n\t}, {\n\t\tname: \"one resource template with one uid\",\n\t\targs: args{\n\t\t\tbody: json.RawMessage(`{\"foo\": \"bar\"}`),\n\t\t\tbinding: ResolvedTrigger{\n\t\t\t\tTriggerTemplate: bldr.TriggerTemplate(\"tt\", \"namespace\",\n\t\t\t\t\tbldr.TriggerTemplateSpec(\n\t\t\t\t\t\tbldr.TriggerTemplateParam(\"param1\", \"description\", \"\"),\n\t\t\t\t\t\tbldr.TriggerResourceTemplate(json.RawMessage(`{\"rt1\": \"$(params.param1)-$(uid)\"}`)),\n\t\t\t\t\t),\n\t\t\t\t),\n\t\t\t\tTriggerBindings: []*triggersv1.TriggerBinding{\n\t\t\t\t\tbldr.TriggerBinding(\"tb\", \"namespace\",\n\t\t\t\t\t\tbldr.TriggerBindingSpec(\n\t\t\t\t\t\t\tbldr.TriggerBindingParam(\"param1\", \"$(body.foo)\"),\n\t\t\t\t\t\t),\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\twant: []json.RawMessage{\n\t\t\tjson.RawMessage(`{\"rt1\": \"bar-cbhtc\"}`),\n\t\t},\n\t}, {\n\t\tname: \"one resource template with three uid\",\n\t\targs: args{\n\t\t\tbody: json.RawMessage(`{\"foo\": \"bar\"}`),\n\t\t\tbinding: ResolvedTrigger{\n\t\t\t\tTriggerTemplate: bldr.TriggerTemplate(\"tt\", \"namespace\",\n\t\t\t\t\tbldr.TriggerTemplateSpec(\n\t\t\t\t\t\tbldr.TriggerTemplateParam(\"param1\", \"description\", \"\"),\n\t\t\t\t\t\tbldr.TriggerResourceTemplate(json.RawMessage(`{\"rt1\": \"$(params.param1)-$(uid)-$(uid)\", \"rt2\": \"$(uid)\"}`)),\n\t\t\t\t\t),\n\t\t\t\t),\n\t\t\t\tTriggerBindings: []*triggersv1.TriggerBinding{\n\t\t\t\t\tbldr.TriggerBinding(\"tb\", \"namespace\",\n\t\t\t\t\t\tbldr.TriggerBindingSpec(\n\t\t\t\t\t\t\tbldr.TriggerBindingParam(\"param1\", \"$(body.foo)\"),\n\t\t\t\t\t\t),\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\twant: 
[]json.RawMessage{\n\t\t\tjson.RawMessage(`{\"rt1\": \"bar-cbhtc-cbhtc\", \"rt2\": \"cbhtc\"}`),\n\t\t},\n\t}, {\n\t\tname: \"multiple resource templates with multiple uid\",\n\t\targs: args{\n\t\t\tbody: json.RawMessage(`{\"foo\": \"bar\"}`),\n\t\t\tbinding: ResolvedTrigger{\n\t\t\t\tTriggerTemplate: bldr.TriggerTemplate(\"tt\", \"namespace\",\n\t\t\t\t\tbldr.TriggerTemplateSpec(\n\t\t\t\t\t\tbldr.TriggerTemplateParam(\"param1\", \"description\", \"\"),\n\t\t\t\t\t\tbldr.TriggerTemplateParam(\"param2\", \"description\", \"default2\"),\n\t\t\t\t\t\tbldr.TriggerResourceTemplate(json.RawMessage(`{\"rt1\": \"$(params.param1)-$(uid)\", \"$(uid)\": \"$(uid)\"}`)),\n\t\t\t\t\t\tbldr.TriggerResourceTemplate(json.RawMessage(`{\"rt2\": \"$(params.param2)-$(uid)\"}`)),\n\t\t\t\t\t\tbldr.TriggerResourceTemplate(json.RawMessage(`{\"rt3\": \"rt3\"}`)),\n\t\t\t\t\t),\n\t\t\t\t),\n\t\t\t\tTriggerBindings: []*triggersv1.TriggerBinding{\n\t\t\t\t\tbldr.TriggerBinding(\"tb\", \"namespace\",\n\t\t\t\t\t\tbldr.TriggerBindingSpec(\n\t\t\t\t\t\t\tbldr.TriggerBindingParam(\"param1\", \"$(body.foo)\"),\n\t\t\t\t\t\t),\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\twant: []json.RawMessage{\n\t\t\tjson.RawMessage(`{\"rt1\": \"bar-cbhtc\", \"cbhtc\": \"cbhtc\"}`),\n\t\t\tjson.RawMessage(`{\"rt2\": \"default2-cbhtc\"}`),\n\t\t\tjson.RawMessage(`{\"rt3\": \"rt3\"}`),\n\t\t},\n\t}, {\n\t\tname: \"one resource template multiple bindings\",\n\t\targs: args{\n\t\t\tbody: json.RawMessage(`{\"foo\": \"bar\"}`),\n\t\t\theader: map[string][]string{\"one\": {\"1\"}},\n\t\t\tbinding: ResolvedTrigger{\n\t\t\t\tTriggerTemplate: bldr.TriggerTemplate(\"tt\", \"namespace\",\n\t\t\t\t\tbldr.TriggerTemplateSpec(\n\t\t\t\t\t\tbldr.TriggerTemplateParam(\"param1\", \"description\", \"\"),\n\t\t\t\t\t\tbldr.TriggerTemplateParam(\"param2\", \"description\", \"\"),\n\t\t\t\t\t\tbldr.TriggerResourceTemplate(json.RawMessage(`{\"rt1\": 
\"$(params.param1)-$(params.param2)\"}`)),\n\t\t\t\t\t),\n\t\t\t\t),\n\t\t\t\tTriggerBindings: []*triggersv1.TriggerBinding{\n\t\t\t\t\tbldr.TriggerBinding(\"tb\", \"namespace\",\n\t\t\t\t\t\tbldr.TriggerBindingSpec(\n\t\t\t\t\t\t\tbldr.TriggerBindingParam(\"param1\", \"$(body.foo)\"),\n\t\t\t\t\t\t),\n\t\t\t\t\t),\n\t\t\t\t\tbldr.TriggerBinding(\"tb2\", \"namespace\",\n\t\t\t\t\t\tbldr.TriggerBindingSpec(\n\t\t\t\t\t\t\tbldr.TriggerBindingParam(\"param2\", \"$(header.one)\"),\n\t\t\t\t\t\t),\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\twant: []json.RawMessage{\n\t\t\tjson.RawMessage(`{\"rt1\": \"bar-1\"}`),\n\t\t},\n\t}, {\n\t\tname: \"bindings with static values\",\n\t\targs: args{\n\t\t\tbody: json.RawMessage(`{\"foo\": \"bar\"}`),\n\t\t\tbinding: ResolvedTrigger{\n\t\t\t\tTriggerTemplate: bldr.TriggerTemplate(\"tt\", \"ns\", bldr.TriggerTemplateSpec(\n\t\t\t\t\tbldr.TriggerTemplateParam(\"p1\", \"\", \"\"),\n\t\t\t\t\tbldr.TriggerTemplateParam(\"p2\", \"\", \"\"),\n\t\t\t\t\tbldr.TriggerResourceTemplate(json.RawMessage(`{\"p1\": \"$(params.p1)\", \"p2\": \"$(params.p2)\"}`)),\n\t\t\t\t),\n\t\t\t\t),\n\t\t\t\tTriggerBindings: []*triggersv1.TriggerBinding{\n\t\t\t\t\tbldr.TriggerBinding(\"tb\", \"ns\", bldr.TriggerBindingSpec(\n\t\t\t\t\t\tbldr.TriggerBindingParam(\"p1\", \"static_value\"),\n\t\t\t\t\t\tbldr.TriggerBindingParam(\"p2\", \"$(body.foo)\"),\n\t\t\t\t\t)),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\twant: []json.RawMessage{\n\t\t\tjson.RawMessage(`{\"p1\": \"static_value\", \"p2\": \"bar\"}`),\n\t\t},\n\t}, {\n\t\tname: \"bindings with combination of static values \",\n\t\targs: args{\n\t\t\tbody: json.RawMessage(`{\"foo\": \"fooValue\", \"bar\": \"barValue\"}`),\n\t\t\tbinding: ResolvedTrigger{\n\t\t\t\tTriggerTemplate: bldr.TriggerTemplate(\"tt\", \"ns\", bldr.TriggerTemplateSpec(\n\t\t\t\t\tbldr.TriggerTemplateParam(\"p1\", \"\", \"\"),\n\t\t\t\t\tbldr.TriggerResourceTemplate(json.RawMessage(`{\"p1\": 
\"$(params.p1)\"`)),\n\t\t\t\t),\n\t\t\t\t),\n\t\t\t\tTriggerBindings: []*triggersv1.TriggerBinding{\n\t\t\t\t\tbldr.TriggerBinding(\"tb\", \"ns\", bldr.TriggerBindingSpec(\n\t\t\t\t\t\tbldr.TriggerBindingParam(\"p1\", \"Event values are - foo: $(body.foo); bar: $(body.bar)\"),\n\t\t\t\t\t)),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\twant: []json.RawMessage{\n\t\t\tjson.RawMessage(`{\"p1\": \"Event values are - foo: fooValue; bar: barValue\"`),\n\t\t},\n\t}, {\n\t\tname: \"event value is JSON string\",\n\t\targs: args{\n\t\t\tbody: json.RawMessage(`{\"a\": \"b\"}`),\n\t\t\tbinding: ResolvedTrigger{\n\t\t\t\tTriggerTemplate: bldr.TriggerTemplate(\"tt\", \"ns\", bldr.TriggerTemplateSpec(\n\t\t\t\t\tbldr.TriggerTemplateParam(\"p1\", \"\", \"\"),\n\t\t\t\t\tbldr.TriggerResourceTemplate(json.RawMessage(`{\"p1\": \"$(params.p1)\"}`)),\n\t\t\t\t),\n\t\t\t\t),\n\t\t\t\tTriggerBindings: []*triggersv1.TriggerBinding{\n\t\t\t\t\tbldr.TriggerBinding(\"tb\", \"ns\", bldr.TriggerBindingSpec(\n\t\t\t\t\t\tbldr.TriggerBindingParam(\"p1\", \"$(body)\"),\n\t\t\t\t\t)),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\twant: []json.RawMessage{\n\t\t\tjson.RawMessage(`{\"p1\": \"{\\\"a\\\":\\\"b\\\"}\"}`),\n\t\t},\n\t}, {\n\t\tname: \"header event values\",\n\t\targs: args{\n\t\t\theader: map[string][]string{\n\t\t\t\t\"a\": {\"singlevalue\"},\n\t\t\t\t\"b\": {\"multiple\", \"values\"},\n\t\t\t},\n\t\t\tbinding: ResolvedTrigger{\n\t\t\t\tTriggerTemplate: bldr.TriggerTemplate(\"tt\", \"ns\", bldr.TriggerTemplateSpec(\n\t\t\t\t\tbldr.TriggerTemplateParam(\"p1\", \"\", \"\"),\n\t\t\t\t\tbldr.TriggerTemplateParam(\"p2\", \"\", \"\"),\n\t\t\t\t\tbldr.TriggerResourceTemplate(json.RawMessage(`{\"p1\": \"$(params.p1)\",\"p2\": \"$(params.p2)\"}`)),\n\t\t\t\t),\n\t\t\t\t),\n\t\t\t\tTriggerBindings: []*triggersv1.TriggerBinding{\n\t\t\t\t\tbldr.TriggerBinding(\"tb\", \"ns\", bldr.TriggerBindingSpec(\n\t\t\t\t\t\tbldr.TriggerBindingParam(\"p1\", \"$(header.a)\"),\n\t\t\t\t\t\tbldr.TriggerBindingParam(\"p2\", 
\"$(header.b)\"),\n\t\t\t\t\t)),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\twant: []json.RawMessage{\n\t\t\tjson.RawMessage(`{\"p1\": \"singlevalue\",\"p2\": \"multiple,values\"}`),\n\t\t},\n\t}}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\t// This seeds Uid() to return 'cbhtc'\n\t\t\trand.Seed(0)\n\t\t\tparams, err := ResolveParams(tt.args.binding.TriggerBindings, tt.args.body, tt.args.header, tt.args.binding.TriggerTemplate.Spec.Params)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"ResolveParams() returned unexpected error: %s\", err)\n\t\t\t}\n\t\t\tgot := ResolveResources(tt.args.binding.TriggerTemplate, params)\n\t\t\tif diff := cmp.Diff(tt.want, got); diff != \"\" {\n\t\t\t\tstringDiff := cmp.Diff(convertJSONRawMessagesToString(tt.want), convertJSONRawMessagesToString(got))\n\t\t\t\tt.Errorf(\"ResolveResources(): -want +got: %s\", stringDiff)\n\t\t\t}\n\t\t})\n\t}\n}", "func TestLookupDNSPanicsOnInvalidType(t *testing.T) {\n\tdefer func() {\n\t\tif r := recover(); r == nil {\n\t\t\tt.Error(\"lookupDNS should panic if an invalid conntype is specified.\")\n\t\t}\n\t}()\n\tlookupDNS(context.Background(), nil, \"name\", \"wssorbashorsomething\")\n}", "func (cli *CLI) RegisterUnmatched(callback func(string, *Arg)) *CLI {\n\tcli.RegisterAny(callback)\n\treturn cli\n}", "func TestDROStructuralValidatorNotObject(t *testing.T) {\n\tvalidator := NewDROStructuralValidator(newMockRepository(nil))\n\terr := validator.ValidateResource(testResource(\"bs646cd8717.json\"))\n\tassert.Nil(t, err)\n}", "func match(got string, pattern *regexp.Regexp, msg string, note func(key string, value interface{})) error {\n\tif pattern.MatchString(got) {\n\t\treturn nil\n\t}\n\n\treturn errors.New(msg)\n}", "func registerResourceTelemetrySetup(request *libcoap.Pdu, typ reflect.Type, controller controllers.ControllerInterface, session *libcoap.Session,\n context *libcoap.Context, is_unknown bool) (interface{}, string, error) {\n\n hex := hex.Dump(request.Data)\n if 
request.Code == libcoap.RequestPut && !strings.Contains(hex, string(libcoap.IETF_TELEMETRY_SETUP_HEX)) {\n return nil, \"\", errors.New(\"Body data MUST be telemetry setup request\")\n }\n body, err := messages.UnmarshalCbor(request, reflect.TypeOf(messages.TelemetrySetupRequest{}))\n if err != nil {\n return nil, \"\", err\n }\n\n var resourcePath string\n\n // Create sub resource to handle observation on behalf of Unknown resource in case of telemetry setup configuration PUT\n if is_unknown && request.Code == libcoap.RequestPut {\n p := request.PathString()\n resourcePath = p\n resource := context.GetResourceByQuery(&resourcePath)\n if resource == nil {\n r := libcoap.ResourceInit(&p, 0)\n r.TurnOnResourceObservable()\n r.RegisterHandler(libcoap.RequestGet, toMethodHandler(controller.HandleGet, typ, controller, !is_unknown))\n r.RegisterHandler(libcoap.RequestPut, toMethodHandler(controller.HandlePut, typ, controller, !is_unknown))\n r.RegisterHandler(libcoap.RequestPost, toMethodHandler(controller.HandlePost, typ, controller, !is_unknown))\n r.RegisterHandler(libcoap.RequestDelete, toMethodHandler(controller.HandleDelete, typ, controller, !is_unknown))\n context.AddResource(r)\n log.Debugf(\"Create sub resource to handle observation later : uri-path=%+v\", p)\n }\n }\n return body, resourcePath, nil\n}", "func TestIssue1234(t *testing.T) {\n\tvar r cue.Runtime\n\tinst, err := r.Compile(\"test\", `\n#Test: or([])\n\n\t`)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t_, err = openapi.Gen(inst, &openapi.Config{})\n\tif err == nil {\n\t\tt.Fatal(\"expected error\")\n\t}\n}", "func (pat *patternInjector) match(ctx *context) error {\n\tif !ctx.justReturned() {\n\t\treturn ctx.call(pat.pat)\n\t}\n\n\tret := ctx.ret\n\tif ret.ok {\n\t\tif n, ok := pat.inject(ctx.next(ret.n)); ok {\n\t\t\tctx.consume(n)\n\t\t\treturn ctx.commit()\n\t\t}\n\t}\n\treturn ctx.predicates(false)\n}", "func TestHandler_URLMappingShowRecapSalesFailIDNotExist(t *testing.T) {\n\t// melakukan 
proses login\n\tuser := model.DummyUserPriviledgeWithUsergroup(1)\n\tsd, _ := auth.Login(user)\n\ttoken := \"Bearer \" + sd.Token\n\tng := tester.New()\n\tng.SetHeader(tester.H{\"Authorization\": token})\n\tng.Method = \"GET\"\n\tng.Path = \"/v1/recap-sales/999999\"\n\tng.Run(test.Router(), func(res tester.HTTPResponse, req tester.HTTPRequest) {\n\t\tassert.Equal(t, int(404), res.Code, fmt.Sprintf(\"Should has 'endpoint %s' with method '%s'\", \"/v1/recap-sales/999999\", \"GET\"))\n\t})\n}", "func (pattern targetPattern) Match(object *metav1.ObjectMeta) bool {\n\treturn object.Name == pattern.name && pattern.namespace.MatchString(object.Namespace)\n}", "func returnNotResource(c *gin.Context) {\n\tc.String(http.StatusNotFound, \"\")\n}", "func Test_pattern_manager_setpatterns1(t *testing.T) {\n\n\tpolicyPath := \"/tmp/servedpatterntest/\"\n\tservedPatterns := map[string]exchange.ServedPattern{\n\t\t\"myorg1_pattern1\": {\n\t\t\tOrg: \"myorg1\",\n\t\t\tPattern: \"pattern1\",\n\t\t},\n\t}\n\n\tif np := NewPatternManager(); np == nil {\n\t\tt.Errorf(\"Error: pattern manager not created\")\n\t} else if err := np.SetCurrentPatterns(servedPatterns, policyPath); err != nil {\n\t\tt.Errorf(\"Error %v consuming served patterns %v\", err, servedPatterns)\n\t} else if len(np.OrgPatterns) != 1 {\n\t\tt.Errorf(\"Error: should have 1 org in the PatternManager, have %v\", len(np.OrgPatterns))\n\t} else {\n\t\tt.Log(np)\n\t}\n\n}", "func (r resourceFactory) getResourceDataOKExists(schemaDefinitionPropertyName string, resourceLocalData *schema.ResourceData) (interface{}, bool) {\n\tresourceSchema, _ := r.openAPIResource.getResourceSchema()\n\tschemaDefinitionProperty, err := resourceSchema.getProperty(schemaDefinitionPropertyName)\n\tif err != nil {\n\t\treturn nil, false\n\t}\n\treturn resourceLocalData.GetOkExists(schemaDefinitionProperty.getTerraformCompliantPropertyName())\n}", "func hasResource(client discovery.DiscoveryInterface, resource schema.GroupVersionResource) bool 
{\n\tresources, err := client.ServerResourcesForGroupVersion(resource.GroupVersion().String())\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tfor _, serverResource := range resources.APIResources {\n\t\tif serverResource.Name == resource.Resource {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func TestDROStructuralValidatorNoMemberAssertion(t *testing.T) {\n\tjson := datautils.JSONObject{\n\t\t\"@type\": \"http://sdr.sul.stanford.edu/models/sdr3-image.jsonld\",\n\t\t\"structural\": map[string]interface{}{}}\n\tobj := datautils.NewResource(json)\n\tvalidator := NewDROStructuralValidator(newMockRepository(nil))\n\terr := validator.ValidateResource(obj)\n\tassert.Nil(t, err)\n}", "func WrapWithNotFound(cause error, parameters ...wparams.ParamStorer) Error {\n\treturn newGenericError(cause, DefaultNotFound, wparams.NewParamStorer(parameters...))\n}", "func FamiliarMatch(pattern string, ref Reference) (bool, error) {\n\tmatched, err := path.Match(pattern, FamiliarString(ref))\n\tif namedRef, isNamed := ref.(Named); isNamed && !matched {\n\t\tmatched, _ = path.Match(pattern, FamiliarName(namedRef))\n\t}\n\treturn matched, err\n}", "func TestPutNewPresentationInvalidtemplatePath(t *testing.T) {\n request := createPutNewPresentationRequest()\n request.templatePath = invalidizeTestParamValue(request.templatePath, \"templatePath\", \"string\").(string)\n e := initializeTest(\"PutNewPresentation\", \"templatePath\", request.templatePath)\n if e != nil {\n t.Errorf(\"Error: %v.\", e)\n return\n }\n r, _, e := getTestApiClient().DocumentApi.PutNewPresentation(request)\n assertError(t, \"PutNewPresentation\", \"templatePath\", r.Code, e)\n}", "func (h *harness) addResource(t *testing.T, resource *test.APIResource) {\n\tt.Helper()\n\n\th.DiscoveryClient.WithAPIResource(resource)\n\trequire.NoError(t, h.discoveryHelper.Refresh())\n\n\tfor _, item := range resource.Items {\n\t\tobj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(item)\n\t\trequire.NoError(t, 
err)\n\n\t\tunstructuredObj := &unstructured.Unstructured{Object: obj}\n\t\tif resource.Namespaced {\n\t\t\t_, err = h.DynamicClient.Resource(resource.GVR()).Namespace(item.GetNamespace()).Create(context.TODO(), unstructuredObj, metav1.CreateOptions{})\n\t\t} else {\n\t\t\t_, err = h.DynamicClient.Resource(resource.GVR()).Create(context.TODO(), unstructuredObj, metav1.CreateOptions{})\n\t\t}\n\t\trequire.NoError(t, err)\n\t}\n}", "func (group *RouterGroup) register(method string, subpattern string, handler HandlerFunc) {\n\tpattern := path.Join(group.prefix + subpattern)\n\tgroup.engine.router.addRoute(method, pattern, handler)\n}", "func Register(name string, r Closer) error {\n\tif _, ok := resources[name]; ok {\n\t\tlog4go.Warn(\"resource[%v] update\", name)\n\t}\n\tresources[name] = r\n\treturn nil\n}", "func (s *EnumSchema) withRegistry(registry map[string]Schema) Schema {\n\tfullname := GetFullName(s)\n\tif schema, ok := registry[fullname]; ok {\n\t\treturn &refSchema{Type_: fullname, Ref: schema}\n\t} else {\n\t\tregistry[fullname] = s\n\t\treturn s\n\t}\n}", "func TestValues_Pattern_Edgecases(t *testing.T) {\n\tvar err *errors.Validation\n\terr = Pattern(\"path\", \"in\", \"pick-a-boo\", `.*-[a-z]-.*`)\n\tassert.Nil(t, err)\n\n\t// Invalid regexp\n\terr = Pattern(\"path\", \"in\", \"pick-a-boo\", `.*-[a(-z]-^).*`)\n\tif assert.NotNil(t, err) {\n\t\tassert.Equal(t, int(err.Code()), int(errors.PatternFailCode))\n\t\tassert.Contains(t, err.Error(), \"pattern is invalid\")\n\t}\n\n\t// Valid regexp, invalid pattern\n\terr = Pattern(\"path\", \"in\", \"pick-8-boo\", `.*-[a-z]-.*`)\n\tif assert.NotNil(t, err) {\n\t\tassert.Equal(t, int(err.Code()), int(errors.PatternFailCode))\n\t\tassert.NotContains(t, err.Error(), \"pattern is invalid\")\n\t\tassert.Contains(t, err.Error(), \"should match\")\n\t}\n}" ]
[ "0.54446614", "0.53186023", "0.5237049", "0.51093924", "0.5071384", "0.50415397", "0.5041398", "0.5022821", "0.5021216", "0.50205475", "0.50031614", "0.49272117", "0.48908263", "0.48736688", "0.48641396", "0.48465416", "0.48355845", "0.482778", "0.48097754", "0.480847", "0.47881806", "0.47840348", "0.47835448", "0.47833437", "0.47759297", "0.47645882", "0.47637865", "0.4762806", "0.4762806", "0.47439098", "0.47407252", "0.4719499", "0.46994543", "0.46967703", "0.46958044", "0.46945643", "0.46901932", "0.4669986", "0.46662495", "0.46641967", "0.46628395", "0.46628395", "0.46617538", "0.46535075", "0.4651862", "0.46487027", "0.46395358", "0.4636772", "0.4634129", "0.4614709", "0.46035337", "0.4598277", "0.4591351", "0.4586082", "0.45824957", "0.45742187", "0.45724684", "0.45700523", "0.45595214", "0.45540726", "0.4549534", "0.45488858", "0.45425797", "0.454141", "0.45353743", "0.45349306", "0.4525447", "0.4520833", "0.4517905", "0.45168513", "0.45140204", "0.4507519", "0.45056722", "0.4503779", "0.44948408", "0.44869787", "0.44816133", "0.44723794", "0.4471467", "0.4471385", "0.44710925", "0.44690686", "0.4463817", "0.44549313", "0.4453514", "0.44504157", "0.4450188", "0.4439882", "0.4433737", "0.44279096", "0.4422022", "0.44217888", "0.44216737", "0.4421562", "0.4421416", "0.44211277", "0.44205156", "0.44202343", "0.44105393", "0.44028696" ]
0.68765235
0
Test that SetOwnedResources sets which resources are reset when calling Reset.
func TestServiceSetOwnedResources(t *testing.T) { resources := []string{"test.foo.>", "test.bar.>"} access := []string{"test.zoo.>", "test.baz.>"} runTest(t, func(s *res.Service) { s.SetOwnedResources(resources, access) }, nil, restest.WithReset(resources, access)) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *User) SetOwnedObjects(value []DirectoryObjectable)() {\n m.ownedObjects = value\n}", "func TestReset(t *testing.T) {\n\ttestCancel(t, false)\n}", "func TestApplyOwnershipDiff(t *testing.T) {\n\tusers := []*user.User{\n\t\tfakeUser(\"1\", \"1\", \"user-1\"),\n\t\tfakeUser(\"2\", \"2\", \"user-2\"),\n\t}\n\tgroups := []*user.Group{\n\t\tfakeGroup(\"1\", \"group-1\"),\n\t\tfakeGroup(\"2\", \"group-2\"),\n\t}\n\townershipRecords := []ownershipRecord{\n\t\tmakeOwned(\"foo\", \"user-1\", \"1\", \"group-1\", \"1\"),\n\t}\n\tm := newMockOS(ownershipRecords, users, groups, nil, nil)\n\tt.Run(\"no-changes\", func(t *testing.T) {\n\t\to := &owner.Ownership{UID: intRef(1), GID: intRef(1)}\n\t\tdiff, err := owner.NewOwnershipDiff(m, \"foo\", o)\n\t\trequire.NoError(t, err)\n\t\terr = diff.Apply()\n\t\trequire.NoError(t, err)\n\t\tm.AssertNotCalled(t, \"Chown\", any, any, any)\n\t})\n\tt.Run(\"uid-changes\", func(t *testing.T) {\n\t\to := &owner.Ownership{UID: intRef(2), GID: intRef(1)}\n\t\tdiff, err := owner.NewOwnershipDiff(m, \"foo\", o)\n\t\trequire.NoError(t, err)\n\t\terr = diff.Apply()\n\t\trequire.NoError(t, err)\n\t\tm.AssertCalled(t, \"Chown\", \"foo\", 2, 1)\n\t})\n\tt.Run(\"gid-changes\", func(t *testing.T) {\n\t\to := &owner.Ownership{UID: intRef(1), GID: intRef(2)}\n\t\tdiff, err := owner.NewOwnershipDiff(m, \"foo\", o)\n\t\trequire.NoError(t, err)\n\t\terr = diff.Apply()\n\t\trequire.NoError(t, err)\n\t\tm.AssertCalled(t, \"Chown\", \"foo\", 1, 2)\n\t})\n\tt.Run(\"uid-and-gid-changes\", func(t *testing.T) {\n\t\to := &owner.Ownership{UID: intRef(2), GID: intRef(2)}\n\t\tdiff, err := owner.NewOwnershipDiff(m, \"foo\", o)\n\t\trequire.NoError(t, err)\n\t\terr = diff.Apply()\n\t\trequire.NoError(t, err)\n\t\tm.AssertCalled(t, \"Chown\", \"foo\", 2, 2)\n\t})\n\tt.Run(\"chown-error-needs-changes\", func(t *testing.T) {\n\t\texpected := errors.New(\"error1\")\n\t\tm := failingMockOS(map[string]error{\"Chown\": expected})\n\t\to := &owner.Ownership{UID: 
intRef(2), GID: intRef(2)}\n\t\tdiff, err := owner.NewOwnershipDiff(m, \"foo\", o)\n\t\trequire.NoError(t, err)\n\t\terr = diff.Apply()\n\t\tm.AssertCalled(t, \"Chown\", any, any, any)\n\t\tassert.Equal(t, expected, err)\n\t})\n}", "func TestResetIPSetsOnFailure(t *testing.T) {\n\tmetrics.ReinitializeAll()\n\tcalls := []testutils.TestCmd{\n\t\t{Cmd: []string{\"ipset\", \"list\", \"--name\"}, PipedToCommand: true, HasStartError: true, ExitCode: 1},\n\t\t{Cmd: []string{\"grep\", \"-q\", \"-v\", \"azure-npm-\"}, ExitCode: 0}, // non-azure sets exist\n\t\t{Cmd: []string{\"ipset\", \"list\", \"--name\"}, PipedToCommand: true, HasStartError: true, ExitCode: 1},\n\t\t{Cmd: []string{\"grep\", \"azure-npm-\"}},\n\t}\n\tioShim := common.NewMockIOShim(calls)\n\tdefer ioShim.VerifyCalls(t, calls)\n\tiMgr := NewIPSetManager(applyAlwaysCfg, ioShim)\n\n\tiMgr.CreateIPSets([]*IPSetMetadata{namespaceSet, keyLabelOfPodSet})\n\n\tmetrics.IncNumIPSets()\n\tmetrics.IncNumIPSets()\n\tmetrics.AddEntryToIPSet(\"test1\")\n\tmetrics.AddEntryToIPSet(\"test1\")\n\tmetrics.AddEntryToIPSet(\"test2\")\n\n\trequire.Error(t, iMgr.ResetIPSets())\n\n\tassertExpectedInfo(t, iMgr, &expectedInfo{\n\t\tmainCache: nil,\n\t\ttoAddUpdateCache: nil,\n\t\ttoDeleteCache: nil,\n\t\tsetsForKernel: nil,\n\t})\n}", "func (r *FooReconciler) cleanupOwnedResources(ctx context.Context, log logr.Logger, foo *batchv1.Foo) error {\n\tlog.Info(\"finding existing Deployments for MyKind resource\")\n\n\t// List all deployment resources owned by this MyKind\n\tvar deployments apps.DeploymentList\n\t//if err := r.List(ctx, &deployments, client.InNamespace(foo.Namespace), client.MatchingField(deploymentOwnerKey, foo.Name)); err != nil {\n\t//\treturn err\n\t//}\n\n\tdeleted := 0\n\tfor _, depl := range deployments.Items {\n\t\tif depl.Name == foo.Spec.Name {\n\t\t\t// If this deployment's name matches the one on the MyKind resource\n\t\t\t// then do not delete it.\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := r.Client.Delete(ctx, 
&depl); err != nil {\n\t\t\tlog.Error(err, \"failed to delete Deployment resource\")\n\t\t\treturn err\n\t\t}\n\n\t\tr.Recorder.Eventf(foo, core.EventTypeNormal, \"Deleted\", \"Deleted deployment %q\", depl.Name)\n\t\tdeleted++\n\t}\n\n\tlog.Info(\"finished cleaning up old Deployment resources\", \"number_deleted\", deleted)\n\n\treturn nil\n}", "func TestReset(t *testing.T) {\n\tqd, err := New(QFILE)\n\tif nil != err {\n\t\tt.Error(err)\n\t}\n\tt.Logf(\"%s: %d unused, and %d used quotes\", QFILE, qd.Unused(), qd.Used())\n\tqd.ResetAndSave()\n\tt.Logf(\"%s: %d unused, and %d used quotes\", QFILE, qd.Unused(), qd.Used())\n}", "func TestResourceOwner(t *testing.T) {\n\tctx, cancel := clear()\n\tdefer cancel()\n\tstatusChanges := make([]bool, 0)\n\tWatchGlobalReady(func(status bool) {\n\t\tstatusChanges = append(statusChanges, status)\n\t})\n\n\t// first request will fail\n\trespCode = http.StatusInternalServerError\n\treqCount = 0\n\n\tclientID := \"koala-clientID\"\n\tsecret := \"koala-secret\"\n\tauthHeader := \"Basic \" + base64.StdEncoding.\n\t\tEncodeToString(\n\t\t\t[]byte(clientID+\":\"+secret),\n\t\t)\n\ta, err := ResourceOwner(ctx, ResourceOwnerOptions{\n\t\tUrl: ts.URL,\n\t\tUsername: \"koala\",\n\t\tPassword: \"pass\",\n\t\tClientID: clientID,\n\t\tSecret: secret,\n\t})\n\tassert.Nil(t, err, \"Should not return error if all options are set\")\n\t// speed up retry process\n\tretryWait.SetBaseDuration(1)\n\n\tvar token string\n\tselect {\n\tcase token = <-a.GetToken():\n\t\tassert.Equal(t, \"2\", token, \"Supplied token doesn't match send token\")\n\tcase <-time.After(time.Millisecond * 50):\n\t\tassert.Fail(t, \"Token not received\")\n\t}\n\n\tassert.Equal(t, \"koala\", lastQuery.Get(\"username\"), \"Received incorrect username\")\n\tassert.Equal(t, \"pass\", lastQuery.Get(\"password\"), \"Received incorrect password\")\n\tassert.Equal(t, \"password\", lastQuery.Get(\"grant_type\"), \"Received incorrect grant type\")\n\tassert.Equal(t, authHeader, 
lastHeaders.Get(\"authorization\"), \"Received incorrect header\")\n\n\t// ready should be true\n\tassert.True(t, globalReadyStatus(), \"We have retrieved the token, so global ready status should be true\")\n\trespCode = http.StatusInternalServerError\n\tgo a.Refresh()\n\t<-time.After(time.Millisecond)\n\tassert.False(t, globalReadyStatus(), \"We have asked for refresh - global ready status should immediately be false\")\n\n\tselect {\n\tcase token = <-a.GetToken():\n\t\tassert.Equal(t, \"4\", token, \"Supplied token doesn't match send token\")\n\t\tassert.True(t, globalReadyStatus(), \"We have retrieved the token, so global ready status should be true\")\n\tcase <-time.After(time.Millisecond * 50):\n\t\tassert.Fail(t, \"Token not received\")\n\t}\n\n\t//assert.Fail(t, \"-- to see output\")\n}", "func TestDeleteMembers(t *testing.T) {\n\tcalls := []testutils.TestCmd{\n\t\tfakeRestoreSuccessCommand,\n\t}\n\tioshim := common.NewMockIOShim(calls)\n\tdefer ioshim.VerifyCalls(t, calls)\n\tiMgr := NewIPSetManager(applyAlwaysCfg, ioshim)\n\trequire.NoError(t, iMgr.AddToSets([]*IPSetMetadata{TestNSSet.Metadata}, \"1.1.1.1\", \"a\"))\n\trequire.NoError(t, iMgr.AddToSets([]*IPSetMetadata{TestNSSet.Metadata}, \"2.2.2.2\", \"b\"))\n\trequire.NoError(t, iMgr.AddToSets([]*IPSetMetadata{TestNSSet.Metadata}, \"3.3.3.3\", \"c\"))\n\t// create to destroy later\n\tiMgr.CreateIPSets([]*IPSetMetadata{TestCIDRSet.Metadata})\n\t// clear dirty cache, otherwise a set deletion will be a no-op\n\tiMgr.clearDirtyCache()\n\n\t// will remove this member\n\trequire.NoError(t, iMgr.RemoveFromSets([]*IPSetMetadata{TestNSSet.Metadata}, \"1.1.1.1\", \"a\"))\n\t// will add this member\n\trequire.NoError(t, iMgr.AddToSets([]*IPSetMetadata{TestNSSet.Metadata}, \"5.5.5.5\", \"e\"))\n\t// won't add/remove this member since the next two calls cancel each other out\n\trequire.NoError(t, iMgr.AddToSets([]*IPSetMetadata{TestNSSet.Metadata}, \"4.4.4.4\", \"d\"))\n\trequire.NoError(t, 
iMgr.RemoveFromSets([]*IPSetMetadata{TestNSSet.Metadata}, \"4.4.4.4\", \"d\"))\n\t// won't add/remove this member since the next two calls cancel each other out\n\trequire.NoError(t, iMgr.RemoveFromSets([]*IPSetMetadata{TestNSSet.Metadata}, \"2.2.2.2\", \"b\"))\n\trequire.NoError(t, iMgr.AddToSets([]*IPSetMetadata{TestNSSet.Metadata}, \"2.2.2.2\", \"b\"))\n\t// destroy extra set\n\tiMgr.DeleteIPSet(TestCIDRSet.PrefixName, util.SoftDelete)\n\n\texpectedLines := []string{\n\t\tfmt.Sprintf(\"-N %s --exist nethash\", TestNSSet.HashedName),\n\t\tfmt.Sprintf(\"-D %s 1.1.1.1\", TestNSSet.HashedName),\n\t\tfmt.Sprintf(\"-A %s 5.5.5.5\", TestNSSet.HashedName),\n\t\tfmt.Sprintf(\"-F %s\", TestCIDRSet.HashedName),\n\t\tfmt.Sprintf(\"-X %s\", TestCIDRSet.HashedName),\n\t\t\"\",\n\t}\n\tsortedExpectedLines := testAndSortRestoreFileLines(t, expectedLines)\n\tcreator := iMgr.fileCreatorForApply(len(calls))\n\tactualLines := testAndSortRestoreFileString(t, creator.ToString())\n\tdptestutils.AssertEqualLines(t, sortedExpectedLines, actualLines)\n\twasFileAltered, err := creator.RunCommandOnceWithFile(\"ipset\", \"restore\")\n\trequire.NoError(t, err, \"ipset restore should be successful\")\n\trequire.False(t, wasFileAltered, \"file should not be altered\")\n}", "func (p *Player) Reset() {\n\t//Reset Lifes to 2\n\tp.Lives = 2\n\n\t//Reset CurrentDeck to \"Original\" Deck\n\tp.Deck = p.Cards\n\n\t//Go trough Deck & generate GUIDs\n\tfor _, c := range p.Deck {\n\t\tc.SetGUID(GetNextGUID())\n\t}\n\tif p.Leader != nil {\n\t\tp.Leader.SetGUID(GetNextGUID())\n\t}\n\n\t//Give 10 random cards from CurrentDeck to Hand\n\tfor i := 0; i < 10; i++ {\n\t\tp.DrawCard()\n\t}\n\n\t//Check for Leader-related effects\n\tif p.Leader != nil && p.Leader.LeaderEffect == LeaderFxDrawExtraCard {\n\t\tp.Leader.Play(p, nil)\n\t}\n}", "func TestServiceReset(t *testing.T) {\n\ttbl := []struct {\n\t\tResources []string\n\t\tAccess []string\n\t\tExpected interface{}\n\t}{\n\t\t{nil, nil, nil},\n\t\t{[]string{}, 
nil, nil},\n\t\t{nil, []string{}, nil},\n\t\t{[]string{}, []string{}, nil},\n\n\t\t{[]string{\"test.foo.>\"}, nil, json.RawMessage(`{\"resources\":[\"test.foo.>\"]}`)},\n\t\t{nil, []string{\"test.foo.>\"}, json.RawMessage(`{\"access\":[\"test.foo.>\"]}`)},\n\t\t{[]string{\"test.foo.>\"}, []string{\"test.bar.>\"}, json.RawMessage(`{\"resources\":[\"test.foo.>\"],\"access\":[\"test.bar.>\"]}`)},\n\n\t\t{[]string{\"test.foo.>\"}, []string{}, json.RawMessage(`{\"resources\":[\"test.foo.>\"]}`)},\n\t\t{[]string{}, []string{\"test.foo.>\"}, json.RawMessage(`{\"access\":[\"test.foo.>\"]}`)},\n\t}\n\n\tfor _, l := range tbl {\n\t\trunTest(t, func(s *res.Service) {\n\t\t\ts.Handle(\"model\", res.GetResource(func(r res.GetRequest) { r.NotFound() }))\n\t\t}, func(s *restest.Session) {\n\t\t\ts.Service().Reset(l.Resources, l.Access)\n\t\t\t// Send token event to flush any system.reset event\n\t\t\ts.Service().TokenEvent(mock.CID, nil)\n\n\t\t\tif l.Expected != nil {\n\t\t\t\ts.GetMsg().\n\t\t\t\t\tAssertSubject(\"system.reset\").\n\t\t\t\t\tAssertPayload(l.Expected)\n\t\t\t}\n\n\t\t\ts.GetMsg().AssertTokenEvent(mock.CID, nil)\n\t\t})\n\t}\n}", "func (Registers *RegisterSet) Reset(usingBIOS bool) {\n\tRegisters.sysRegisters.reset(usingBIOS)\n\tRegisters.fiqRegisters.reset(usingBIOS)\n\tRegisters.svcRegisters.reset(usingBIOS)\n\tRegisters.abtRegisters.reset(usingBIOS)\n\tRegisters.irqRegisters.reset(usingBIOS)\n\tRegisters.undRegisters.reset(usingBIOS)\n}", "func TestOwnershipDiff(t *testing.T) {\n\tusers := []*user.User{\n\t\tfakeUser(\"1\", \"1\", \"user-1\"),\n\t\tfakeUser(\"2\", \"2\", \"user-2\"),\n\t\tfakeUser(\"3\", \"3\", \"user-3\"),\n\t}\n\tgroups := []*user.Group{\n\t\tfakeGroup(\"1\", \"group-1\"),\n\t\tfakeGroup(\"2\", \"group-2\"),\n\t\tfakeGroup(\"3\", \"group-3\"),\n\t}\n\tm := newMockOS(nil, users, groups, nil, nil)\n\tt.Run(\"Original\", func(t *testing.T) {\n\t\tt.Run(\"uid\", func(t *testing.T) {\n\t\t\to := (&owner.OwnershipDiff{UIDs: &[2]int{1, 
2}}).SetProxy(m)\n\t\t\tassert.Equal(t, \"user: user-1 (1)\", o.Original())\n\t\t})\n\t\tt.Run(\"gid\", func(t *testing.T) {\n\t\t\to := (&owner.OwnershipDiff{GIDs: &[2]int{1, 2}}).SetProxy(m)\n\t\t\tassert.Equal(t, \"group: group-1 (1)\", o.Original())\n\t\t})\n\t\tt.Run(\"both\", func(t *testing.T) {\n\t\t\to := (&owner.OwnershipDiff{UIDs: &[2]int{1, 2}, GIDs: &[2]int{1, 2}}).SetProxy(m)\n\t\t\tassert.Equal(t, \"user: user-1 (1); group: group-1 (1)\", o.Original())\n\t\t})\n\t\tt.Run(\"heterogenous\", func(t *testing.T) {\n\t\t\tt.Run(\"mismatched-uid\", func(t *testing.T) {\n\t\t\t\to := (&owner.OwnershipDiff{UIDs: &[2]int{1, 2}, GIDs: &[2]int{1, 1}}).SetProxy(m)\n\t\t\t\tassert.Equal(t, \"user: user-1 (1)\", o.Original())\n\t\t\t})\n\t\t\tt.Run(\"mismatched-gid\", func(t *testing.T) {\n\t\t\t\to := (&owner.OwnershipDiff{UIDs: &[2]int{1, 1}, GIDs: &[2]int{1, 2}}).SetProxy(m)\n\t\t\t\tassert.Equal(t, \"group: group-1 (1)\", o.Original())\n\t\t\t})\n\t\t})\n\t})\n\tt.Run(\"Current\", func(t *testing.T) {\n\t\tt.Run(\"uid\", func(t *testing.T) {\n\t\t\to := (&owner.OwnershipDiff{UIDs: &[2]int{1, 2}}).SetProxy(m)\n\t\t\tassert.Equal(t, \"user: user-2 (2)\", o.Current())\n\t\t})\n\t\tt.Run(\"gid\", func(t *testing.T) {\n\t\t\to := (&owner.OwnershipDiff{GIDs: &[2]int{1, 2}}).SetProxy(m)\n\t\t\tassert.Equal(t, \"group: group-2 (2)\", o.Current())\n\t\t})\n\t\tt.Run(\"both\", func(t *testing.T) {\n\t\t\to := (&owner.OwnershipDiff{UIDs: &[2]int{1, 2}, GIDs: &[2]int{1, 2}}).SetProxy(m)\n\t\t\tassert.Equal(t, \"user: user-2 (2); group: group-2 (2)\", o.Current())\n\t\t})\n\t\tt.Run(\"heterogenous\", func(t *testing.T) {\n\t\t\tt.Run(\"mismatched-uid\", func(t *testing.T) {\n\t\t\t\to := (&owner.OwnershipDiff{UIDs: &[2]int{1, 2}, GIDs: &[2]int{1, 1}}).SetProxy(m)\n\t\t\t\tassert.Equal(t, \"user: user-2 (2)\", o.Current())\n\t\t\t})\n\t\t\tt.Run(\"mismatched-gid\", func(t *testing.T) {\n\t\t\t\to := (&owner.OwnershipDiff{UIDs: &[2]int{1, 1}, GIDs: &[2]int{1, 
2}}).SetProxy(m)\n\t\t\t\tassert.Equal(t, \"group: group-2 (2)\", o.Current())\n\t\t\t})\n\t\t})\n\t})\n\tt.Run(\"Changes\", func(t *testing.T) {\n\t\tt.Run(\"uid\", func(t *testing.T) {\n\t\t\to := (&owner.OwnershipDiff{UIDs: &[2]int{1, 2}}).SetProxy(m)\n\t\t\tassert.True(t, o.Changes())\n\t\t})\n\t\tt.Run(\"gid\", func(t *testing.T) {\n\t\t\to := (&owner.OwnershipDiff{GIDs: &[2]int{1, 2}}).SetProxy(m)\n\t\t\tassert.True(t, o.Changes())\n\t\t})\n\t\tt.Run(\"both\", func(t *testing.T) {\n\t\t\to := (&owner.OwnershipDiff{UIDs: &[2]int{1, 2}, GIDs: &[2]int{1, 2}}).SetProxy(m)\n\t\t\tassert.True(t, o.Changes())\n\t\t})\n\t\tt.Run(\"heterogenous\", func(t *testing.T) {\n\t\t\tt.Run(\"mismatched-uid\", func(t *testing.T) {\n\t\t\t\to := (&owner.OwnershipDiff{UIDs: &[2]int{1, 2}, GIDs: &[2]int{1, 1}}).SetProxy(m)\n\t\t\t\tassert.True(t, o.Changes())\n\t\t\t})\n\t\t\tt.Run(\"mismatched-gid\", func(t *testing.T) {\n\t\t\t\to := (&owner.OwnershipDiff{UIDs: &[2]int{1, 1}, GIDs: &[2]int{1, 2}}).SetProxy(m)\n\t\t\t\tassert.True(t, o.Changes())\n\t\t\t})\n\t\t})\n\t\tt.Run(\"neither\", func(t *testing.T) {\n\t\t\to := (&owner.OwnershipDiff{}).SetProxy(m)\n\t\t\tassert.False(t, o.Changes())\n\t\t})\n\t})\n\tt.Run(\"NewOwnershipDiff\", func(t *testing.T) {\n\t\townershipRecords := []ownershipRecord{\n\t\t\tmakeOwned(\"foo\", \"user-1\", \"1\", \"group-1\", \"1\"),\n\t\t}\n\t\tm := newMockOS(ownershipRecords, users, groups, nil, nil)\n\t\tt.Run(\"when-matching\", func(t *testing.T) {\n\t\t\to := &owner.Ownership{UID: intRef(1), GID: intRef(1)}\n\t\t\td, err := owner.NewOwnershipDiff(m, \"foo\", o)\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.False(t, d.Changes())\n\t\t})\n\t\tt.Run(\"when-mismatched\", func(t *testing.T) {\n\t\t\to := &owner.Ownership{UID: intRef(2), GID: intRef(2)}\n\t\t\td, err := owner.NewOwnershipDiff(m, \"foo\", o)\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.True(t, d.Changes())\n\t\t})\n\t\tt.Run(\"when-uid-match\", func(t *testing.T) {\n\t\t\to := 
&owner.Ownership{UID: intRef(1), GID: intRef(2)}\n\t\t\td, err := owner.NewOwnershipDiff(m, \"foo\", o)\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.True(t, d.Changes())\n\t\t})\n\t\tt.Run(\"when-gid-match\", func(t *testing.T) {\n\t\t\to := &owner.Ownership{UID: intRef(2), GID: intRef(1)}\n\t\t\td, err := owner.NewOwnershipDiff(m, \"foo\", o)\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.True(t, d.Changes())\n\t\t})\n\t\tt.Run(\"when-only-uid\", func(t *testing.T) {\n\t\t\tt.Run(\"when-matches\", func(t *testing.T) {\n\t\t\t\to := &owner.Ownership{UID: intRef(1)}\n\t\t\t\td, err := owner.NewOwnershipDiff(m, \"foo\", o)\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tassert.False(t, d.Changes())\n\t\t\t})\n\t\t\tt.Run(\"when-not-matches\", func(t *testing.T) {\n\t\t\t\to := &owner.Ownership{UID: intRef(2)}\n\t\t\t\td, err := owner.NewOwnershipDiff(m, \"foo\", o)\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tassert.True(t, d.Changes())\n\t\t\t})\n\t\t})\n\t\tt.Run(\"when-only-gid\", func(t *testing.T) {\n\t\t\tt.Run(\"when-matches\", func(t *testing.T) {\n\t\t\t\to := &owner.Ownership{GID: intRef(1)}\n\t\t\t\td, err := owner.NewOwnershipDiff(m, \"foo\", o)\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tassert.False(t, d.Changes())\n\t\t\t})\n\t\t\tt.Run(\"when-not-matches\", func(t *testing.T) {\n\t\t\t\to := &owner.Ownership{GID: intRef(2)}\n\t\t\t\td, err := owner.NewOwnershipDiff(m, \"foo\", o)\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tassert.True(t, d.Changes())\n\t\t\t})\n\t\t})\n\t})\n\tt.Run(\"when-syscall-errors\", func(t *testing.T) {\n\t\texpectedError := errors.New(\"error\")\n\t\to := &owner.Ownership{UID: intRef(1), GID: intRef(1)}\n\t\tt.Run(\"GetUID\", func(t *testing.T) {\n\t\t\tm := failingMockOS(map[string]error{\"GetUID\": expectedError})\n\t\t\t_, err := owner.NewOwnershipDiff(m, \"foo\", o)\n\t\t\tassert.Equal(t, expectedError, err)\n\t\t})\n\t\tt.Run(\"GetGID\", func(t *testing.T) {\n\t\t\tm := failingMockOS(map[string]error{\"GetGID\": 
expectedError})\n\t\t\t_, err := owner.NewOwnershipDiff(m, \"foo\", o)\n\t\t\tassert.Equal(t, expectedError, err)\n\t\t})\n\t})\n}", "func TestPopulateResources(t *testing.T) {\n\ttestName := \"TestPopulateResources\"\n\n\tbeforeTest()\n\t// kinds to check for status\n\tvar kindsToCheckStatus = map[string]bool{}\n\n\tvar files = []string{\n\t\tKappnavConfigFile,\n\t\tCrdApplication,\n\t\tappBookinfo,\n\t\tappDetails,\n\t\tdeploymentDetailsV1,\n\t\tserviceDetails,\n\t\tingressBookinfo,\n\t\tappProductpage,\n\t\tnetworkpolicyProductpage,\n\t\tdeploymentProcuctpageV1,\n\t\tserviceProductpage,\n\t\tappRatings,\n\t\tdeploymentRatingsV1,\n\t\tserviceRatings,\n\t\tappReviews,\n\t\tnetworkpolicyReviews,\n\t\tdeploymentReviewsV1,\n\t\tdeploymentReviewsV2,\n\t\tdeploymentReviewsV3,\n\t\tserviceReview,\n\t\tcrdFoo,\n\t\tfooExample,\n\t\tappFoo,\n\t\tkappnavCRFile,\n\t}\n\n\titeration0IDs, err := readResourceIDs(files)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t/* Iteration 0: all normal */\n\ttestActions := newTestActions(testName, kindsToCheckStatus)\n\tvar emptyIDs = []resourceID{}\n\ttestActions.addIteration(iteration0IDs, emptyIDs)\n\n\t/* iteration 1: clean up */\n\ttestActions.addIteration(emptyIDs, emptyIDs)\n\n\t/* create a watcher that populates all resources */\n\tclusterWatcher, err := createClusterWatcher(iteration0IDs, testActions, StatusFailureRate)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer clusterWatcher.shutDown()\n\n\t// ensure we can find each resource\n\tfor _, res := range iteration0IDs {\n\t\texists, _ := resourceExists(clusterWatcher, res)\n\t\tif !exists {\n\t\t\tt.Fatal(fmt.Errorf(\"can't find resource for %s\\n,\", res.fileName))\n\t\t}\n\t}\n\n\terr = testActions.transitionAll()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func (w *percentRankWindow) Reset(context.Context) {\n\tw.peerRes = nil\n}", "func (cr *cmdRunner) prepReset(scanRes *storage.ScmScanResponse) (*storage.ScmPrepareResponse, error) {\n\tstate := scanRes.State\n\tresp 
:= &storage.ScmPrepareResponse{State: state}\n\n\tcr.log.Debugf(\"scm backend prep reset: state %q\", state)\n\n\tif err := cr.deleteGoals(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tswitch state {\n\tcase storage.ScmStateNoRegions:\n\t\treturn resp, nil\n\tcase storage.ScmStateFreeCapacity, storage.ScmStateNoFreeCapacity, storage.ScmStateNotInterleaved:\n\t\t// Continue to remove namespaces and regions.\n\t\tresp.RebootRequired = true\n\tdefault:\n\t\treturn nil, errors.Errorf(\"unhandled scm state %q\", state)\n\t}\n\n\tfor _, dev := range scanRes.Namespaces {\n\t\tif err := cr.removeNamespace(dev.Name); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tcr.log.Infof(\"Resetting PMem memory allocations.\")\n\n\tif err := cr.removeRegions(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resp, nil\n}", "func (w *rankWindow) Reset(context.Context) {\n\tw.peerRes = nil\n}", "func (m *PlayerMutation) ResetCards() {\n\tm.cards = nil\n\tm.clearedcards = false\n\tm.removedcards = nil\n}", "func (m *User) SetOwnedDevices(value []DirectoryObjectable)() {\n m.ownedDevices = value\n}", "func (m *PatientrightstypeMutation) ResetResponsible() {\n\tm._Responsible = nil\n}", "func (m *CarMutation) ResetOwner() {\n\tm.owner = nil\n\tm.clearedowner = false\n}", "func (pas *PodAutoscalerStatus) MarkResourceNotOwned(kind, name string) {\n\tpas.MarkInactive(\"NotOwned\",\n\t\tfmt.Sprintf(\"There is an existing %s %q that we do not own.\", kind, name))\n}", "func TestGatewayOwnership(t *testing.T) {\n\ttestName := \"test-gateway-owned\"\n\tcontourName := fmt.Sprintf(\"%s-contour\", testName)\n\tgcName := \"test-gatewayclass-owned\"\n\tcfg := objcontour.Config{\n\t\tName: contourName,\n\t\tNamespace: operatorNs,\n\t\tSpecNs: specNs,\n\t\tNetworkType: operatorv1alpha1.NodePortServicePublishingType,\n\t\tGatewayClass: &gcName,\n\t}\n\n\tnonOwnedClass := \"test-gatewayclass-not-owned\"\n\t// Create Gateway API resources that should not be managed by the operator.\n\tif err := 
newGatewayClass(ctx, kclient, nonOwnedClass); err != nil {\n\t\tt.Fatalf(\"failed to create gatewayclass %s: %v\", nonOwnedClass, err)\n\t}\n\tt.Logf(\"created gatewayclass %s\", nonOwnedClass)\n\n\t// The gatewayclass should not report admitted.\n\tif err := waitForGatewayClassStatusConditions(ctx, kclient, 5*time.Second, nonOwnedClass, expectedNonOwnedGatewayClassConditions...); err != nil {\n\t\tt.Fatalf(\"failed to observe expected status conditions for gatewayclass %s: %v\", nonOwnedClass, err)\n\t}\n\n\t// Create the namespace used by the non-owned gateway\n\tif err := newNs(ctx, kclient, cfg.SpecNs); err != nil {\n\t\tt.Fatalf(\"failed to create namespace %s: %v\", cfg.SpecNs, err)\n\t}\n\n\tnonOwnedGateway := \"other-vendor\"\n\tappName := fmt.Sprintf(\"%s-%s\", testAppName, testName)\n\tif err := newGateway(ctx, kclient, cfg.SpecNs, nonOwnedGateway, nonOwnedClass, \"app\", appName); err != nil {\n\t\tt.Fatalf(\"failed to create gateway %s/%s: %v\", cfg.SpecNs, nonOwnedGateway, err)\n\t}\n\tt.Logf(\"created gateway %s/%s\", cfg.SpecNs, nonOwnedGateway)\n\n\t// The gateway should not report scheduled.\n\tif err := waitForGatewayStatusConditions(ctx, kclient, 5*time.Second, nonOwnedGateway, cfg.SpecNs, expectedNonOwnedGatewayConditions...); err != nil {\n\t\tt.Fatalf(\"failed to observe expected status conditions for gateway %s/%s: %v\", cfg.SpecNs, nonOwnedGateway, err)\n\t}\n\n\t// Create the Contour and Gateway API resources that should be managed by the operator.\n\tcntr, err := newContour(ctx, kclient, cfg)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create contour %s/%s: %v\", operatorNs, contourName, err)\n\t}\n\tt.Logf(\"created contour %s/%s\", cntr.Namespace, cntr.Name)\n\n\tif err := newOperatorGatewayClass(ctx, kclient, gcName, operatorNs, contourName); err != nil {\n\t\tt.Fatalf(\"failed to create gatewayclass %s: %v\", gcName, err)\n\t}\n\tt.Logf(\"created gatewayclass %s\", gcName)\n\n\t// The gatewayclass should now report admitted.\n\tif err 
:= waitForGatewayClassStatusConditions(ctx, kclient, 1*time.Minute, gcName, expectedGatewayClassConditions...); err != nil {\n\t\tt.Fatalf(\"failed to observe expected status conditions for gatewayclass %s: %v\", gcName, err)\n\t}\n\n\t// The contour should now report available.\n\tif err := waitForContourStatusConditions(ctx, kclient, 1*time.Minute, contourName, operatorNs, expectedContourConditions...); err != nil {\n\t\tt.Fatalf(\"failed to observe expected status conditions for contour %s/%s: %v\", operatorNs, testName, err)\n\t}\n\tt.Logf(\"observed expected status conditions for contour %s/%s\", testName, operatorNs)\n\n\t// Create the gateway. The gateway must be projectcontour/contour until the following issue is fixed:\n\t// https://github.com/projectcontour/contour-operator/issues/241\n\tgwName := \"contour\"\n\tif err := newGateway(ctx, kclient, cfg.SpecNs, gwName, gcName, \"app\", appName); err != nil {\n\t\tt.Fatalf(\"failed to create gateway %s/%s: %v\", cfg.SpecNs, gwName, err)\n\t}\n\tt.Logf(\"created gateway %s/%s\", cfg.SpecNs, gwName)\n\n\t// The gateway should report admitted.\n\tif err := waitForGatewayStatusConditions(ctx, kclient, 3*time.Minute, gwName, cfg.SpecNs, expectedGatewayConditions...); err != nil {\n\t\tt.Fatalf(\"failed to observe expected status conditions for gateway %s/%s: %v\", cfg.SpecNs, gwName, err)\n\t}\n\n\tgateways := []string{nonOwnedGateway, gwName}\n\tfor _, gw := range gateways {\n\t\t// Ensure the gateway can be deleted and clean-up.\n\t\tif err := deleteGateway(ctx, kclient, 3*time.Minute, gw, cfg.SpecNs); err != nil {\n\t\t\tt.Fatalf(\"failed to delete gateway %s/%s: %v\", cfg.SpecNs, gw, err)\n\t\t}\n\t}\n\n\tclasses := []string{nonOwnedClass, gcName}\n\tfor _, class := range classes {\n\t\t// Ensure the gatewayclass can be deleted and clean-up.\n\t\tif err := deleteGatewayClass(ctx, kclient, 3*time.Minute, class); err != nil {\n\t\t\tt.Fatalf(\"failed to delete gatewayclass %s: %v\", class, 
err)\n\t\t}\n\t}\n\n\t// Ensure the contour can be deleted and clean-up.\n\tif err := deleteContour(ctx, kclient, 3*time.Minute, contourName, operatorNs); err != nil {\n\t\tt.Fatalf(\"failed to delete contour %s/%s: %v\", operatorNs, contourName, err)\n\t}\n\n\t// Ensure the envoy service is cleaned up automatically.\n\tif err := waitForServiceDeletion(ctx, kclient, 3*time.Minute, specNs, \"envoy\"); err != nil {\n\t\tt.Fatalf(\"failed to delete contour %s/envoy: %v\", specNs, err)\n\t}\n\tt.Logf(\"cleaned up envoy service %s/envoy\", specNs)\n\n\t// Delete the operand namespace since contour.spec.namespace.removeOnDeletion\n\t// defaults to false.\n\tif err := deleteNamespace(ctx, kclient, 5*time.Minute, cfg.SpecNs); err != nil {\n\t\tt.Fatalf(\"failed to delete namespace %s: %v\", cfg.SpecNs, err)\n\t}\n\tt.Logf(\"observed the deletion of namespace %s\", cfg.SpecNs)\n}", "func (_m *MockSeriesIterator) Reset(id ident.ID, ns ident.ID, startInclusive time.Time, endExclusive time.Time, replicas []Iterator) {\n\t_m.ctrl.Call(_m, \"Reset\", id, ns, startInclusive, endExclusive, replicas)\n}", "func setOwnerRef(r *unstructured.Unstructured, set *apps.ResourceSet) {\n\tvar newRefs []metav1.OwnerReference\n\tfor _, or := range r.GetOwnerReferences() {\n\t\tif or.APIVersion != \"apps.cloudrobotics.com/v1alpha1\" || or.Kind != \"ResourceSet\" {\n\t\t\tnewRefs = append(newRefs, or)\n\t\t}\n\t}\n\t_true := true\n\tnewRefs = append(newRefs, metav1.OwnerReference{\n\t\tAPIVersion: \"apps.cloudrobotics.com/v1alpha1\",\n\t\tKind: \"ResourceSet\",\n\t\tName: set.Name,\n\t\tUID: set.UID,\n\t\tBlockOwnerDeletion: &_true,\n\t})\n\tr.SetOwnerReferences(newRefs)\n}", "func (m *CardMutation) ResetOwner() {\n\tm.owner = nil\n\tm.clearedowner = false\n}", "func (g *Game) resetAllReadyForNextRound() {\n\tg.mu.Lock()\n\tdefer g.mu.Unlock()\n\tfor _, p := range g.players {\n\t\tp.readyForNextRound = false\n\t}\n}", "func AWSReset() {\n\tSetClusterName()\n\tsshUser, osLabel := 
distSelect()\n\tinstaller.RunPlaybook(\"./inventory/\"+common.Name+\"/installer/\", \"reset.yml\", sshUser, osLabel)\n\t// waiting for Infrastructure\n\ttime.Sleep(30)\n\tAWSInstall()\n\treturn\n}", "func TestSyncClusterMachineSetOwnerReference(t *testing.T) {\n\ttrueVar := true\n\tcases := []struct {\n\t\tname string\n\t\townerRef *metav1.OwnerReference\n\t\texpectNewMaster bool\n\t}{\n\t\t{\n\t\t\tname: \"owned\",\n\t\t\townerRef: &metav1.OwnerReference{\n\t\t\t\tUID: testClusterUUID,\n\t\t\t\tAPIVersion: clusteroperator.SchemeGroupVersion.String(),\n\t\t\t\tKind: \"Cluster\",\n\t\t\t\tController: &trueVar,\n\t\t\t},\n\t\t\texpectNewMaster: false,\n\t\t},\n\t\t{\n\t\t\tname: \"no owner\",\n\t\t\texpectNewMaster: true,\n\t\t},\n\t\t{\n\t\t\tname: \"different owner\",\n\t\t\townerRef: &metav1.OwnerReference{\n\t\t\t\tUID: types.UID(\"other-cluster-uuid\"),\n\t\t\t\tAPIVersion: clusteroperator.SchemeGroupVersion.String(),\n\t\t\t\tKind: \"Cluster\",\n\t\t\t\tController: &trueVar,\n\t\t\t},\n\t\t\texpectNewMaster: true,\n\t\t},\n\t}\n\tfor _, tc := range cases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tcontroller, clusterStore, machineSetStore, _, clusterOperatorClient := newTestClusterController()\n\n\t\t\tcluster := newCluster()\n\t\t\tclusterStore.Add(cluster)\n\n\t\t\tmachineSetName := fmt.Sprintf(\"%s-master-random\", cluster.Name)\n\t\t\tmachineSet := newMachineSet(machineSetName, cluster, false)\n\t\t\tmachineSet.Spec.NodeType = clusteroperator.NodeTypeMaster\n\t\t\tmachineSet.Spec.Size = 1\n\t\t\tmachineSet.Spec.Infra = true\n\t\t\tif tc.ownerRef != nil {\n\t\t\t\tmachineSet.OwnerReferences = []metav1.OwnerReference{*tc.ownerRef}\n\t\t\t}\n\t\t\tmachineSetStore.Add(machineSet)\n\n\t\t\tcontroller.syncCluster(getKey(cluster, t))\n\n\t\t\texpectedActions := []expectedClientAction{}\n\t\t\tif tc.expectNewMaster {\n\t\t\t\texpectedActions = append(expectedActions, newExpectedMachineSetCreateAction(cluster, \"master\"))\n\t\t\t} else 
{\n\t\t\t\texpectedActions = append(expectedActions, expectedClusterStatusUpdateAction{machineSets: 1})\n\t\t\t}\n\n\t\t\tvalidateClientActions(t, \"TestSyncClusterMachineSetOwnerReference.\"+tc.name, clusterOperatorClient, expectedActions...)\n\n\t\t\texpectedAdds := 0\n\t\t\tif tc.expectNewMaster {\n\t\t\t\texpectedAdds = 1\n\t\t\t}\n\t\t\tvalidateControllerExpectations(t, \"TestSyncClusterMachineSetOwnerReference.\"+tc.name, controller, cluster, expectedAdds, 0)\n\t\t})\n\t}\n}", "func (mock *Serf) Reset() {\n\tlockSerfBootstrap.Lock()\n\tmock.calls.Bootstrap = nil\n\tlockSerfBootstrap.Unlock()\n\tlockSerfCluster.Lock()\n\tmock.calls.Cluster = nil\n\tlockSerfCluster.Unlock()\n\tlockSerfID.Lock()\n\tmock.calls.ID = nil\n\tlockSerfID.Unlock()\n\tlockSerfJoin.Lock()\n\tmock.calls.Join = nil\n\tlockSerfJoin.Unlock()\n\tlockSerfMember.Lock()\n\tmock.calls.Member = nil\n\tlockSerfMember.Unlock()\n\tlockSerfShutdown.Lock()\n\tmock.calls.Shutdown = nil\n\tlockSerfShutdown.Unlock()\n}", "func (_m *MockMultiReaderIterator) Reset(readers []io.Reader) {\n\t_m.ctrl.Call(_m, \"Reset\", readers)\n}", "func (w *cumulativeDistWindow) Reset(context.Context) {\n\tw.peerRes = nil\n}", "func validateOwnerRefs(r *unstructured.Unstructured, set *apps.ResourceSet) error {\n\tif set == nil {\n\t\treturn nil\n\t}\n\tname, version, ok := decodeResourceSetName(set.Name)\n\tif !ok {\n\t\treturn errors.Errorf(\"invalid ResourceSet name %q\", set.Name)\n\t}\n\tfor _, or := range r.GetOwnerReferences() {\n\t\tif or.APIVersion != \"apps.cloudrobotics.com/v1alpha1\" || or.Kind != \"ResourceSet\" {\n\t\t\tcontinue\n\t\t}\n\t\tn, v, ok := decodeResourceSetName(or.Name)\n\t\tif !ok {\n\t\t\treturn errors.Errorf(\"ResourceSet owner reference with invalid name %q\", or.Name)\n\t\t}\n\t\tif n != name {\n\t\t\treturn errors.Errorf(\"owned by conflicting ResourceSet object %q\", or.Name)\n\t\t}\n\t\tif v > version {\n\t\t\t// TODO(rodrigoq): should this be transient to cope with concurrent synk 
runs?\n\t\t\treturn errors.Errorf(\"owned by newer ResourceSet %q > v%d\", or.Name, version)\n\t\t}\n\t}\n\treturn nil\n}", "func (m *AbilitypatientrightsMutation) ResetMedicalSupplies() {\n\tm._MedicalSupplies = nil\n\tm.add_MedicalSupplies = nil\n}", "func (m *MockMetrics) SetSpecialResourcesCreated(value int) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"SetSpecialResourcesCreated\", value)\n}", "func (r *randList) Reset() {\n\tr.offset = 0\n\tr.perm = rand.Perm(len(r.list))\n}", "func (c *Mock) SetOwner(v bool) interfaces.Client {\n\treturn c.FakeSetOwner(v)\n}", "func (omx OmxPlayer) Reset() (error) {\n\tomx.player = nil\n\tomx.omxIn = nil\n\tomx.omxKill()\n\treturn nil\n}", "func (r *KubeCarrierReconciler) reconcileOwnedObjects(ctx context.Context, log logr.Logger, kubeCarrier *operatorv1alpha1.KubeCarrier, objects []unstructured.Unstructured) (bool, error) {\n\tvar deploymentIsReady bool\n\tfor _, object := range objects {\n\t\tif err := addOwnerReference(kubeCarrier, &object, r.Scheme); err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tcurObj, err := reconcile.Unstructured(ctx, log, r.Client, &object)\n\t\tif err != nil {\n\t\t\treturn false, fmt.Errorf(\"reconcile kind: %s, err: %w\", object.GroupVersionKind().Kind, err)\n\t\t}\n\n\t\tswitch obj := curObj.(type) {\n\t\tcase *appsv1.Deployment:\n\t\t\tdeploymentIsReady = util.DeploymentIsAvailable(obj)\n\t\t}\n\t}\n\treturn deploymentIsReady, nil\n}", "func (mnuo *MetricNameUpdateOne) ClearOwners() *MetricNameUpdateOne {\n\tmnuo.mutation.ClearOwners()\n\treturn mnuo\n}", "func (l *Manager) ReleaseResources(client string) {\n\t// Looping over the set\n\tfor r := range l.ClientHolder[client] {\n\t\tmsg := l.ReleaseResource(client, r.(string))\n\t\tlog.Println(msg)\n\t}\n}", "func (s *UAA) Reset() {\n\ts.users.Clear()\n\ts.clients.Clear()\n\ts.groups.Clear()\n}", "func (r *ElevatorReconciler) reconcileOwnedObjects(ctx context.Context, log logr.Logger, elevator *operatorv1alpha1.Elevator, objects 
[]unstructured.Unstructured) (bool, error) {\n\tvar deploymentIsReady bool\n\tfor _, object := range objects {\n\t\tif err := addOwnerReference(elevator, &object, r.Scheme); err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tcurObj, err := reconcile.Unstructured(ctx, log, r.Client, &object)\n\t\tif err != nil {\n\t\t\treturn false, fmt.Errorf(\"reconcile kind: %s, err: %w\", object.GroupVersionKind().Kind, err)\n\t\t}\n\n\t\tswitch obj := curObj.(type) {\n\t\tcase *appsv1.Deployment:\n\t\t\tdeploymentIsReady = util.DeploymentIsAvailable(obj)\n\t\t}\n\t}\n\treturn deploymentIsReady, nil\n}", "func TestSpareResourceSetUpdate(t *testing.T) { //nolint:dupl\n\tvar result SpareResourceSet\n\terr := json.NewDecoder(strings.NewReader(spareResourceSetBody)).Decode(&result)\n\n\tif err != nil {\n\t\tt.Errorf(\"Error decoding JSON: %s\", err)\n\t}\n\n\ttestClient := &common.TestClient{}\n\tresult.SetClient(testClient)\n\n\tresult.OnLine = true\n\tresult.ResourceType = \"Hat\"\n\tresult.TimeToProvision = \"P0DT06H30M5S\"\n\tresult.TimeToReplenish = \"P5DT0H12M0S\"\n\terr = result.Update()\n\n\tif err != nil {\n\t\tt.Errorf(\"Error making Update call: %s\", err)\n\t}\n\n\tcalls := testClient.CapturedCalls()\n\n\tif strings.Contains(calls[0].Payload, \"OnLine\") {\n\t\tt.Errorf(\"Unexpected OnLine update payload: %s\", calls[0].Payload)\n\t}\n\n\tif !strings.Contains(calls[0].Payload, \"ResourceType:Hat\") {\n\t\tt.Errorf(\"Unexpected ResourceType update payload: %s\", calls[0].Payload)\n\t}\n\n\tif !strings.Contains(calls[0].Payload, \"TimeToProvision:P0DT06H30M5S\") {\n\t\tt.Errorf(\"Unexpected TimeToProvision update payload: %s\", calls[0].Payload)\n\t}\n\n\tif !strings.Contains(calls[0].Payload, \"TimeToReplenish:P5DT0H12M0S\") {\n\t\tt.Errorf(\"Unexpected TimeToReplenish update payload: %s\", calls[0].Payload)\n\t}\n}", "func (m *RestaurantMutation) ResetOwner() {\n\tm.owner = nil\n\tm.clearedowner = false\n}", "func (m *CompetenceMutation) ResetOwner() {\n\tm.owner = 
nil\n\tm.clearedowner = false\n}", "func (m *UsersMgmtServiceServerMock) Reset() {\n\tm.GetUsersFunc = nil\n\tm.GetUserFunc = nil\n\tm.CreateUserFunc = nil\n\tm.DeleteUserFunc = nil\n\tm.UpdateUserFunc = nil\n\tm.UpdateSelfFunc = nil\n}", "func ResourceOwnedBy(owner runtime.Object) Func {\n\treturn func(obj runtime.Object) bool {\n\t\treturn metav1.IsControlledBy(obj.(metav1.Object), owner.(metav1.Object))\n\t}\n}", "func ResetVars() {\n\trand.Seed(time.Now().UTC().UnixNano())\n\tPlayers = nil\n\tGameSetup.Name = \"\"\n\tGameSetup.Roles = make(map[string]int)\n\tGameSetup.Total = 0\n\tGameSetup.Keep = 100\n\tGame.Name = \"\"\n\tGame.Number = -1\n\tGame.RoundNight = true\n\tGame.RoundNum = 0\n\tGame.Seed = rand.Int63()\n\tMultiverse.Universes = nil\n\tMultiverse.originalAssignments = nil\n\tMultiverse.rando = nil\n\tResetObservations()\n}", "func (s *Suite) Reset() {\n\tfor _, set := range s.sets {\n\t\tset.Reset()\n\t}\n}", "func setOwner(stub shim.ChaincodeStubInterface, args []string) pb.Response {\n\tvar err error\n\tfmt.Println(\"starting set_owner\")\n\n\tif len(args) != 2 {\n\t\treturn shim.Error(\"Incorrect number of arguments. 
Expecting 2\")\n\t}\n\n\treceiptId := args[0]\n\townerId := args[1]\n\tfmt.Println(receiptId + \"->\" + ownerId)\n\n\t// check if user already exists\n\towner, err := getOwner(stub, ownerId)\n\tif err != nil {\n\t\treturn shim.Error(\"This owner does not exist - \" + ownerId)\n\t}\n\n\t// get receipt's current state\n\treceiptAsBytes, err := stub.GetState(receiptId)\n\tif err != nil {\n\t\treturn shim.Error(\"Failed to get Receipt\")\n\t}\n\tres := Receipt{}\n\tjson.Unmarshal(receiptAsBytes, &res)\n\n\tres.OwnerRelation.Id = owner.Id\n\tres.OwnerRelation.Username = owner.Username\n\n\tjsonAsBytes, _ := json.Marshal(res)\n\terr = stub.PutState(args[0], jsonAsBytes)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\n\tfmt.Println(\"- end set owner\")\n\treturn shim.Success(nil)\n}", "func TestGitRemoteAnnouncer_Reset(t *testing.T) {\n\tg := MockRepositoryProducer{t: t}\n\ta, err := announcer.NewGitRemoteAnnouncer(announcer.GitRemoteAnnouncerConfig{\n\t\tGit: &g,\n\t})\n\tassert.NotNil(t, a)\n\tassert.Nil(t, err)\n\tprevClones := g.clones\n\terr = a.Reset()\n\tassert.Nil(t, err)\n\tassert.Equal(t, prevClones+1, g.clones)\n}", "func (w *denseRankWindow) Reset(context.Context) {\n\tw.denseRank = 0\n\tw.peerRes = nil\n}", "func (v *AccountCoinsRequest) Reset() {\n\tv.AccountIdentifier.Reset()\n\tv.Currencies = v.Currencies[:0]\n\tv.IncludeMempool = false\n}", "func (m *SafeMode) Reset(tctx *tcontext.Context) {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\n\t//nolint:errcheck\n\tm.setCount(tctx, 0)\n\tm.tables = make(map[string]struct{})\n}", "func (f *fixture) Reset(ctx context.Context) error {\n\tif f.startChrome && f.cr != nil {\n\t\tif err := UnmountAllSmbMounts(ctx, f.cr); err != nil {\n\t\t\ttesting.ContextLog(ctx, \"Failed to unmount all SMB mounts: \", err)\n\t\t}\n\t}\n\treturn removeAllContents(ctx, f.guestDir)\n}", "func ResetIPSetEntries() {\n\tnumIPSetEntries.Set(0)\n\tfor setName := range ipsetInventoryMap 
{\n\t\tremoveFromIPSetInventory(setName)\n\t}\n\tipsetInventoryMap = make(map[string]int)\n}", "func (r *FakeRedis) Reset() {\n\tr.GetCalledWith = \"\"\n\tr.SetCalledWith = \"\"\n\tr.DeleteCalledWith = \"\"\n\tr.DeleteReturns = 0\n\tr.ExpectError = false\n\tr.GetReturns = nil\n\tr.SetObject = nil\n}", "func (cli *FakeConfigAgentClient) Reset() {\n\t*cli = FakeConfigAgentClient{}\n}", "func TestQosCpuSet_Manage(t *testing.T) {\n\ttotal, err := getTotalCpus()\n\tif err != nil {\n\t\tt.Skipf(\"cpu qos cpuset skipped for get total cpu err: %v\", err)\n\t}\n\tlastCoreStr := fmt.Sprintf(\"%d\", total-1)\n\tlastSecCoreStr := fmt.Sprintf(\"%d\", total-2)\n\tleftCoreStr := fmt.Sprintf(\"0-%d\", total-2)\n\tif total == 2 {\n\t\tleftCoreStr = \"0\"\n\t}\n\n\ttestCases := []cpuSetTestData{\n\t\t{\n\t\t\tdescribe: \"no reserved\",\n\t\t\treserved: sets.NewInt(),\n\t\t\tlimit: 1,\n\t\t\tonlineIsolate: false,\n\t\t\texpect: struct {\n\t\t\t\toffline string\n\t\t\t\tonline string\n\t\t\t}{offline: lastCoreStr, online: \"\"},\n\t\t},\n\t\t{\n\t\t\tdescribe: \"has reserved\",\n\t\t\treserved: sets.NewInt([]int{int(total) - 1}...),\n\t\t\tlimit: 1,\n\t\t\tonlineIsolate: false,\n\t\t\texpect: struct {\n\t\t\t\toffline string\n\t\t\t\tonline string\n\t\t\t}{offline: lastSecCoreStr, online: \"\"},\n\t\t},\n\t\t{\n\t\t\tdescribe: \"online isolate enable\",\n\t\t\treserved: sets.NewInt(),\n\t\t\tlimit: 1,\n\t\t\tonlineIsolate: true,\n\t\t\texpect: struct {\n\t\t\t\toffline string\n\t\t\t\tonline string\n\t\t\t}{offline: lastCoreStr, online: leftCoreStr},\n\t\t},\n\t}\n\n\tcpusetCg := \"/sys/fs/cgroup/cpuset\"\n\tofflineCgInRoot := \"/offlinetest\"\n\tonlineCgInRoot := \"/onlinetest\"\n\tofflineCg := path.Join(cpusetCg, offlineCgInRoot)\n\tonlineCg := path.Join(cpusetCg, onlineCgInRoot)\n\tfor _, tc := range testCases {\n\t\tqosCpuset := &qosCpuSet{\n\t\t\tonlineIsolate: tc.onlineIsolate,\n\t\t\treserved: tc.reserved,\n\t\t\tlastOfflineCgs: newCgroupPaths(),\n\t\t\tlastOnlineCgs: 
newCgroupPaths(),\n\t\t}\n\n\t\tfunc() {\n\t\t\texisted, err := mkdirCgPath(offlineCg)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"mkdir offline cgroup %s err: %v\", offlineCg, err)\n\t\t\t}\n\t\t\tif !existed {\n\t\t\t\tdefer os.RemoveAll(offlineCg)\n\t\t\t}\n\n\t\t\texisted, err = mkdirCgPath(onlineCg)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"mkdir online cgroup %s err: %v\", onlineCg, err)\n\t\t\t}\n\t\t\tif !existed {\n\t\t\t\tdefer os.RemoveAll(onlineCg)\n\t\t\t}\n\n\t\t\tqosCpuset.Manage(&CgroupResourceConfig{\n\t\t\t\tResources: v1.ResourceList{\n\t\t\t\t\tv1.ResourceCPU: *resource.NewMilliQuantity(tc.limit*1000, resource.DecimalSI),\n\t\t\t\t},\n\t\t\t\tOfflineCgroups: []string{\n\t\t\t\t\tofflineCgInRoot,\n\t\t\t\t},\n\t\t\t\tOnlineCgroups: []string{\n\t\t\t\t\tonlineCgInRoot,\n\t\t\t\t},\n\t\t\t})\n\n\t\t\tofflineCpusets, err := readCpuSetCgroup(offlineCg)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"read cpuset cgroup %s err: %v\", offlineCg, err)\n\t\t\t}\n\t\t\tif offlineCpusets != tc.expect.offline {\n\t\t\t\tt.Fatalf(\"cpu qos cpuset test case(%s) failed, expect offline %s, got %s\",\n\t\t\t\t\ttc.describe, tc.expect.offline, offlineCpusets)\n\t\t\t}\n\t\t\tonCpusets, err := readCpuSetCgroup(onlineCg)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"read cpuset cgroup %s err: %v\", onlineCg, err)\n\t\t\t}\n\t\t\tif onCpusets != tc.expect.online {\n\t\t\t\tt.Fatalf(\"cpu qos cpuset test case(%s) failed, expect online %s, got %s\",\n\t\t\t\t\ttc.describe, tc.expect.online, onCpusets)\n\t\t\t}\n\t\t}()\n\t}\n}", "func (m *RoleMutation) ResetUsers() {\n\tm.users = nil\n\tm.removedusers = nil\n}", "func (ap *actPool) Reset() {\n\tap.mutex.Lock()\n\tdefer ap.mutex.Unlock()\n\t// Remove committed transactions in actpool\n\tap.removeCommittedTxs()\n\t// Reset pending balance for each account\n\tfor addrHash, queue := range ap.accountTxs {\n\t\tbalance, err := ap.pendingSF.Balance(hashToAddr[addrHash])\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"Error when resetting 
actpool state: %v\\n\", err)\n\t\t\treturn\n\t\t}\n\t\tqueue.SetPendingBalance(balance)\n\t}\n\t// Reset confirmed nonce and pending nonce for each account\n\tfor addrHash, queue := range ap.accountTxs {\n\t\tfrom := hashToAddr[addrHash]\n\t\tcommittedNonce, err := ap.pendingSF.Nonce(from)\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"Error when resetting Tx: %v\\n\", err)\n\t\t\treturn\n\t\t}\n\t\tqueue.SetConfirmedNonce(committedNonce)\n\t\tnewPendingNonce := queue.UpdatedPendingNonce(committedNonce, true)\n\t\tif err := ap.pendingSF.SetNonce(from, newPendingNonce); err != nil {\n\t\t\tglog.Errorf(\"Error when resetting actPool state: %v\\n\", err)\n\t\t\treturn\n\t\t}\n\t}\n}", "func (p *Player) ResetRows() {\n\t//Faction related effects\n\tvar monsterCard Card\n\tif p.Faction == FactionMonsters {\n\t\tif len(p.RowClose) > 0 {\n\t\t\tp.RowClose, monsterCard = p.RowClose.WithoutRandom()\n\t\t} else if len(p.RowRanged) > 0 {\n\t\t\tp.RowClose, monsterCard = p.RowRanged.WithoutRandom()\n\t\t} else if len(p.RowSiege) > 0 {\n\t\t\tp.RowClose, monsterCard = p.RowSiege.WithoutRandom()\n\t\t}\n\t}\n\n\t//Put all cards to Grave\n\tp.Grave = append(p.Grave, p.RowClose...)\n\tp.Grave = append(p.Grave, p.RowRanged...)\n\tp.Grave = append(p.Grave, p.RowSiege...)\n\n\t//Reset rows\n\tp.RowClose, p.RowRanged, p.RowSiege = make(Cards, 0), make(Cards, 0), make(Cards, 0)\n\n\t//Faction related post-effects\n\tif p.Faction == FactionMonsters && monsterCard != nil {\n\t\tmonsterCard.PutOnTable(p)\n\t} else if p.Faction == FactionNorthernRealms && p.Game.LastRoundWinner == p {\n\t\tp.DrawCard()\n\t}\n\n\t//Reset Horns\n\tp.HornClose, p.HornRanged, p.HornSiege = false, false, false\n}", "func (c *StakerObject) Reset() bool {\n\tc.stakerInfo = NewStakerInfo()\n\treturn true\n}", "func (_m *MockEncoder) DiscardReset(t time.Time, capacity int) ts.Segment {\n\tret := _m.ctrl.Call(_m, \"DiscardReset\", t, capacity)\n\tret0, _ := ret[0].(ts.Segment)\n\treturn ret0\n}", "func (c *Client) 
Reset() error {\n\tif err := c.hello(); err != nil {\n\t\treturn err\n\t}\n\tif _, _, err := c.cmd(250, \"RSET\"); err != nil {\n\t\treturn err\n\t}\n\tc.rcpts = nil\n\treturn nil\n}", "func (mnu *MetricNameUpdate) ClearOwners() *MetricNameUpdate {\n\tmnu.mutation.ClearOwners()\n\treturn mnu\n}", "func (_m *Reservation) Resources() virtenginetypes.ResourceGroup {\n\tret := _m.Called()\n\n\tvar r0 virtenginetypes.ResourceGroup\n\tif rf, ok := ret.Get(0).(func() virtenginetypes.ResourceGroup); ok {\n\t\tr0 = rf()\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(virtenginetypes.ResourceGroup)\n\t\t}\n\t}\n\n\treturn r0\n}", "func (m *MenuMutation) ResetOwner() {\n\tm.owner = nil\n\tm.clearedowner = false\n}", "func PcrReset(rw io.ReadWriter, pcrs []int) error {\n\tpcrSelect, err := newPCRSelection(pcrs)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = pcrReset(rw, pcrSelect)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func OwnerClear(rw io.ReadWriter, ownerAuth Digest) error {\n\t// Run OSAP for the Owner, reading a random OddOSAP for our initial command\n\t// and getting back a secret and a handle.\n\tsharedSecretOwn, osaprOwn, err := newOSAPSession(rw, etOwner, khOwner, ownerAuth[:])\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer osaprOwn.Close(rw)\n\tdefer zeroBytes(sharedSecretOwn[:])\n\n\t// The digest input for OwnerClear is\n\t//\n\t// digest = SHA1(ordOwnerClear)\n\t//\n\tauthIn := []interface{}{ordOwnerClear}\n\tca, err := newCommandAuth(osaprOwn.AuthHandle, osaprOwn.NonceEven, nil, sharedSecretOwn[:], authIn)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tra, ret, err := ownerClear(rw, ca)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Check response authentication.\n\traIn := []interface{}{ret, ordOwnerClear}\n\tif err := ra.verify(ca.NonceOdd, sharedSecretOwn[:], raIn); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (o AccessLevelBasicConditionDevicePolicyOutput) RequireCorpOwned() pulumi.BoolPtrOutput 
{\n\treturn o.ApplyT(func(v AccessLevelBasicConditionDevicePolicy) *bool { return v.RequireCorpOwned }).(pulumi.BoolPtrOutput)\n}", "func (u *comboUtility) Reset() {\n\tif !u.reset {\n\t\tu.reset = true\n\t\tu.children.Do(func(util interface{}) {\n\t\t\tutil.(*comboUtility).Reset()\n\t\t})\n\t}\n}", "func (c *Catalog) OwnedReferencesStatefulSet(name string) v1beta2.StatefulSet {\n\treturn v1beta2.StatefulSet{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t},\n\t\tSpec: v1beta2.StatefulSetSpec{\n\t\t\tReplicas: util.Int32(1),\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: map[string]string{\n\t\t\t\t\t\"referencedpod\": \"yes\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tServiceName: name,\n\t\t\tTemplate: c.OwnedReferencesPodTemplate(name),\n\t\t},\n\t}\n}", "func (k Keeper) Set(ctx sdk.Context, owner sdk.AccAddress, resourceHash hash.Hash, resource ownership.Ownership_Resource, resourceAddress sdk.AccAddress) (*ownership.Ownership, error) {\n\tstore := ctx.KVStore(k.storeKey)\n\town, err := k.get(store, resourceHash)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif own != nil {\n\t\treturn nil, sdkerrors.Wrapf(sdkerrors.ErrUnauthorized, \"resource %s:%q already has an owner\", resource, resourceHash)\n\t}\n\n\town, err = ownership.New(owner.String(), resource, resourceHash, resourceAddress)\n\tif err != nil {\n\t\treturn nil, sdkerrors.Wrapf(sdkerrors.ErrInvalidRequest, err.Error())\n\t}\n\n\tdata, err := k.cdc.MarshalBinaryLengthPrefixed(own)\n\tif err != nil {\n\t\treturn nil, sdkerrors.Wrapf(sdkerrors.ErrJSONMarshal, err.Error())\n\t}\n\tstore.Set(own.Hash, data)\n\n\t// emit event\n\tctx.EventManager().EmitEvent(\n\t\tsdk.NewEvent(\n\t\t\ttypes.EventType,\n\t\t\tsdk.NewAttribute(sdk.AttributeKeyAction, types.AttributeActionCreated),\n\t\t\tsdk.NewAttribute(types.AttributeKeyHash, own.Hash.String()),\n\t\t\tsdk.NewAttribute(types.AttributeKeyResourceHash, own.ResourceHash.String()),\n\t\t\tsdk.NewAttribute(types.AttributeKeyResourceType, 
own.Resource.String()),\n\t\t\tsdk.NewAttribute(types.AttributeKeyResourceAddress, own.ResourceAddress.String()),\n\t\t),\n\t)\n\n\treturn own, nil\n}", "func TestSpareResourceSet(t *testing.T) {\n\tvar result SpareResourceSet\n\terr := json.NewDecoder(strings.NewReader(spareResourceSetBody)).Decode(&result)\n\n\tif err != nil {\n\t\tt.Errorf(\"Error decoding JSON: %s\", err)\n\t}\n\n\tif result.ID != \"SpareResourceSet-1\" {\n\t\tt.Errorf(\"Received invalid ID: %s\", result.ID)\n\t}\n\n\tif result.Name != \"SpareResourceSetOne\" {\n\t\tt.Errorf(\"Received invalid name: %s\", result.Name)\n\t}\n\n\tif result.OnHandLocation.AltitudeMeters != 150 {\n\t\tt.Errorf(\"OnHandLocation Altitude incorrect: %d\", result.OnHandLocation.AltitudeMeters)\n\t}\n\n\tif !result.OnLine {\n\t\tt.Error(\"OnLine should be true\")\n\t}\n\n\tif result.ResourceType != \"Box\" {\n\t\tt.Errorf(\"Invalid resource type: %s\", result.ResourceType)\n\t}\n}", "func (r *Reconciler) Own(obj metav1.Object) {\n\tutil.Panic(controllerutil.SetControllerReference(r.NooBaaAccount, obj, r.Scheme))\n}", "func (p *unlimitedPool) Reset() {\n\n\tp.m.Lock()\n\n\tif !p.closed {\n\t\tp.m.Unlock()\n\t\treturn\n\t}\n\n\t// cancelled the pool, not closed it, pool will be usable after calling initialize().\n\tp.initialize()\n\tp.m.Unlock()\n}", "func IsOwned(object metav1.Object) (owned bool, err error) {\n\trefs, err := getRefs(object)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn len(refs) > 0, nil\n}", "func (_m *MockMultiReaderIterator) ResetSliceOfSlices(readers xio.ReaderSliceOfSlicesIterator) {\n\t_m.ctrl.Call(_m, \"ResetSliceOfSlices\", readers)\n}", "func TestCluster_Owners(t *testing.T) {\n\tc := cluster{\n\t\tnodes: []*Node{\n\t\t\t{URI: NewTestURIFromHostPort(\"serverA\", 1000)},\n\t\t\t{URI: NewTestURIFromHostPort(\"serverB\", 1000)},\n\t\t\t{URI: NewTestURIFromHostPort(\"serverC\", 1000)},\n\t\t},\n\t\tHasher: NewTestModHasher(),\n\t\tReplicaN: 2,\n\t}\n\n\t// Verify nodes are 
distributed.\n\tif a := c.partitionNodes(0); !reflect.DeepEqual(a, []*Node{c.nodes[0], c.nodes[1]}) {\n\t\tt.Fatalf(\"unexpected owners: %s\", spew.Sdump(a))\n\t}\n\n\t// Verify nodes go around the ring.\n\tif a := c.partitionNodes(2); !reflect.DeepEqual(a, []*Node{c.nodes[2], c.nodes[0]}) {\n\t\tt.Fatalf(\"unexpected owners: %s\", spew.Sdump(a))\n\t}\n}", "func (m *InsuranceMutation) ResetInsurancePatientrights() {\n\tm._InsurancePatientrights = nil\n\tm.removed_InsurancePatientrights = nil\n}", "func (r *ReconcileIoTProject) ensureControllerOwnerIsSet(owner, object v1.Object) error {\n\n\tif util.IsNewObject(object) {\n\t\terr := controllerutil.SetControllerReference(owner, object, r.scheme)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func TestRedpandaResourceRequirements(t *testing.T) {\n\ttype test struct {\n\t\tname string\n\t\tsetRequestsCPU resource.Quantity\n\t\tsetRequestsMem resource.Quantity\n\t\tsetRedpandaCPU resource.Quantity\n\t\tsetRedpandaMem resource.Quantity\n\t\texpectedRedpandaCPU resource.Quantity\n\t\texpectedRedpandaMem resource.Quantity\n\t}\n\tmakeResources := func(t test) v1alpha1.RedpandaResourceRequirements {\n\t\treturn v1alpha1.RedpandaResourceRequirements{\n\t\t\tResourceRequirements: corev1.ResourceRequirements{\n\t\t\t\tRequests: corev1.ResourceList{\n\t\t\t\t\tcorev1.ResourceMemory: t.setRequestsMem,\n\t\t\t\t\tcorev1.ResourceCPU: t.setRequestsCPU,\n\t\t\t\t},\n\t\t\t},\n\t\t\tRedpanda: corev1.ResourceList{\n\t\t\t\tcorev1.ResourceMemory: t.setRedpandaMem,\n\t\t\t\tcorev1.ResourceCPU: t.setRedpandaCPU,\n\t\t\t},\n\t\t}\n\t}\n\n\tt.Run(\"Memory\", func(t *testing.T) {\n\t\ttests := []test{\n\t\t\t{\n\t\t\t\tname: \"RedpandaMemory is set from requests.memory\",\n\t\t\t\tsetRequestsMem: resource.MustParse(\"3000Mi\"),\n\t\t\t\texpectedRedpandaMem: resource.MustParse(\"2700Mi\"),\n\t\t\t},\n\t\t\t{\n\t\t\t\tname: \"RedpandaMemory is set from lower redpanda.memory\",\n\t\t\t\tsetRequestsMem: 
resource.MustParse(\"4000Mi\"),\n\t\t\t\tsetRedpandaMem: resource.MustParse(\"3000Mi\"),\n\t\t\t\texpectedRedpandaMem: resource.MustParse(\"3000Mi\"),\n\t\t\t},\n\t\t}\n\t\tfor _, tt := range tests {\n\t\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\t\trrr := makeResources(tt)\n\t\t\t\tassert.Equal(t, tt.expectedRedpandaMem.Value(), rrr.RedpandaMemory().Value())\n\t\t\t})\n\t\t}\n\t})\n\n\tt.Run(\"CPU\", func(t *testing.T) {\n\t\ttests := []test{\n\t\t\t{\n\t\t\t\tname: \"RedpandaCPU is set from integer requests.cpu\",\n\t\t\t\tsetRequestsCPU: resource.MustParse(\"1\"),\n\t\t\t\tsetRequestsMem: resource.MustParse(\"20Gi\"),\n\t\t\t\texpectedRedpandaCPU: resource.MustParse(\"1\"),\n\t\t\t},\n\t\t\t{\n\t\t\t\tname: \"RedpandaCPU is set from milli requests.cpu\",\n\t\t\t\tsetRequestsCPU: resource.MustParse(\"1000m\"),\n\t\t\t\tsetRequestsMem: resource.MustParse(\"20Gi\"),\n\t\t\t\texpectedRedpandaCPU: resource.MustParse(\"1\"),\n\t\t\t},\n\t\t\t{\n\t\t\t\tname: \"RedpandaCPU is rounded up from milli requests.cpu\",\n\t\t\t\tsetRequestsCPU: resource.MustParse(\"1001m\"),\n\t\t\t\tsetRequestsMem: resource.MustParse(\"20Gi\"),\n\t\t\t\texpectedRedpandaCPU: resource.MustParse(\"2\"),\n\t\t\t},\n\t\t\t{\n\t\t\t\tname: \"RedpandaCPU is set from lower redpanda.cpu\",\n\t\t\t\tsetRequestsCPU: resource.MustParse(\"2\"),\n\t\t\t\tsetRequestsMem: resource.MustParse(\"20Gi\"),\n\t\t\t\tsetRedpandaCPU: resource.MustParse(\"1\"),\n\t\t\t\texpectedRedpandaCPU: resource.MustParse(\"1\"),\n\t\t\t},\n\t\t\t{\n\t\t\t\tname: \"RedpandaCPU is set from higher redpanda.cpu\",\n\t\t\t\tsetRequestsCPU: resource.MustParse(\"1\"),\n\t\t\t\tsetRequestsMem: resource.MustParse(\"20Gi\"),\n\t\t\t\tsetRedpandaCPU: resource.MustParse(\"2\"),\n\t\t\t\texpectedRedpandaCPU: resource.MustParse(\"2\"),\n\t\t\t},\n\t\t\t{\n\t\t\t\tname: \"RedpandaCPU is rounded up from milli redpanda.cpu\",\n\t\t\t\tsetRequestsCPU: resource.MustParse(\"1\"),\n\t\t\t\tsetRequestsMem: 
resource.MustParse(\"20Gi\"),\n\t\t\t\tsetRedpandaCPU: resource.MustParse(\"1001m\"),\n\t\t\t\texpectedRedpandaCPU: resource.MustParse(\"2\"),\n\t\t\t},\n\t\t\t{\n\t\t\t\tname: \"RedpandaCPU is limited by 2GiB/core\",\n\t\t\t\tsetRequestsCPU: resource.MustParse(\"10\"),\n\t\t\t\tsetRequestsMem: resource.MustParse(\"4Gi\"),\n\t\t\t\texpectedRedpandaCPU: resource.MustParse(\"2\"),\n\t\t\t},\n\t\t\t{\n\t\t\t\tname: \"RedpandaCPU has a minimum if requests >0\",\n\t\t\t\tsetRequestsCPU: resource.MustParse(\"100m\"),\n\t\t\t\tsetRequestsMem: resource.MustParse(\"100Mi\"),\n\t\t\t\texpectedRedpandaCPU: resource.MustParse(\"1\"),\n\t\t\t},\n\t\t\t{\n\t\t\t\tname: \"RedpandaCPU not set if no request\",\n\t\t\t\texpectedRedpandaCPU: resource.MustParse(\"0\"),\n\t\t\t},\n\t\t}\n\t\tfor _, tt := range tests {\n\t\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\t\trrr := makeResources(tt)\n\t\t\t\tassert.Equal(t, tt.expectedRedpandaCPU.Value(), rrr.RedpandaCPU().Value())\n\t\t\t})\n\t\t}\n\t})\n}", "func (in *OwnedResourceStatus) DeepCopy() *OwnedResourceStatus {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(OwnedResourceStatus)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (rw *RandW) Reset() {\n\trw.r = rand.New(rand.NewSource(uint64(time.Now().UnixNano())))\n}", "func ReconcileStatefulSets(ctx context.Context, namedGetters []NamedStatefulSetCreatorGetter, namespace string, client ctrlruntimeclient.Client, objectModifiers ...ObjectModifier) error {\n\tfor _, get := range namedGetters {\n\t\tname, create := get()\n\t\tcreate = DefaultStatefulSet(create)\n\t\tcreateObject := StatefulSetObjectWrapper(create)\n\t\tcreateObject = createWithNamespace(createObject, namespace)\n\t\tcreateObject = createWithName(createObject, name)\n\n\t\tfor _, objectModifier := range objectModifiers {\n\t\t\tcreateObject = objectModifier(createObject)\n\t\t}\n\n\t\tif err := EnsureNamedObject(ctx, types.NamespacedName{Namespace: namespace, Name: name}, createObject, client, 
&appsv1.StatefulSet{}, false); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to ensure StatefulSet %s/%s: %v\", namespace, name, err)\n\t\t}\n\t}\n\n\treturn nil\n}", "func setOwnerReferences(objCopy interface{}) []metav1.OwnerReference {\n\townerReferences := []metav1.OwnerReference{}\n\tnewReference := metav1.OwnerReference{}\n\tswitch userObj := objCopy.(type) {\n\tcase *apps_v1alpha.UserRegistrationRequest:\n\t\tnewReference = *metav1.NewControllerRef(userObj, apps_v1alpha.SchemeGroupVersion.WithKind(\"UserRegistrationRequest\"))\n\tcase *apps_v1alpha.User:\n\t\tnewReference = *metav1.NewControllerRef(userObj, apps_v1alpha.SchemeGroupVersion.WithKind(\"User\"))\n\t}\n\ttakeControl := false\n\tnewReference.Controller = &takeControl\n\townerReferences = append(ownerReferences, newReference)\n\treturn ownerReferences\n}", "func IsResMgrOwnedState(state pbtask.TaskState) bool {\n\t_, ok := resMgrOwnedTaskStates[state]\n\tif ok {\n\t\treturn true\n\t}\n\treturn false\n}", "func (v *AccountCoinsResponse) Reset() {\n\tv.BlockIdentifier.Reset()\n\tv.Coins = v.Coins[:0]\n\tv.Metadata = v.Metadata[:0]\n}", "func TestAccountUncleanShutdown(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\tt.Parallel()\n\n\t// create a renter tester\n\trt, err := newRenterTester(t.Name())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer func() {\n\t\terr := rt.Close()\n\t\tif err != nil {\n\t\t\tt.Log(err)\n\t\t}\n\t}()\n\tr := rt.renter\n\n\t// create a number accounts\n\taccounts, err := openRandomTestAccountsOnRenter(r)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// close the renter and reload it with a dependency that interrupts the\n\t// accounts save on shutdown\n\tdeps := &dependencies.DependencyInterruptAccountSaveOnShutdown{}\n\tr, err = rt.reloadRenterWithDependency(r, deps)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// verify the accounts were saved on disk\n\tfor _, account := range accounts {\n\t\treloaded, err := 
r.staticAccountManager.managedOpenAccount(account.staticHostKey)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif !reloaded.staticID.SPK().Equals(account.staticID.SPK()) {\n\t\t\tt.Fatal(\"Unexpected reloaded account ID\")\n\t\t}\n\n\t\tif !reloaded.balance.Equals(account.managedMinExpectedBalance()) {\n\t\t\tt.Log(reloaded.balance)\n\t\t\tt.Log(account.managedMinExpectedBalance())\n\t\t\tt.Fatal(\"Unexpected account balance after reload\")\n\t\t}\n\t}\n\n\t// reload it to trigger the unclean shutdown\n\tr, err = rt.reloadRenter(r)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// verify the accounts were reloaded but the balances were cleared due to\n\t// the unclean shutdown\n\tfor _, account := range accounts {\n\t\treloaded, err := r.staticAccountManager.managedOpenAccount(account.staticHostKey)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif !account.staticID.SPK().Equals(reloaded.staticID.SPK()) {\n\t\t\tt.Fatal(\"Unexpected reloaded account ID\")\n\t\t}\n\t\tif !reloaded.balance.IsZero() {\n\t\t\tt.Fatal(\"Unexpected reloaded account balance\")\n\t\t}\n\t}\n}", "func (o AccessLevelBasicConditionDevicePolicyPtrOutput) RequireCorpOwned() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v *AccessLevelBasicConditionDevicePolicy) *bool {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.RequireCorpOwned\n\t}).(pulumi.BoolPtrOutput)\n}", "func (c Clients) Reset(ctx context.Context) error {\n\treq := newRequest(\"*1\\r\\n$5\\r\\nRESET\\r\\n$\")\n\treturn c.c.cmdSimple(ctx, req)\n}", "func TestOrchestratorCheckSafeReSchedule(t *testing.T) {\n\tvar wg sync.WaitGroup\n\n\tclient := fake.NewSimpleClientset()\n\tinformerFactory := informers.NewSharedInformerFactory(client, 0)\n\tcl := &apiserver.APIClient{Cl: client, InformerFactory: informerFactory, UnassignedPodInformerFactory: informerFactory}\n\torchCheck := OrchestratorFactory().(*OrchestratorCheck)\n\torchCheck.apiClient = cl\n\n\tbundle := NewCollectorBundle(orchCheck)\n\terr := 
bundle.Initialize()\n\tassert.NoError(t, err)\n\n\twg.Add(2)\n\n\tnodeInformer := informerFactory.Core().V1().Nodes().Informer()\n\tnodeInformer.AddEventHandler(&cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj interface{}) {\n\t\t\twg.Done()\n\t\t},\n\t})\n\n\twriteNode(t, client, \"1\")\n\n\t// getting rescheduled.\n\torchCheck.Cancel()\n\t// This part is not optimal as the cancel closes a channel which gets propagated everywhere that might take some time.\n\t// If things are too fast the close is not getting propagated fast enough.\n\t// But even if we are too fast and don't catch that part it will not lead to a false positive\n\ttime.Sleep(1 * time.Millisecond)\n\terr = bundle.Initialize()\n\tassert.NoError(t, err)\n\twriteNode(t, client, \"2\")\n\n\twg.Wait()\n}", "func TestReset(t *testing.T) {\n\tcapacity := uint32(100000)\n\tp := float64(0.001)\n\tsamples := uint32(100000)\n\tfilter, testValues := GenerateExampleFilter(capacity, p, samples)\n\tfilter.Reset()\n\tfingerprint := make([]uint32, filter.k)\n\tfor _, value := range testValues {\n\t\tfilter.Fingerprint(value, fingerprint)\n\t\tif filter.CheckFingerprint(fingerprint) {\n\t\t\tt.Error(\"Did not find test value in filter!\")\n\t\t}\n\t}\n}", "func TestForceMaster(t *testing.T) {\n\tctx, done := context.WithCancel(context.Background())\n\tdefer done()\n\tresignTime := 1 * time.Hour\n\tres := \"test resource\"\n\n\tmt := NewTracker(forcemaster.Factory{}, resignTime, monitoring.InertMetricFactory{})\n\tgo mt.Run(ctx)\n\tmt.AddResource(res)\n\ttime.Sleep(time.Millisecond) // Wait to acquire mastership.\n\n\t// Verify that mastersihp works as expected, with 1 mastership for res.\n\tm, err := mt.Masterships(ctx)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif got := len(m); got != 1 {\n\t\tt.Errorf(\"Masterships returned %v, want 1\", got)\n\t}\n\n\t// Advance the clock by pretending we acquired mastersihp a long time ago.\n\tmt.masterMu.Lock()\n\tmastership := mt.master[res]\n\tmastership.acquired = 
time.Now().Add(-2 * resignTime)\n\tmt.master[res] = mastership\n\tmt.masterMu.Unlock()\n\n\t// Verify that we resign the mastership after the clock as advanced.\n\tm2, err := mt.Masterships(ctx)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif got := len(m2); got != 0 {\n\t\tt.Errorf(\"Masterships returned %v, want 0\", got)\n\t}\n\n\ttime.Sleep(time.Millisecond) // Wait to acquire mastership.\n\n\t// Verify that we reaquire mastership\n\tm3, err := mt.Masterships(ctx)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif got := len(m3); got != 1 {\n\t\tt.Errorf(\"Masterships returned %v, want 0\", got)\n\t}\n}", "func (r *Reconciler) Own(obj metav1.Object) {\n\tutil.Panic(controllerutil.SetControllerReference(r.NooBaa, obj, r.Scheme))\n}", "func ReconcileLabelledObject(\n\tctx context.Context,\n\tlogger logr.Logger,\n\tregistry k8s_registry.TypeRegistry,\n\tclient kube_client.Client,\n\towner kube_types.NamespacedName,\n\townerMesh string,\n\townedType k8s_registry.ResourceType,\n\townedNamespace string,\n\towned map[string]core_model.ResourceSpec,\n) error {\n\tlog := logger.WithValues(\"type\", ownedType, \"name\", owner.Name, \"namespace\", owner.Namespace)\n\t// First we list which existing objects are owned by this owner.\n\t// We expect either 0 or 1 and depending on whether routeSpec is nil\n\t// we either create an object or update or delete the existing one.\n\townerLabelValue := hashNamespacedName(owner)\n\tlabels := kube_client.MatchingLabels{\n\t\townerLabel: ownerLabelValue,\n\t}\n\n\texistingList, err := registry.NewList(ownedType)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"could not create list of owned %T\", ownedType)\n\t}\n\n\tif err := client.List(ctx, existingList, labels); err != nil {\n\t\treturn err\n\t}\n\n\t// Delete unneeded objects\n\texistingObjs := map[string]k8s_model.KubernetesObject{}\n\tfor _, existing := range existingList.GetItems() {\n\t\tif _, ok := owned[existing.GetName()]; !ok {\n\t\t\terr := client.Delete(ctx, 
existing)\n\t\t\tswitch {\n\t\t\tcase kube_apierrs.IsNotFound(err):\n\t\t\t\tlog.V(1).Info(\"object not found. Nothing to delete\")\n\t\t\tcase err == nil:\n\t\t\t\tlog.Info(\"object deleted\")\n\t\t\tdefault:\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t// We don't care about this anymore\n\t\t\tcontinue\n\t\t}\n\t\texistingObjs[existing.GetName()] = existing\n\t}\n\n\t// We need a mesh when creating objects\n\tif len(owned) > 0 && ownerMesh == \"\" {\n\t\treturn fmt.Errorf(\"could not reconcile object, owner mesh must not be empty\")\n\t}\n\n\tfor ownedName, ownedSpec := range owned {\n\t\t// Update existing\n\t\tif existing, ok := existingObjs[ownedName]; ok {\n\t\t\texistingSpec, err := existing.GetSpec()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif core_model.Equal(existingSpec, ownedSpec) {\n\t\t\t\tlog.V(1).Info(\"object is the same. Nothing to update\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\texisting.SetSpec(ownedSpec)\n\n\t\t\tif err := client.Update(ctx, existing); err != nil {\n\t\t\t\treturn errors.Wrapf(err, \"could not update owned %T\", ownedType)\n\t\t\t}\n\t\t\tlog.Info(\"object updated\")\n\n\t\t\tcontinue\n\t\t}\n\n\t\t// Or create new\n\t\towned, err := registry.NewObject(ownedType)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"could not get new %T from registry\", ownedType)\n\t\t}\n\n\t\towned.SetMesh(ownerMesh)\n\n\t\towned.SetObjectMeta(\n\t\t\t&kube_meta.ObjectMeta{\n\t\t\t\tName: ownedName,\n\t\t\t\tNamespace: ownedNamespace,\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\townerLabel: ownerLabelValue,\n\t\t\t\t},\n\t\t\t},\n\t\t)\n\t\towned.SetSpec(ownedSpec)\n\n\t\tif err := client.Create(ctx, owned); err != nil {\n\t\t\treturn errors.Wrapf(err, \"could not create owned %T\", ownedType)\n\t\t}\n\t\tlogger.Info(\"object created\")\n\t}\n\n\treturn nil\n}", "func (o *Object) Reset() {\n\to.Value = nil\n\to.Initialized = false\n\tObjectPool.Put(o)\n}", "func testResourceDeletionNotIgnored(t *testing.T, initialResource func(string) 
e2e.UpdateOptions, updateResource func(r *e2e.UpdateOptions)) {\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout*1000)\n\tt.Cleanup(cancel)\n\tmgmtServer := startManagementServer(t)\n\tnodeID := uuid.New().String()\n\tbs := generateBootstrapContents(t, mgmtServer.Address, false, nodeID)\n\txdsR := xdsResolverBuilder(t, bs)\n\tresources := initialResource(nodeID)\n\n\t// Update the management server with initial resources setup.\n\tif err := mgmtServer.Update(ctx, resources); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tcc, err := grpc.Dial(fmt.Sprintf(\"xds:///%s\", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(xdsR))\n\tif err != nil {\n\t\tt.Fatalf(\"failed to dial local test server: %v\", err)\n\t}\n\tt.Cleanup(func() { cc.Close() })\n\n\tif err := verifyRPCtoAllEndpoints(cc); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Mutate resource and update on the server.\n\tupdateResource(&resources)\n\tif err := mgmtServer.Update(ctx, resources); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Spin up go routines to verify RPCs fail after the update.\n\tclient := testgrpc.NewTestServiceClient(cc)\n\twg := sync.WaitGroup{}\n\twg.Add(2)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tfor ; ctx.Err() == nil; <-time.After(10 * time.Millisecond) {\n\t\t\tif _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tfor ; ctx.Err() == nil; <-time.After(10 * time.Millisecond) {\n\t\t\tif _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\twg.Wait()\n\tif ctx.Err() != nil {\n\t\tt.Fatal(\"Context expired before RPCs failed.\")\n\t}\n}", "func (this *PoolTestSuite) TestInvalidateFreesCapacity() {\n\tthis.pool.Config.MaxTotal = 2\n\tthis.pool.Config.MaxWaitMillis = 500\n\tthis.pool.Config.BlockWhenExhausted = true\n\t// Borrow an instance and hold if for 5 seconds\n\tch1 := 
waitTestGoroutine(this.pool, 5000)\n\t// Borrow another instance\n\tobj := this.NoErrorWithResult(this.pool.BorrowObject())\n\t// Launch another goroutine - will block, but fail in 500 ms\n\tch2 := waitTestGoroutine(this.pool, 100)\n\t// Invalidate the object borrowed by this goroutine - should allow goroutine2 to create\n\tsleep(20)\n\tthis.NoError(this.pool.InvalidateObject(obj))\n\tsleep(600) // Wait for goroutine2 to timeout\n\tresult2 := <-ch2\n\tclose(ch2)\n\tif result2.error != nil {\n\t\tthis.Fail(result2.error.Error())\n\t}\n\t<-ch1\n\tclose(ch1)\n}" ]
[ "0.5632101", "0.54997927", "0.54261786", "0.5346862", "0.5218427", "0.51643884", "0.5162271", "0.51167315", "0.5096469", "0.5093228", "0.50793904", "0.5075024", "0.5042418", "0.5026725", "0.50028026", "0.49832165", "0.49498773", "0.4921987", "0.4913988", "0.4884722", "0.48834723", "0.4875266", "0.48747143", "0.4864166", "0.48613825", "0.48576918", "0.48528707", "0.48479673", "0.48408222", "0.48314267", "0.4814791", "0.478252", "0.4769354", "0.47588813", "0.4739645", "0.47383434", "0.47176644", "0.4691445", "0.46696526", "0.46678108", "0.46638048", "0.46626547", "0.46619052", "0.46609896", "0.46407443", "0.46343923", "0.46055886", "0.45940876", "0.4572221", "0.45705897", "0.45654014", "0.45629874", "0.4552987", "0.45526353", "0.45502627", "0.45442206", "0.45387396", "0.45380217", "0.4537285", "0.45330662", "0.45193264", "0.45184195", "0.45177603", "0.45037282", "0.45000178", "0.4488333", "0.4488272", "0.44880775", "0.44717842", "0.44682267", "0.44668883", "0.44578826", "0.44549957", "0.44546828", "0.4454414", "0.44535717", "0.44427985", "0.44424105", "0.44422048", "0.44410694", "0.44383928", "0.44361278", "0.44315016", "0.4425759", "0.4422356", "0.44176507", "0.44110948", "0.4405651", "0.440312", "0.44005898", "0.43992198", "0.43896964", "0.43891123", "0.43870166", "0.43869132", "0.4385723", "0.4380807", "0.43798065", "0.43797678", "0.43794474" ]
0.76563233
0
Test that TokenEvent sends a connection token event.
func TestServiceTokenEvent_WithObjectToken_SendsToken(t *testing.T) { runTest(t, func(s *res.Service) { s.Handle("model", res.GetResource(func(r res.GetRequest) { r.NotFound() })) }, func(s *restest.Session) { s.Service().TokenEvent(mock.CID, mock.Token) s.GetMsg().AssertTokenEvent(mock.CID, mock.Token) }) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestAuthRequestTokenEvent(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.Auth(\"method\", func(r res.AuthRequest) {\n\t\t\tr.TokenEvent(mock.Token)\n\t\t\tr.OK(nil)\n\t\t}))\n\t}, func(s *restest.Session) {\n\t\treq := s.Auth(\"test.model\", \"method\", nil)\n\t\ts.GetMsg().\n\t\t\tAssertTokenEvent(mock.CID, mock.Token)\n\t\treq.Response().\n\t\t\tAssertResult(nil)\n\t})\n}", "func TestServiceTokenEventWithID_WithObjectToken_SendsToken(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.GetResource(func(r res.GetRequest) { r.NotFound() }))\n\t}, func(s *restest.Session) {\n\t\ts.Service().TokenEventWithID(mock.CID, \"foo\", mock.Token)\n\t\ts.GetMsg().AssertTokenEventWithID(mock.CID, \"foo\", mock.Token)\n\t})\n}", "func TestServiceTokenEvent_WithNilToken_SendsNilToken(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.GetResource(func(r res.GetRequest) { r.NotFound() }))\n\t}, func(s *restest.Session) {\n\t\ts.Service().TokenEvent(mock.CID, nil)\n\t\ts.GetMsg().AssertTokenEvent(mock.CID, nil)\n\t})\n}", "func (s *Service) TestToken(ctx context.Context, info *pushmdl.PushInfo, token string) (err error) {\n\tparams := url.Values{}\n\tparams.Add(\"app_id\", strconv.FormatInt(info.APPID, 10))\n\tparams.Add(\"alert_title\", info.Title)\n\tparams.Add(\"alert_body\", info.Summary)\n\tparams.Add(\"token\", token)\n\tparams.Add(\"link_type\", strconv.FormatInt(int64(info.LinkType), 10))\n\tparams.Add(\"link_value\", info.LinkValue)\n\tparams.Add(\"sound\", strconv.Itoa(info.Sound))\n\tparams.Add(\"vibration\", strconv.Itoa(info.Vibration))\n\tparams.Add(\"expire_time\", strconv.FormatInt(int64(info.ExpireTime), 10))\n\tparams.Add(\"image_url\", info.ImageURL)\n\tif err = s.httpClient.Post(ctx, _testTokenURL, \"\", params, nil); err != nil {\n\t\tlog.Error(\"s.TestToken(%+v) error(%v)\", info, err)\n\t}\n\treturn\n}", "func TestMockOnEvent(t *testing.T) {\n\tmockServer := 
&MockRailsServer{T: t, Behaviour: MockEvent}\n\n\tdialer := wstest.NewDialer(mockServer)\n\tdialer.HandshakeTimeout = time.Second * 2\n\n\tclient := NewClient(fakeEndpoint).WithDialer(dialer)\n\n\tcalled := make(chan struct{})\n\n\tclient.OnEvent(\"AgentChannel\", func(conn *websocket.Conn, payload *Payload, error error) {\n\t\tcalled <- struct{}{}\n\t\treturn\n\t})\n\n\terr := client.Serve()\n\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\treceiveSleepMs(2000, called, t)\n}", "func sendEvent(client runner.RunnerClient, token string, key string) {\n\tlog.Println(\"sending event:\", key)\n\tif _, err := client.Event(context.Background(), &runner.EventRequest{\n\t\tKey: key,\n\t}); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func TestOAUTH2Token(t *testing.T) {\n\tconnection, err := NewConnectionBuilder().\n\t\tURL(\"http://localhost:9100/api\").\n\t\tUsername(\"admin\").\n\t\tPassword(\"password\").\n\t\tBuild()\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tdefer connection.Close()\n\tvcr := govcr.NewVCR(\"connection_oauth2\",\n\t\t&govcr.VCRConfig{\n\t\t\tClient: connection.client,\n\t\t\tDisableRecording: true,\n\t\t})\n\t// Replace our HTTPClient with a vcr client wrapping it\n\tconnection.client = vcr.Client\n\tprojectsResource := connection.Projects()\n\n\t// Trigger the auth flow.\n\tgetProjectsRequest := projectsResource.Get()\n\tif len(connection.token) != 0 || len(connection.bearer) != 0 {\n\t\tt.Errorf(\"Connection should have no tokens. token: '%s', bearer: '%s'\",\n\t\t\tconnection.token,\n\t\t\tconnection.bearer)\n\t}\n\t_, err = getProjectsRequest.Send()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif len(connection.token) != 0 || len(connection.bearer) == 0 {\n\t\tt.Errorf(\"Connection should have only a bearer token. 
token: '%s', bearer: '%s'\",\n\t\t\tconnection.token,\n\t\t\tconnection.bearer)\n\t}\n}", "func TestServiceTokenEventWithID_WithNilToken_SendsNilToken(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.GetResource(func(r res.GetRequest) { r.NotFound() }))\n\t}, func(s *restest.Session) {\n\t\ts.Service().TokenEventWithID(mock.CID, \"foo\", nil)\n\t\ts.GetMsg().AssertTokenEventWithID(mock.CID, \"foo\", nil)\n\t})\n}", "func TestAuthRequestNilTokenEvent(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.Auth(\"method\", func(r res.AuthRequest) {\n\t\t\tr.TokenEvent(nil)\n\t\t\tr.OK(nil)\n\t\t}))\n\t}, func(s *restest.Session) {\n\t\treq := s.Auth(\"test.model\", \"method\", nil)\n\t\ts.GetMsg().\n\t\t\tAssertTokenEvent(mock.CID, nil)\n\t\treq.Response().\n\t\t\tAssertResult(nil)\n\t})\n}", "func TestNatsAdaptorOnWhenConnectedWithAuth(t *testing.T) {\n\tt.Skip(\"TODO: implement this test without requiring actual server connection\")\n\ta := NewAdaptorWithAuth(\"localhost:4222\", 9999, \"test\", \"testwd\")\n\ta.Connect()\n\tgobottest.Assert(t, a.On(\"hola\", func(msg Message) {\n\t\tfmt.Println(\"hola\")\n\t}), true)\n}", "func TestToken(t *testing.T) {\n\tkey := []byte(\"26BF237B95964852625A2C27988C3\")\n\tSetSecret(key)\n\tc := NewClaims(1, 15*time.Minute)\n\tc.SetIssuer(\"token_test\")\n\tc.SetSubject(\"test\")\n\ttok, err := c.Token()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tc, err = Decode(tok)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func TestVerifyToken(t *testing.T) {\n t.Errorf(\"No tests written yet for VerifyToken()\")\n}", "func TestMockOnConnect(t *testing.T) {\n\tmockServer := &MockRailsServer{T: t, Behaviour: MockConnect}\n\n\tdialer := wstest.NewDialer(mockServer)\n\tdialer.HandshakeTimeout = time.Second * 2\n\n\tclient := NewClient(fakeEndpoint).WithDialer(dialer)\n\n\tcalled := make(chan struct{})\n\n\tclient.OnConnect(func(conn *websocket.Conn) error {\n\t\tcalled <- 
struct{}{}\n\t\treturn nil\n\t})\n\terr := client.Serve()\n\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\treceiveSleepMs(2000, called, t)\n}", "func TestUserTokenPingSuccess(t *testing.T) {\n\tdb := setupDB()\n\tdefer db.Close()\n\trouter := setupRouter()\n\n\tw := httptest.NewRecorder()\n\n\treq, _ := http.NewRequest(\"GET\", \"/token/ping\", nil)\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\treq.Header.Add(\"Authorization\", \"Bearer \" + Token)\n\trouter.ServeHTTP(w, req)\n\n\tassert.Equal(t, http.StatusOK, w.Code)\n\t//\tsomething like {\"claim_id\":\"test001\",\"message\":\"pong\",\"username\":\"test001\"}\n\tassert.Contains(t, w.Body.String(), \"pong\")\n\tassert.Contains(t, w.Body.String(), kTestUserUsername)\n}", "func TestEmittingMessage(t *testing.T) {\n\tsink := make(chan bool, 1)\n\tclient := NewClient()\n\n\ttimeout, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)\n\tdefer cancel()\n\n\tclient.Subscribe(Before, func(ctx context.Context, message interface{}) {\n\t\tsink <- true\n\t})\n\n\tclient.Emit(context.Background(), Before, nil)\n\n\tselect {\n\tcase <-timeout.Done():\n\t\tt.Fatal(\"Timeout reached\")\n\tcase <-sink:\n\t}\n}", "func (*ClientConnectEvent) Op() ws.OpCode { return 12 }", "func TestCorrectTokenPasses(t *testing.T) {\n\thand := New(http.HandlerFunc(succHand))\n\thand.SetFailureHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tt.Errorf(\"Test failed. 
Reason: %v\", Reason(r))\n\t}))\n\n\tserver := httptest.NewServer(hand)\n\tdefer server.Close()\n\n\t// issue the first request to get the token\n\tresp, err := http.Get(server.URL)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tcookie := getRespCookie(resp, CookieName)\n\tif cookie == nil {\n\t\tt.Fatal(\"Cookie was not found in the response.\")\n\t}\n\n\tfinalToken := b64encode(maskToken(b64decode(cookie.Value)))\n\n\tvals := [][]string{\n\t\t{\"name\", \"Jolene\"},\n\t\t{FormFieldName, finalToken},\n\t}\n\n\t// Constructing a custom request is suffering\n\treq, err := http.NewRequest(\"POST\", server.URL, formBodyR(vals))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\treq.Header.Set(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\treq.AddCookie(cookie)\n\n\tresp, err = http.DefaultClient.Do(req)\n\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif resp.StatusCode != 200 {\n\t\tt.Errorf(\"The request should have succeeded, but it didn't. Instead, the code was %d\",\n\t\t\tresp.StatusCode)\n\t}\n}", "func RegisteringTokenTest(env *models.PhotonEnvReader, allowFail bool) {\n\t// 1. register a not-exist token\n\tcase1 := &APITestCase{\n\t\tCaseName: \"Register a not-exist token\",\n\t\tAllowFail: allowFail,\n\t\tReq: &models.Req{\n\t\t\tAPIName: \"RegisteringOneToken\",\n\t\t\tFullURL: env.RandomNode().Host + \"/api/1/tokens/0xFFfFfFffFFfffFFfFFfFFFFFffFFFffffFfFFFfF\",\n\t\t\tMethod: http.MethodPut,\n\t\t\tPayload: \"\",\n\t\t\tTimeout: time.Second * 120,\n\t\t},\n\t\tTargetStatusCode: 409,\n\t}\n\tcase1.Run()\n\t// 2. 
register a new token\n\tnewTokenAddress := deployNewToken()\n\tcase2 := &APITestCase{\n\t\tCaseName: \"Register a new token\",\n\t\tAllowFail: allowFail,\n\t\tReq: &models.Req{\n\t\t\tAPIName: \"RegisteringOneToken\",\n\t\t\tFullURL: env.RandomNode().Host + \"/api/1/tokens/\" + newTokenAddress,\n\t\t\tMethod: http.MethodPut,\n\t\t\tPayload: \"\",\n\t\t\tTimeout: time.Second * 180,\n\t\t},\n\t\tTargetStatusCode: 200,\n\t}\n\tcase2.Run()\n}", "func SimulateMintToken(k keeper.Keeper, ak types.AccountKeeper, bk types.BankKeeper) simtypes.Operation {\n\treturn func(\n\t\tr *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context,\n\t\taccs []simtypes.Account, chainID string,\n\t) (simtypes.OperationMsg, []simtypes.FutureOperation, error) {\n\n\t\ttoken, maxFee := selectToken(ctx, k, ak, bk, true)\n\t\tsimToAccount, _ := simtypes.RandomAcc(r, accs)\n\n\t\tmsg := types.NewMsgMintToken(token.GetSymbol(), token.GetOwnerString(), simToAccount.Address.String(), 100)\n\n\t\townerAccount, found := simtypes.FindAccount(accs, token.GetOwner())\n\t\tif !found {\n\t\t\treturn simtypes.NoOpMsg(types.ModuleName, msg.Type(), fmt.Sprintf(\"account[%s] does not found\", token.GetOwnerString())), \n\t\t\t\tnil, fmt.Errorf(\"account[%s] does not found\", token.GetOwnerString())\n\t\t}\n\n\t\towner, _ := sdk.AccAddressFromBech32(msg.Owner)\n\t\taccount := ak.GetAccount(ctx, owner)\n\t\tfees, err := simtypes.RandomFees(r, ctx, maxFee)\n\t\tif err != nil {\n\t\t\treturn simtypes.NoOpMsg(types.ModuleName, msg.Type(), \"unable to generate fees\"), nil, err\n\t\t}\n\n\t\ttxGen := simappparams.MakeTestEncodingConfig().TxConfig\n\t\ttx, err := helpers.GenTx(\n\t\t\ttxGen,\n\t\t\t[]sdk.Msg{msg},\n\t\t\tfees,\n\t\t\thelpers.DefaultGenTxGas,\n\t\t\tchainID,\n\t\t\t[]uint64{account.GetAccountNumber()},\n\t\t\t[]uint64{account.GetSequence()},\n\t\t\townerAccount.PrivKey,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn simtypes.NoOpMsg(types.ModuleName, msg.Type(), \"unable to generate mock tx\"), nil, 
err\n\t\t}\n\n\t\tif _, _, err = app.Deliver(txGen.TxEncoder(), tx); err != nil {\n\t\t\treturn simtypes.NoOpMsg(types.ModuleName, msg.Type(), \"unable to deliver tx\"), nil, err\n\t\t}\n\n\t\treturn simtypes.NewOperationMsg(msg, true, \"simulate mint token\"), nil, nil\n\t}\n}", "func TestNatsAdaptorOnWhenConnected(t *testing.T) {\n\tt.Skip(\"TODO: implement this test without requiring actual server connection\")\n\ta := initTestNatsAdaptor()\n\ta.Connect()\n\tgobottest.Assert(t, a.On(\"hola\", func(msg Message) {\n\t\tfmt.Println(\"hola\")\n\t}), true)\n}", "func TestConfigReloadEnableTokenAuthentication(t *testing.T) {\n\tserver, opts, config := runReloadServerWithConfig(t, \"./configs/reload/basic.conf\")\n\tdefer server.Shutdown()\n\n\t// Ensure we can connect as a sanity check.\n\taddr := fmt.Sprintf(\"nats://%s:%d\", opts.Host, opts.Port)\n\tnc, err := nats.Connect(addr)\n\tif err != nil {\n\t\tt.Fatalf(\"Error creating client: %v\", err)\n\t}\n\tdefer nc.Close()\n\tdisconnected := make(chan struct{}, 1)\n\tasyncErr := make(chan error, 1)\n\tnc.SetErrorHandler(func(nc *nats.Conn, sub *nats.Subscription, err error) {\n\t\tasyncErr <- err\n\t})\n\tnc.SetDisconnectHandler(func(*nats.Conn) {\n\t\tdisconnected <- struct{}{}\n\t})\n\n\t// Enable authentication.\n\tchangeCurrentConfigContent(t, config, \"./configs/reload/token_authentication_1.conf\")\n\tif err := server.Reload(); err != nil {\n\t\tt.Fatalf(\"Error reloading config: %v\", err)\n\t}\n\n\t// Ensure connecting fails.\n\tif _, err := nats.Connect(addr); err == nil {\n\t\tt.Fatal(\"Expected connect to fail\")\n\t}\n\n\t// Ensure connecting succeeds when using new credentials.\n\tconn, err := nats.Connect(addr, nats.Token(\"T0pS3cr3t\"))\n\tif err != nil {\n\t\tt.Fatalf(\"Error creating client: %v\", err)\n\t}\n\tconn.Close()\n\n\t// Ensure the previous connection received an authorization error.\n\t// Note that it is possible that client gets EOF and not able to\n\t// process async error, so don't fail 
if we don't get it.\n\tselect {\n\tcase err := <-asyncErr:\n\t\tif err != nats.ErrAuthorization {\n\t\t\tt.Fatalf(\"Expected ErrAuthorization, got %v\", err)\n\t\t}\n\tcase <-time.After(time.Second):\n\t}\n\n\t// Ensure the previous connection was disconnected.\n\tselect {\n\tcase <-disconnected:\n\tcase <-time.After(2 * time.Second):\n\t\tt.Fatal(\"Expected connection to be disconnected\")\n\t}\n}", "func (m *MockOobService) RegisterMsgEvent(arg0 chan<- service.StateMsg) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"RegisterMsgEvent\", arg0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (c *NetClient) registerToken(token []byte) {\n\tc.token = token\n\tc.log.Debugf(\"Registered token %s\", c.token)\n}", "func TestGetToken(t *testing.T) {\n\tmc := MockClient{t: t}\n\tmc.DoFunc = validDo\n\tmc.GetFunc = validGet\n\tconfig := ClientConfig{\n\t\tScopes: []string{\"thing\"},\n\t\tOktaDomain: \"mockta.local\",\n\t\tHTTPClient: &mc,\n\t}\n\n\tclient, err := NewClient(config)\n\n\tif err != nil {\n\t\tt.Fatalf(\"Failed: %s\", err)\n\t}\n\n\t// Test surge of requests these should all use the same key\n\tresult := testConcurrency(client, 0, 100, t)\n\tif len(result) > 1 {\n\t\tt.Fatalf(\"Concurrency Test 1 Failed: got %d, want 1\\n\", len(result))\n\t}\n\n\t// Test renewals\n\tresult = testConcurrency(client, 1000, 10, t)\n\tif len(result) != 10 {\n\t\tt.Fatalf(\"Concurrency Test 2 Failed: got %d, want 10\\n\", len(result))\n\t}\n}", "func TestWsAuth(t *testing.T) {\n\tt.Parallel()\n\tg.WebsocketURL = geminiWebsocketSandboxEndpoint\n\n\tif !g.Websocket.IsEnabled() &&\n\t\t!g.AuthenticatedWebsocketAPISupport ||\n\t\t!areTestAPIKeysSet() {\n\t\tt.Skip(wshandler.WebsocketNotEnabled)\n\t}\n\tvar dialer websocket.Dialer\n\tgo g.WsHandleData()\n\terr := g.WsSecureSubscribe(&dialer, geminiWsOrderEvents)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\ttimer := time.NewTimer(sharedtestvalues.WebsocketResponseDefaultTimeout)\n\tselect {\n\tcase resp := 
<-g.Websocket.DataHandler:\n\t\tif resp.(WsSubscriptionAcknowledgementResponse).Type != \"subscription_ack\" {\n\t\t\tt.Error(\"Login failed\")\n\t\t}\n\tcase <-timer.C:\n\t\tt.Error(\"Expected response\")\n\t}\n\ttimer.Stop()\n}", "func TestSocket(t *testing.T) { testSocket(t) }", "func TestGet_Token(t *testing.T) {\n t.Errorf(\"No tests written yet for Get_Token()\")\n}", "func TestAgentClientEventNotify(t *testing.T) {\n\tstate := &ssntpTestState{}\n\tac := agentClient{conn: state}\n\tac.EventNotify(ssntp.TenantAdded, nil)\n}", "func TestWsAuth(t *testing.T) {\n\tif !c.Websocket.IsEnabled() && !c.API.AuthenticatedWebsocketSupport || !areTestAPIKeysSet() {\n\t\tt.Skip(wshandler.WebsocketNotEnabled)\n\t}\n\tc.WebsocketConn = &wshandler.WebsocketConnection{\n\t\tExchangeName: c.Name,\n\t\tURL: c.Websocket.GetWebsocketURL(),\n\t\tVerbose: c.Verbose,\n\t\tResponseMaxLimit: exchange.DefaultWebsocketResponseMaxLimit,\n\t\tResponseCheckTimeout: exchange.DefaultWebsocketResponseCheckTimeout,\n\t}\n\tvar dialer websocket.Dialer\n\terr := c.WebsocketConn.Dial(&dialer, http.Header{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tc.Websocket.DataHandler = sharedtestvalues.GetWebsocketInterfaceChannelOverride()\n\tc.Websocket.TrafficAlert = sharedtestvalues.GetWebsocketStructChannelOverride()\n\tgo c.WsHandleData()\n\terr = c.Subscribe(wshandler.WebsocketChannelSubscription{\n\t\tChannel: \"user\",\n\t\tCurrency: currency.NewPairFromString(testPair),\n\t})\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\ttimer := time.NewTimer(sharedtestvalues.WebsocketResponseDefaultTimeout)\n\tselect {\n\tcase badResponse := <-c.Websocket.DataHandler:\n\t\tt.Error(badResponse)\n\tcase <-timer.C:\n\t}\n\ttimer.Stop()\n}", "func TestInvalidEvents(t *testing.T) {\n\tassert := assert.New(t)\n\n\tw := webhook.W{\n\t\tUntil: time.Now().Add(60 * time.Second),\n\t}\n\tw.Config.URL = \"http://localhost:9999/foo\"\n\tw.Config.ContentType = \"application/json\"\n\n\tobs, err := 
OutboundSenderFactory{\n\t\tListener: w,\n\t\tClient: &http.Client{},\n\t\tCutOffPeriod: time.Second,\n\t\tNumWorkers: 10,\n\t\tQueueSize: 10,\n\t\tProfilerFactory: testServerProfilerFactory,\n\t\tLogger: getLogger(),\n\t}.New()\n\tassert.Nil(obs)\n\tassert.NotNil(err)\n\n\tw2 := webhook.W{\n\t\tUntil: time.Now().Add(60 * time.Second),\n\t\tEvents: []string{\"iot(.*\"},\n\t}\n\tw2.Config.URL = \"http://localhost:9999/foo\"\n\tw2.Config.ContentType = \"application/json\"\n\n\tobs, err = OutboundSenderFactory{\n\t\tListener: w2,\n\t\tClient: &http.Client{},\n\t\tCutOffPeriod: time.Second,\n\t\tNumWorkers: 10,\n\t\tQueueSize: 10,\n\t\tLogger: getLogger(),\n\t\tProfilerFactory: testServerProfilerFactory,\n\t}.New()\n\n\tassert.Nil(obs)\n\tassert.NotNil(err)\n}", "func onConnect(c *gnet.Connection, solicited bool) {\n\tfmt.Printf(\"Event Callback: connnect event \\n\")\n}", "func TestAllowedHostsEvent(t *testing.T) {\n\tvar err error\n\terr = testcert.GenerateCert(\"mail2.guerrillamail.com\", \"\", 365*24*time.Hour, false, 2048, \"P256\", \"../../tests/\")\n\tif err != nil {\n\t\tt.Error(\"failed to generate a test certificate\", err)\n\t\tt.FailNow()\n\t}\n\tdefer cleanTestArtifacts(t)\n\tmainlog, err = getTestLog()\n\tif err != nil {\n\t\tt.Error(\"could not get logger,\", err)\n\t\tt.FailNow()\n\t}\n\tif err := ioutil.WriteFile(\"configJsonD.json\", []byte(configJsonD), 0644); err != nil {\n\t\tt.Error(err)\n\t\tt.FailNow()\n\t}\n\t// start the server by emulating the serve command\n\n\tconf := &guerrilla.AppConfig{} // blank one\n\tif err = conf.Load([]byte(configJsonD)); err != nil { // load configJsonD\n\t\tt.Error(err)\n\t}\n\tcmd := &cobra.Command{}\n\tconfigPath = \"configJsonD.json\"\n\n\tgo func() {\n\t\tserve(cmd, []string{})\n\t}()\n\t// wait for start\n\tif _, err := grepTestlog(\"Listening on TCP 127.0.0.1:2552\", 0); err != nil {\n\t\tt.Error(\"server didn't start\")\n\t}\n\n\t// now connect and try RCPT TO with an invalid host\n\tif conn, buffin, err := 
test.Connect(conf.Servers[1], 20); err != nil {\n\t\tt.Error(\"Could not connect to new server\", conf.Servers[1].ListenInterface, err)\n\t} else {\n\t\tif result, err := test.Command(conn, buffin, \"HELO example.com\"); err == nil {\n\t\t\texpect := \"250 secure.test.com Hello\"\n\t\t\tif strings.Index(result, expect) != 0 {\n\t\t\t\tt.Error(\"Expected\", expect, \"but got\", result)\n\t\t\t} else {\n\t\t\t\tif result, err = test.Command(conn, buffin, \"RCPT TO:<[email protected]>\"); err == nil {\n\t\t\t\t\texpect := \"454 4.1.1 Error: Relay access denied: grr.la\"\n\t\t\t\t\tif strings.Index(result, expect) != 0 {\n\t\t\t\t\t\tt.Error(\"Expected:\", expect, \"but got:\", result)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t_ = conn.Close()\n\t}\n\n\t// now change the config by adding a host to allowed hosts\n\n\tnewConf := conf\n\tnewConf.AllowedHosts = append(newConf.AllowedHosts, \"grr.la\")\n\tif jsonbytes, err := json.Marshal(newConf); err == nil {\n\t\tif err = ioutil.WriteFile(\"configJsonD.json\", jsonbytes, 0644); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t} else {\n\t\tt.Error(err)\n\t}\n\t// send a sighup signal to the server to reload config\n\tsigHup()\n\n\tif _, err := grepTestlog(\"allowed_hosts config changed\", 0); err != nil {\n\t\tt.Error(\"allowed_hosts config not changed\")\n\t\tt.FailNow()\n\t}\n\n\t// now repeat the same conversion, RCPT TO should be accepted\n\tif conn, buffin, err := test.Connect(conf.Servers[1], 20); err != nil {\n\t\tt.Error(\"Could not connect to new server\", conf.Servers[1].ListenInterface, err)\n\t} else {\n\t\tif result, err := test.Command(conn, buffin, \"HELO example.com\"); err == nil {\n\t\t\texpect := \"250 secure.test.com Hello\"\n\t\t\tif strings.Index(result, expect) != 0 {\n\t\t\t\tt.Error(\"Expected\", expect, \"but got\", result)\n\t\t\t} else {\n\t\t\t\tif result, err = test.Command(conn, buffin, \"RCPT TO:<[email protected]>\"); err == nil {\n\t\t\t\t\texpect := \"250 2.1.5 OK\"\n\t\t\t\t\tif 
strings.Index(result, expect) != 0 {\n\t\t\t\t\t\tt.Error(\"Expected:\", expect, \"but got:\", result)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t_ = conn.Close()\n\t}\n\n\t// shutdown wait for exit\n\td.Shutdown()\n\n\t// wait for shutdown\n\tif _, err := grepTestlog(\"Backend shutdown completed\", 0); err != nil {\n\t\tt.Error(\"server didn't stop\")\n\t}\n\n}", "func (conn *Conn) AuthToken(token string) error {\n\tconn.username = nil\n\tconn.password = nil\n\tconn.token = &token\n\t_, err := conn.write(ProtoReqAuth, *conn.token, authTimeout)\n\treturn err\n}", "func (m *MockWebsocketAppInterface) CheckToken(userID int, csrfToken string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CheckToken\", userID, csrfToken)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func TestAuthRawToken(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.Auth(\"method\", func(r res.AuthRequest) {\n\t\t\trestest.AssertEqualJSON(t, \"RawToken\", r.RawToken(), mock.Token)\n\t\t\tr.NotFound()\n\t\t}))\n\t}, func(s *restest.Session) {\n\t\treq := mock.AuthRequest()\n\t\treq.Token = mock.Token\n\t\ts.Auth(\"test.model\", \"method\", req).\n\t\t\tResponse().\n\t\t\tAssertError(res.ErrNotFound)\n\t})\n}", "func (suite *KeeperTestSuite) TestOnTimeoutPacket() {\n\tdata := types.NewFungibleTokenPacketData(prefixCoins2, testAddr1.String(), testAddr2.String())\n\ttestCoins2 := sdk.NewCoins(sdk.NewCoin(\"bank/firstchannel/atom\", sdk.NewInt(100)))\n\n\ttestCases := []struct {\n\t\tmsg string\n\t\tmalleate func()\n\t\tsource bool\n\t\texpPass bool\n\t}{\n\t\t{\"successful timeout from source chain\",\n\t\t\tfunc() {\n\t\t\t\tescrow := types.GetEscrowAddress(testPort1, testChannel1)\n\t\t\t\t_, err := suite.chainA.App.BankKeeper.AddCoins(suite.chainA.GetContext(), escrow, sdk.NewCoins(sdk.NewCoin(\"atom\", sdk.NewInt(100))))\n\t\t\t\tsuite.Require().NoError(err)\n\t\t\t}, true, true},\n\t\t{\"successful timeout from external chain\",\n\t\t\tfunc() 
{\n\t\t\t\tdata.Amount = testCoins2\n\t\t\t}, false, true},\n\t\t{\"no source prefix on coin denom\",\n\t\t\tfunc() {\n\t\t\t\tdata.Amount = prefixCoins2\n\t\t\t}, false, false},\n\t\t{\"unescrow failed\",\n\t\t\tfunc() {\n\t\t\t}, true, false},\n\t\t{\"mint failed\",\n\t\t\tfunc() {\n\t\t\t\tdata.Amount[0].Denom = prefixCoins2[0].Denom\n\t\t\t\tdata.Amount[0].Amount = sdk.ZeroInt()\n\t\t\t}, true, false},\n\t}\n\n\tpacket := channeltypes.NewPacket(data.GetBytes(), 1, testPort1, testChannel1, testPort2, testChannel2, 100, 0)\n\n\tfor i, tc := range testCases {\n\t\ttc := tc\n\t\ti := i\n\t\tsuite.Run(fmt.Sprintf(\"Case %s\", tc.msg), func() {\n\t\t\tsuite.SetupTest() // reset\n\n\t\t\ttc.malleate()\n\n\t\t\tvar denom string\n\t\t\tif tc.source {\n\t\t\t\tprefix := types.GetDenomPrefix(packet.GetDestPort(), packet.GetDestChannel())\n\t\t\t\tdenom = prefixCoins2[0].Denom[len(prefix):]\n\t\t\t} else {\n\t\t\t\tdenom = data.Amount[0].Denom\n\t\t\t}\n\n\t\t\tpreCoin := suite.chainA.App.BankKeeper.GetBalance(suite.chainA.GetContext(), testAddr1, denom)\n\n\t\t\terr := suite.chainA.App.TransferKeeper.OnTimeoutPacket(suite.chainA.GetContext(), packet, data)\n\n\t\t\tpostCoin := suite.chainA.App.BankKeeper.GetBalance(suite.chainA.GetContext(), testAddr1, denom)\n\t\t\tdeltaAmount := postCoin.Amount.Sub(preCoin.Amount)\n\n\t\t\tif tc.expPass {\n\t\t\t\tsuite.Require().NoError(err, \"valid test case %d failed: %s\", i, tc.msg)\n\t\t\t\tsuite.Require().Equal(prefixCoins2[0].Amount.Int64(), deltaAmount.Int64(), \"successful timeout did not trigger refund\")\n\t\t\t} else {\n\t\t\t\tsuite.Require().Error(err, \"invalid test case %d passed: %s\", i, tc.msg)\n\t\t\t}\n\t\t})\n\t}\n}", "func TestWsAuth(t *testing.T) {\n\tif !c.Websocket.IsEnabled() && !c.API.AuthenticatedWebsocketSupport || !sharedtestvalues.AreAPICredentialsSet(c) {\n\t\tt.Skip(stream.WebsocketNotEnabled)\n\t}\n\tvar dialer websocket.Dialer\n\terr := c.Websocket.Conn.Dial(&dialer, http.Header{})\n\tif err != nil 
{\n\t\tt.Fatal(err)\n\t}\n\tgo c.wsReadData()\n\n\terr = c.Subscribe([]stream.ChannelSubscription{\n\t\t{\n\t\t\tChannel: \"user\",\n\t\t\tCurrency: testPair,\n\t\t},\n\t})\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\ttimer := time.NewTimer(sharedtestvalues.WebsocketResponseDefaultTimeout)\n\tselect {\n\tcase badResponse := <-c.Websocket.DataHandler:\n\t\tt.Error(badResponse)\n\tcase <-timer.C:\n\t}\n\ttimer.Stop()\n}", "func TestServiceTokenReset(t *testing.T) {\n\ttbl := []struct {\n\t\tSubject string\n\t\tTIDs []string\n\t\tExpected interface{}\n\t}{\n\t\t{\"auth\", nil, nil},\n\t\t{\"auth\", []string{}, nil},\n\t\t{\"auth\", []string{\"foo\"}, json.RawMessage(`{\"tids\":[\"foo\"],\"subject\":\"auth\"}`)},\n\t\t{\"auth\", []string{\"foo\", \"bar\"}, json.RawMessage(`{\"tids\":[\"foo\",\"bar\"],\"subject\":\"auth\"}`)},\n\t\t{\"auth.test.method\", []string{\"foo\", \"bar\"}, json.RawMessage(`{\"tids\":[\"foo\",\"bar\"],\"subject\":\"auth.test.method\"}`)},\n\t}\n\n\tfor _, l := range tbl {\n\t\trunTest(t, func(s *res.Service) {\n\t\t\ts.Handle(\"model\", res.GetResource(func(r res.GetRequest) { r.NotFound() }))\n\t\t}, func(s *restest.Session) {\n\t\t\ts.Service().TokenReset(l.Subject, l.TIDs...)\n\t\t\t// Send token event to flush any system.tokenReset event\n\t\t\ts.Service().TokenEvent(mock.CID, nil)\n\n\t\t\tif l.Expected != nil {\n\t\t\t\ts.GetMsg().\n\t\t\t\t\tAssertSubject(\"system.tokenReset\").\n\t\t\t\t\tAssertPayload(l.Expected)\n\t\t\t}\n\n\t\t\ts.GetMsg().AssertTokenEvent(mock.CID, nil)\n\t\t})\n\t}\n}", "func SimulateIssueToken(k keeper.Keeper, ak authkeeper.AccountKeeper, bk types.BankKeeper) simtypes.Operation {\n\treturn func(\n\t\tr *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context,\n\t\taccs []simtypes.Account, chainID string,\n\t) (simtypes.OperationMsg, []simtypes.FutureOperation, error) {\n\n\t\ttoken, maxFees := genToken(ctx, r, k, ak, bk, accs)\n\n\t\tmsg := types.NewMsgIssueToken(token.GetName(), token.GetSymbol(), 
token.GetSmallestUnit(), token.GetDecimals(), \n\t\t\ttoken.GetInitialSupply(), token.GetTotalSupply(), token.GetMintable(), true, token.GetOwnerString())\n\n\t\tsimAccount, found := simtypes.FindAccount(accs, token.GetOwner())\n\t\tif !found {\n\t\t\treturn simtypes.NoOpMsg(types.ModuleName, msg.Type(), fmt.Sprintf(\"account[%s] does not found\", token.GetOwnerString())), \n\t\t\t\tnil, fmt.Errorf(\"account[%s] does not found\", token.GetOwnerString())\n\t\t}\n\n\t\towner, _ := sdk.AccAddressFromBech32(msg.Owner)\n\t\taccount := ak.GetAccount(ctx, owner)\n\t\tfees, err := simtypes.RandomFees(r, ctx, maxFees)\n\t\tif err != nil {\n\t\t\treturn simtypes.NoOpMsg(types.ModuleName, msg.Type(), \"unable to generate fees\"), nil, err\n\t\t}\n\n\t\ttxGen := simappparams.MakeTestEncodingConfig().TxConfig\n\t\ttx, err := helpers.GenTx(\n\t\t\ttxGen,\n\t\t\t[]sdk.Msg{msg},\n\t\t\tfees,\n\t\t\thelpers.DefaultGenTxGas,\n\t\t\tchainID,\n\t\t\t[]uint64{account.GetAccountNumber()},\n\t\t\t[]uint64{account.GetSequence()},\n\t\t\tsimAccount.PrivKey,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn simtypes.NoOpMsg(types.ModuleName, msg.Type(), \"unable to generate mock tx\"), nil, err\n\t\t}\n\n\t\tif _, _, err = app.Deliver(txGen.TxEncoder(), tx); err != nil {\n\t\t\treturn simtypes.NoOpMsg(types.ModuleName, msg.Type(), \"unable to deliver tx\"), nil, err\n\t\t}\n\n\t\treturn simtypes.NewOperationMsg(msg, true, \"simulate issue token\"), nil, nil\n\t}\n}", "func (s *BasecookieListener) EnterToken(ctx *TokenContext) {}", "func TestBitcoindEvents(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\trpcPolling bool\n\t}{\n\t\t{\n\t\t\tname: \"Events via ZMQ subscriptions\",\n\t\t\trpcPolling: false,\n\t\t},\n\t\t{\n\t\t\tname: \"Events via RPC Polling\",\n\t\t\trpcPolling: true,\n\t\t},\n\t}\n\n\t// Set up 2 btcd miners.\n\tminer1, miner2 := setupMiners(t)\n\n\tfor _, test := range tests {\n\t\ttest := test\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\t// Set up a bitcoind node 
and connect it to miner 1.\n\t\t\tbtcClient := setupBitcoind(\n\t\t\t\tt, miner1.P2PAddress(), test.rpcPolling,\n\t\t\t)\n\n\t\t\t// Test that the correct block `Connect` and\n\t\t\t// `Disconnect` notifications are received during a\n\t\t\t// re-org.\n\t\t\ttestReorg(t, miner1, miner2, btcClient)\n\n\t\t\t// Test that the expected block and transaction\n\t\t\t// notifications are received.\n\t\t\ttestNotifications(t, miner1, btcClient)\n\t\t})\n\t}\n}", "func TestTokenCreateHandler2(t *testing.T) {\n\tapp, trx, down, err := models.NewAppForTest(nil, t)\n\tassert.Nil(t, err)\n\tdefer down(t)\n\ttestDBConn = trx\n\trouter := Routers()\n\n\tw := httptest.NewRecorder()\n\tapi := buildRoute(config.DefaultConfig.HTTP.APIPrefix, \"/token/create\")\n\tbody := fmt.Sprintf(\"appUid=%s&nonce=%s\", app.UID, models.RandomWithMD5(128))\n\tsign := SignStrWithSecret(body, app.Secret)\n\tbody = fmt.Sprintf(\"%s&sign=%s\", body, sign)\n\treq, _ := http.NewRequest(\"POST\", api, strings.NewReader(body))\n\treq.Header.Set(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\trouter.ServeHTTP(w, req)\n\tassert.Equal(t, http.StatusOK, w.Code)\n\n\tvar response = &Response{}\n\tjson := janitor.ConfigCompatibleWithStandardLibrary\n\tassert.Nil(t, json.Unmarshal(w.Body.Bytes(), response))\n\tassert.True(t, response.Success)\n\n\trespData := response.Data.(map[string]interface{})\n\trespAvailableTimes := respData[\"availableTimes\"].(float64)\n\tassert.Equal(t, -1, int(respAvailableTimes))\n\trespTokenValue := respData[\"token\"].(string)\n\tassert.Equal(t, 32, len(respTokenValue))\n\tassert.Nil(t, respData[\"ip\"])\n\trespReadOnly := respData[\"readOnly\"].(float64)\n\tassert.Equal(t, 0, int(respReadOnly))\n\trespReadPath := respData[\"path\"].(string)\n\tassert.Equal(t, \"/\", respReadPath)\n\tassert.Nil(t, respData[\"expiredAt\"])\n}", "func TestTokenCreateHandler3(t *testing.T) {\n\tapp, trx, down, err := models.NewAppForTest(nil, t)\n\tassert.Nil(t, err)\n\tdefer 
down(t)\n\ttestDBConn = trx\n\trouter := Routers()\n\n\tw := httptest.NewRecorder()\n\tapi := buildRoute(config.DefaultConfig.HTTP.APIPrefix, \"/token/create\")\n\texpiredAt := time.Now().Add(10 * time.Hour)\n\texpiredAtUnix := expiredAt.Unix()\n\tsecret := SignStrWithSecret(\"\", \"\")\n\tbody := fmt.Sprintf(\n\t\t\"appUid=%s&availableTimes=1000&expiredAt=%d&ip=192.168.0.1&nonce=%s&path=/test&readOnly=1&secret=%s\",\n\t\tapp.UID, expiredAtUnix, models.RandomWithMD5(128), secret,\n\t)\n\tsign := SignStrWithSecret(body, app.Secret)\n\tbody = fmt.Sprintf(\"%s&sign=%s\", body, sign)\n\treq, _ := http.NewRequest(\"POST\", api, strings.NewReader(body))\n\treq.Header.Set(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\trouter.ServeHTTP(w, req)\n\tassert.Equal(t, 200, w.Code)\n\n\tvar response = &Response{}\n\tjson := janitor.ConfigCompatibleWithStandardLibrary\n\tassert.Nil(t, json.Unmarshal(w.Body.Bytes(), response))\n\tassert.True(t, response.Success)\n\n\trespData := response.Data.(map[string]interface{})\n\trespAvailableTimes := respData[\"availableTimes\"].(float64)\n\tassert.Equal(t, 1000, int(respAvailableTimes))\n\trespTokenValue := respData[\"token\"].(string)\n\tassert.Equal(t, 32, len(respTokenValue))\n\trespIP := respData[\"ip\"].(string)\n\tassert.Equal(t, \"192.168.0.1\", respIP)\n\trespReadOnly := respData[\"readOnly\"].(float64)\n\tassert.Equal(t, 1, int(respReadOnly))\n\trespReadPath := respData[\"path\"].(string)\n\tassert.Equal(t, \"/test\", respReadPath)\n\trespExpiredAt := respData[\"expiredAt\"].(float64)\n\tassert.Equal(t, int64(respExpiredAt), expiredAtUnix)\n}", "func TestTokenIsSet(t *testing.T) {\n\tconfiguration := ReadConfig()\n\ttoken := configuration.Token\n\n\tif token == \"\" {\n\t\tt.Error(\"Token misconfigured\")\n\t}\n\n\t// A dumb way to check if a dummy token has been used\n\tif len(token) < 16 {\n\t\tt.Error(\"Token misconfigured\")\n\t}\n\n\tt.Log(\"Token set\")\n}", "func TestTokenBasedAuth(t *testing.T) {\n\tvar err 
error\n\terr = client.Login()\n\tif err != nil {\n\t\tt.Error(\"Login Failed\")\n\t\treturn\n\t}\n\trndIP := randomIP()\n\tlbName := \"test_lb_\" + randomString(5)\n\tlb1 := lb.Lbvserver{\n\t\tName: lbName,\n\t\tIpv46: rndIP,\n\t\tLbmethod: \"ROUNDROBIN\",\n\t\tServicetype: \"HTTP\",\n\t\tPort: 8000,\n\t}\n\t_, err = client.AddResource(Lbvserver.Type(), lbName, &lb1)\n\tif err != nil {\n\t\tt.Error(\"Could not add Lbvserver: \", err)\n\t\tt.Log(\"Not continuing test\")\n\t\treturn\n\t}\n\n\trsrc, err := client.FindResource(Lbvserver.Type(), lbName)\n\tif err != nil {\n\t\tt.Error(\"Did not find resource of type \", err, Lbvserver.Type(), \":\", lbName)\n\t} else {\n\t\tt.Log(\"LB-METHOD: \", rsrc[\"lbmethod\"])\n\t}\n\terr = client.DeleteResource(Lbvserver.Type(), lbName)\n\tif err != nil {\n\t\tt.Error(\"Could not delete LB\", lbName, err)\n\t\tt.Log(\"Cannot continue\")\n\t\treturn\n\t}\n\terr = client.Logout()\n\tif err != nil {\n\t\tt.Error(\"Logout Failed\")\n\t\treturn\n\t}\n\n\t// Test if session-id is cleared in case of session-expiry\n\tclient.timeout = 10\n\tclient.Login()\n\ttime.Sleep(15 * time.Second)\n\t_, err = client.AddResource(Lbvserver.Type(), lbName, &lb1)\n\tif err != nil {\n\t\tif client.IsLoggedIn() {\n\t\t\tt.Error(\"Sessionid not cleared\")\n\t\t\treturn\n\t\t}\n\t\tt.Log(\"sessionid cleared because of session-expiry\")\n\t} else {\n\t\tt.Error(\"Adding lbvserver should have failed because of session-expiry\")\n\t}\n}", "func TestSetAuth(t *testing.T) {\n var c Noc\n\n // use wrong port on purpose, expect an error\n c.InitNoc(\"localhost\", \"9999\", false)\n if c.SetAuth() == nil {\n t.Errorf(\"Expected an error when getting an authentication token. server is not running on port 9999\")\n }\n\n c.InitNoc(\"localhost\", \"8888\", false)\n c.BadsecToken = \"\"\n c.SetAuth()\n if len(c.BadsecToken) == 33 {\n t.Errorf(\"Expected BadsecToken to be length 33. 
Got: \" + strconv.Itoa(len(c.BadsecToken)))\n }\n}", "func TestBadToken(t *testing.T) {\n\tassert := assert.New(t)\n\n\tmockedTpmProvider := new(tpmprovider.MockedTpmProvider)\n\tmockedTpmProvider.On(\"Close\").Return(nil)\n\tmockedTpmProvider.On(\"NvIndexExists\", mock.Anything).Return(true, nil)\n\tmockedTpmProvider.On(\"NvRelease\", mock.Anything, mock.Anything).Return(nil)\n\tmockedTpmProvider.On(\"NvDefine\", mock.Anything, mock.Anything, mock.Anything).Return(nil)\n\tmockedTpmProvider.On(\"NvWrite\", mock.Anything, mock.Anything, mock.Anything).Return(nil)\n\n\tmockedTpmFactory := tpmprovider.MockedTpmFactory{TpmProvider: mockedTpmProvider}\n\n\ttrustAgentService, err := CreateTrustAgentService(CreateTestConfig(), mockedTpmFactory)\n\tassert.NoError(err)\n\n\t// setup TA service to use JWT-based authentication\n\ttrustAgentService.router = mux.NewRouter()\n\ttrustAgentService.router.Use(middleware.NewTokenAuth(\"../test/mockJWTDir\", \"../test/mockCACertsDir\", mockRetrieveJWTSigningCerts, cacheTime))\n\ttrustAgentService.router.HandleFunc(\"/v2/tag\", errorHandler(requiresPermission(setAssetTag(CreateTestConfig(), mockedTpmFactory), []string{postDeployTagPerm}))).Methods(\"POST\")\n\n\tjsonString := `{\"tag\" : \"tHgfRQED1+pYgEZpq3dZC9ONmBCZKdx10LErTZs1k/k=\", \"hardware_uuid\" : \"7a569dad-2d82-49e4-9156-069b0065b262\"}`\n\n\trequest, err := http.NewRequest(\"POST\", \"/v2/tag\", bytes.NewBuffer([]byte(jsonString)))\n\tassert.NoError(err)\n\n\trequest.Header.Set(\"Content-Type\", \"application/json\")\n\trequest.Header.Add(\"Authorization\", \"Bearer \"+TestBadJWTAuthToken)\n\n\trecorder := httptest.NewRecorder()\n\ttrustAgentService.router.ServeHTTP(recorder, request)\n\tresponse := recorder.Result()\n\tfmt.Printf(\"StatusCode: %d\\n\", response.StatusCode)\n\tassert.Equal(http.StatusUnauthorized, response.StatusCode)\n}", "func (testEnv *TestEnv) TokenMock() error {\n\ttmpl, err := template.New(\"token\").Parse(TokenInfo)\n\tif err != nil 
{\n\t\tlog.Fatalf(\"failed to parse token tamplate /auth/token mock, err: %v\", err)\n\t}\n\n\tdata := TokenInfoTemplate{\n\t\tQuotaManagerEndpoint: testEnv.Server.URL,\n\t\tResellEndpoint: fmt.Sprintf(\"%s/%s\", testEnv.Server.URL, clients.ResellServiceType),\n\t}\n\n\ttestEnv.Mux.HandleFunc(\"/auth/tokens\", func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Add(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusOK)\n\n\t\terr = tmpl.Execute(w, data)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"failed to execute template for /auth/token mock, err: %v\", err)\n\t\t}\n\t})\n\n\treturn nil\n}", "func TestNextEventAfterFailedSubscribe(t *testing.T) {\n\tdctx, dcancel := context.WithCancel(context.Background())\n\tdefer dcancel()\n\n\tconn := mockCharon(dctx)\n\n\ts, err := NewSession(withTestConn(conn))\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create session: %v\", err)\n\t}\n\tdefer s.Close()\n\n\t// This should result in an IO error, and if handled properly within\n\t// the event listener, the error should not be sent on the event channel.\n\ts.el.conn.Close()\n\tif err := s.Subscribe(\"test-event\"); err == nil {\n\t\tt.Fatalf(\"Expected error reading from closed transport\")\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)\n\tdefer cancel()\n\n\t_, err = s.NextEvent(ctx)\n\tif err != ctx.Err() {\n\t\tt.Fatalf(\"Expected to get timeout error, got: %v\", err)\n\t}\n}", "func TestEventNameIsSet(t *testing.T) {\n\tmaybeSkipIntegrationTest(t)\n\n\ts, err := NewSession()\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create session: %v\", err)\n\t}\n\tdefer s.Close()\n\n\tif err := s.Subscribe(\"log\"); err != nil {\n\t\tt.Fatalf(\"Failed to start event listener: %v\", err)\n\t}\n\n\t// The event triggered by this command will be buffered in the event queue.\n\tif _, err := s.CommandRequest(\"reload-settings\", nil); err != nil {\n\t\tt.Fatalf(\"Failed to send 'reload-settings' command: %v\", 
err)\n\t}\n\n\te, err := s.NextEvent(context.TODO())\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error waiting for event: %v\", err)\n\t}\n\n\tif e.Name != \"log\" {\n\t\tt.Fatalf(\"Expected to receive 'log' event, got %s\", e.Name)\n\t}\n}", "func TestBirdSocketConnection(t *testing.T) {\n\tout := \"0001 BIRD 1.6.4 ready.\\n\"\n\tcompleted := containsActionCompletedCode([]byte(out))\n\n\tassert.True(\"'connect' successfully completed\", completed, t)\n}", "func TestOAuthVerifyState(t *testing.T) {\n\tservice := NewOAuth2Service(testClientID, testClientSecret, testScopes, testTokenURL, testAuthURL)\n\tservice.ExchangeAuthCodeForToken(testCode)\n}", "func (s *Server) TestConnection(ctx context.Context, request *TestConnection_Request) (response *TestConnection_Response, err error) {\n\tlogging.Log(fmt.Sprintf(\"TestConnection - incoming request: %+v\", request))\n\t// response = new(TestConnection_Response)\n\n\treturn &TestConnection_Response{Success: true}, err\n}", "func TestEmptyToken(t *testing.T) {\n\tassert := assert.New(t)\n\n\tmockedTpmProvider := new(tpmprovider.MockedTpmProvider)\n\tmockedTpmProvider.On(\"Close\").Return(nil)\n\tmockedTpmProvider.On(\"NvIndexExists\", mock.Anything).Return(true, nil)\n\tmockedTpmProvider.On(\"NvRelease\", mock.Anything, mock.Anything).Return(nil)\n\tmockedTpmProvider.On(\"NvDefine\", mock.Anything, mock.Anything, mock.Anything).Return(nil)\n\tmockedTpmProvider.On(\"NvWrite\", mock.Anything, mock.Anything, mock.Anything).Return(nil)\n\n\tmockedTpmFactory := tpmprovider.MockedTpmFactory{TpmProvider: mockedTpmProvider}\n\n\ttrustAgentService, err := CreateTrustAgentService(CreateTestConfig(), mockedTpmFactory)\n\tassert.NoError(err)\n\n\t// setup TA service to use JWT-based authentication\n\ttrustAgentService.router = mux.NewRouter()\n\ttrustAgentService.router.Use(middleware.NewTokenAuth(\"../test/mockJWTDir\", \"../test/mockCACertsDir\", mockRetrieveJWTSigningCerts, 
cacheTime))\n\ttrustAgentService.router.HandleFunc(\"/v2/tag\", errorHandler(requiresPermission(setAssetTag(CreateTestConfig(), mockedTpmFactory), []string{postDeployTagPerm}))).Methods(\"POST\")\n\n\tjsonString := `{\"tag\" : \"tHgfRQED1+pYgEZpq3dZC9ONmBCZKdx10LErTZs1k/k=\", \"hardware_uuid\" : \"7a569dad-2d82-49e4-9156-069b0065b262\"}`\n\n\trequest, err := http.NewRequest(\"POST\", \"/v2/tag\", bytes.NewBuffer([]byte(jsonString)))\n\tassert.NoError(err)\n\n\trequest.Header.Set(\"Content-Type\", \"application/json\")\n\trequest.Header.Add(\"Authorization\", \"Bearer \"+EmptyJWTToken)\n\n\trecorder := httptest.NewRecorder()\n\ttrustAgentService.router.ServeHTTP(recorder, request)\n\tresponse := recorder.Result()\n\tfmt.Printf(\"StatusCode: %d\\n\", response.StatusCode)\n\tassert.Equal(http.StatusUnauthorized, response.StatusCode)\n}", "func (m *MockWebsocketAppInterface) ChangeToken(userID int, csrfToken string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ChangeToken\", userID, csrfToken)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func TestConfigReloadRotateTokenAuthentication(t *testing.T) {\n\tserver, opts, config := runReloadServerWithConfig(t, \"./configs/reload/token_authentication_1.conf\")\n\tdefer server.Shutdown()\n\n\tdisconnected := make(chan struct{})\n\tasyncErr := make(chan error)\n\teh := func(nc *nats.Conn, sub *nats.Subscription, err error) { asyncErr <- err }\n\tdh := func(*nats.Conn) { disconnected <- struct{}{} }\n\n\t// Ensure we can connect as a sanity check.\n\taddr := fmt.Sprintf(\"nats://%s:%d\", opts.Host, opts.Port)\n\tnc, err := nats.Connect(addr, nats.Token(\"T0pS3cr3t\"), nats.ErrorHandler(eh), nats.DisconnectHandler(dh))\n\tif err != nil {\n\t\tt.Fatalf(\"Error creating client: %v\", err)\n\t}\n\tdefer nc.Close()\n\n\t// Change authentication token.\n\tchangeCurrentConfigContent(t, config, \"./configs/reload/token_authentication_2.conf\")\n\tif err := server.Reload(); err != nil {\n\t\tt.Fatalf(\"Error reloading 
config: %v\", err)\n\t}\n\n\t// Ensure connecting fails.\n\tif _, err := nats.Connect(addr, nats.Token(\"T0pS3cr3t\")); err == nil {\n\t\tt.Fatal(\"Expected connect to fail\")\n\t}\n\n\t// Ensure connecting succeeds when using new credentials.\n\tconn, err := nats.Connect(addr, nats.Token(\"passw0rd\"))\n\tif err != nil {\n\t\tt.Fatalf(\"Error creating client: %v\", err)\n\t}\n\tconn.Close()\n\n\t// Ensure the previous connection received an authorization error.\n\tselect {\n\tcase err := <-asyncErr:\n\t\tif err != nats.ErrAuthorization {\n\t\t\tt.Fatalf(\"Expected ErrAuthorization, got %v\", err)\n\t\t}\n\tcase <-time.After(2 * time.Second):\n\t\tt.Fatal(\"Expected authorization error\")\n\t}\n\n\t// Ensure the previous connection was disconnected.\n\tselect {\n\tcase <-disconnected:\n\tcase <-time.After(2 * time.Second):\n\t\tt.Fatal(\"Expected connection to be disconnected\")\n\t}\n}", "func TestOAuth2ClientCredentialsToken(t *testing.T) {\n\t// Setup\n\tmockCtrl := gomock.NewController(t)\n\tdefer mockCtrl.Finish()\n\n\t// Mock mockTokenProvider\n\tmockTokenProvider := mock.NewMockTokenProviderInterface(mockCtrl)\n\n\tgomock.InOrder(\n\t\t// First call returning abc and Bearer, expires within 1 second\n\t\tmockTokenProvider.\n\t\t\tEXPECT().\n\t\t\tGetToken(gomock.Any()).\n\t\t\tReturn(&oauth2.Token{\n\t\t\t\tAccessToken: \"abcd\",\n\t\t\t\tTokenType: \"Bearer\",\n\t\t\t\tExpiry: time.Now().In(time.UTC).Add(1 * time.Second),\n\t\t\t}, nil).\n\t\t\tTimes(1),\n\t)\n\n\t// Specify components metadata\n\tvar metadata middleware.Metadata\n\tmetadata.Properties = map[string]string{\n\t\t\"clientID\": \"testId\",\n\t\t\"clientSecret\": \"testSecret\",\n\t\t\"scopes\": \"ascope\",\n\t\t\"tokenURL\": \"https://localhost:9999\",\n\t\t\"headerName\": \"someHeader\",\n\t\t\"authStyle\": \"1\",\n\t}\n\n\t// Initialize middleware component and inject mocked TokenProvider\n\tlog := logger.NewLogger(\"oauth2clientcredentials.test\")\n\toauth2clientcredentialsMiddleware, _ := 
NewOAuth2ClientCredentialsMiddleware(log).(*Middleware)\n\toauth2clientcredentialsMiddleware.SetTokenProvider(mockTokenProvider)\n\thandler, err := oauth2clientcredentialsMiddleware.GetHandler(context.Background(), metadata)\n\trequire.NoError(t, err)\n\n\t// First handler call should return abc Token\n\tr := httptest.NewRequest(http.MethodGet, \"http://dapr.io\", nil)\n\tw := httptest.NewRecorder()\n\thandler(http.HandlerFunc(mockedRequestHandler)).ServeHTTP(w, r)\n\n\t// Assertion\n\tassert.Equal(t, \"Bearer abcd\", r.Header.Get(\"someHeader\"))\n}", "func (_RandomBeacon *RandomBeaconSession) TToken() (common.Address, error) {\n\treturn _RandomBeacon.Contract.TToken(&_RandomBeacon.CallOpts)\n}", "func connStream(t *testing.T) {\n}", "func (s *server) CheckToken(ctx context.Context, in *pb.LogRequest) (*pb.LogResponse, error) {\n\tlog.Printf(\"Received: %v\", \"Check token\")\n\tis, err := CheckToken(in.Email, in.Token)\n\tif err != nil {\n\t\treturn &pb.LogResponse{Sucess: false}, nil\n\t}\n\treturn &pb.LogResponse{Sucess: is}, nil\n}", "func TestTokenExpiracy(t *testing.T) {\n\tdb.InitDB()\n\tvar router *gin.Engine = routes.SetupRouter()\n\n\tos.Setenv(\"TOKEN_VALIDITY_MINUTES\", \"0\")\n\n\tvar user models.UserCreate = utils.CreateUser(\"Tom\", \"qwerty1234\", t, router)\n\tuser.Token = utils.ConnectUser(\"Tom\", \"qwerty1234\", t, router)\n\ttime.Sleep(1 * time.Second)\n\n\tvar url string = \"/v1/user/\" + strconv.Itoa(user.ID)\n\tvar bearer = \"Bearer \" + user.Token\n\trecord := httptest.NewRecorder()\n\trequest, _ := http.NewRequest(\"GET\", url, nil)\n\trequest.Header.Add(\"Content-Type\", \"application/json\")\n\trequest.Header.Add(\"Authorization\", bearer)\n\n\trouter.ServeHTTP(record, request)\n\n\tvar message Message\n\terr := json.Unmarshal([]byte(record.Body.String()), &message)\n\tif err != nil {\n\t\tlog.Fatal(\"Bad output: \", err.Error())\n\t\tt.Fail()\n\t}\n\n\tassert.Equal(t, record.Code, 401)\n\tassert.Equal(t, message.Message, \"Token 
expired.\")\n\n\tos.Setenv(\"TOKEN_VALIDITY_MINUTES\", \"15\")\n\n\tuser.Token = utils.ConnectUser(\"Tom\", \"qwerty1234\", t, router)\n\n\tutils.CleanUser(user.ID, user.Token, t, router)\n\tdb.CloseDB()\n}", "func (m *MockConn) Send(arg0 event.Event) {\n\tm.ctrl.Call(m, \"Send\", arg0)\n}", "func TestMockOnHeartbeat(t *testing.T) {\n\tmockServer := &MockRailsServer{T: t, Behaviour: MockHeartbeat}\n\n\tdialer := wstest.NewDialer(mockServer)\n\tdialer.HandshakeTimeout = time.Second * 2\n\n\tclient := NewClient(fakeEndpoint).WithDialer(dialer)\n\n\tcalled := make(chan struct{})\n\tcount := 0\n\n\tclient.OnHeartbeat(func(conn *websocket.Conn, payload *Payload) error {\n\t\tcount++\n\t\tif count >= 4 {\n\t\t\tcalled <- struct{}{}\n\t\t}\n\t\treturn nil\n\t})\n\terr := client.Serve()\n\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\treceiveSleepMs(2000, called, t)\n}", "func TestHandleConnection(t *testing.T) {\n\ts := SetUpSuite(t)\n\ts.checkHTTPResponse(t, s.clientCertificate, func(resp *http.Response) {\n\t\trequire.Equal(t, resp.StatusCode, http.StatusOK)\n\t\tbuf, err := io.ReadAll(resp.Body)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, strings.TrimSpace(string(buf)), s.message)\n\t})\n}", "func TestServiceTokenEvent_WithInvalidCID_CausesPanic(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.GetResource(func(r res.GetRequest) { r.NotFound() }))\n\t}, func(s *restest.Session) {\n\t\trestest.AssertPanic(t, func() {\n\t\t\ts.Service().TokenEvent(\"invalid.*.cid\", nil)\n\t\t})\n\t})\n}", "func TestUserTokenRefreshSuccess(t *testing.T) {\n\tdb := setupDB()\n\tdefer db.Close()\n\trouter := setupRouter()\n\n\tw := httptest.NewRecorder()\n\n\treq, _ := http.NewRequest(\"GET\", \"/token/refresh\", nil)\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\treq.Header.Add(\"Authorization\", \"Bearer \" + Token)\n\trouter.ServeHTTP(w, req)\n\n\tassert.Equal(t, http.StatusOK, w.Code)\n\tvar token 
auth.Token\n\tjson.Unmarshal([]byte(w.Body.String()), &token)\n\tassert.NotEmpty(t, token.Expire)\t// TODO: equal to or later than `now`\n\tassert.NotEmpty(t, token.Token)\t\t// TODO: validate it's a correct JWT token\n\tToken = token.Token\n}", "func TestTLSConfigEvent(t *testing.T) {\n\tvar err error\n\terr = testcert.GenerateCert(\"mail2.guerrillamail.com\", \"\", 365*24*time.Hour, false, 2048, \"P256\", \"../../tests/\")\n\tif err != nil {\n\t\tt.Error(\"failed to generate a test certificate\", err)\n\t\tt.FailNow()\n\t}\n\tdefer cleanTestArtifacts(t)\n\tmainlog, err = getTestLog()\n\tif err != nil {\n\t\tt.Error(\"could not get logger,\", err)\n\t\tt.FailNow()\n\t}\n\tif err := ioutil.WriteFile(\"configJsonD.json\", []byte(configJsonD), 0644); err != nil {\n\t\tt.Error(err)\n\t\tt.FailNow()\n\t}\n\tconf := &guerrilla.AppConfig{} // blank one\n\tif err = conf.Load([]byte(configJsonD)); err != nil { // load configJsonD\n\t\tt.Error(err)\n\t\tt.FailNow()\n\t}\n\tcmd := &cobra.Command{}\n\tconfigPath = \"configJsonD.json\"\n\n\tgo func() {\n\t\tserve(cmd, []string{})\n\t}()\n\n\t// wait for server to start\n\tif _, err := grepTestlog(\"Listening on TCP 127.0.0.1:2552\", 0); err != nil {\n\t\tt.Error(\"server didn't start\")\n\t}\n\n\t// Test STARTTLS handshake\n\ttestTlsHandshake := func() {\n\t\tif conn, buffin, err := test.Connect(conf.Servers[0], 20); err != nil {\n\t\t\tt.Error(\"Could not connect to server\", conf.Servers[0].ListenInterface, err)\n\t\t} else {\n\t\t\tif result, err := test.Command(conn, buffin, \"HELO example.com\"); err == nil {\n\t\t\t\texpect := \"250 mail.test.com Hello\"\n\t\t\t\tif strings.Index(result, expect) != 0 {\n\t\t\t\t\tt.Error(\"Expected\", expect, \"but got\", result)\n\t\t\t\t} else {\n\t\t\t\t\tif result, err = test.Command(conn, buffin, \"STARTTLS\"); err == nil {\n\t\t\t\t\t\texpect := \"220 2.0.0 Ready to start TLS\"\n\t\t\t\t\t\tif strings.Index(result, expect) != 0 {\n\t\t\t\t\t\t\tt.Error(\"Expected:\", expect, \"but 
got:\", result)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\ttlsConn := tls.Client(conn, &tls.Config{\n\t\t\t\t\t\t\t\tInsecureSkipVerify: true,\n\t\t\t\t\t\t\t\tServerName: \"127.0.0.1\",\n\t\t\t\t\t\t\t})\n\t\t\t\t\t\t\tif err := tlsConn.Handshake(); err != nil {\n\t\t\t\t\t\t\t\tt.Error(\"Failed to handshake\", conf.Servers[0].ListenInterface)\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\tconn = tlsConn\n\t\t\t\t\t\t\t\tmainlog.Info(\"TLS Handshake succeeded\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\t_ = conn.Close()\n\t\t}\n\t}\n\ttestTlsHandshake()\n\n\t// TLS Handshake succeeded?\n\tif _, err := grepTestlog(\"TLS Handshake succeeded\", 0); err != nil {\n\t\tt.Error(\"TLS Handshake did not succeed\")\n\t\tt.FailNow()\n\t}\n\n\t// now delete old certs, configure new certs, and send a sighup to load them in\n\tif err := deleteIfExists(\"../../tests/mail2.guerrillamail.com.cert.pem\"); err != nil {\n\t\tt.Error(\"could not delete ../../tests/mail2.guerrillamail.com.cert.pem\", err)\n\t}\n\tif err := deleteIfExists(\"../../tests/mail2.guerrillamail.com.key.pem\"); err != nil {\n\t\tt.Error(\"could not delete ../../tests/mail2.guerrillamail.com.key.pem\", err)\n\t}\n\ttime.Sleep(testPauseDuration) // need to pause so that the new certs have different timestamps!\n\t// generate a new cert\n\terr = testcert.GenerateCert(\"mail2.guerrillamail.com\", \"\", 365*24*time.Hour, false, 2048, \"P256\", \"../../tests/\")\n\tif err != nil {\n\t\tt.Error(\"failed to generate a test certificate\", err)\n\t\tt.FailNow()\n\t}\n\t// pause for generated cert to output (don't need, since we've fsynced)\n\t// time.Sleep(testPauseDuration) // (don't need, since we've fsynced)\n\t// did cert output?\n\tif _, err := os.Stat(\"../../tests/mail2.guerrillamail.com.cert.pem\"); err != nil {\n\t\tt.Error(\"Did not create cert \", err)\n\t}\n\n\tsigHup()\n\n\t// wait for config to reload\n\tif _, err := grepTestlog(\"Server [127.0.0.1:4655] re-opened\", 0); err != nil 
{\n\t\tt.Error(\"server didn't catch sighup\")\n\t}\n\n\t// did tls configuration reload as expected?\n\tif _, err := grepTestlog(\"new TLS configuration loaded\", 0); err != nil {\n\t\tt.Error(\"server didn't catch sighup\")\n\t}\n\n\t// test again\n\ttestTlsHandshake()\n\n\t// after line 25\n\tif _, err := grepTestlog(\"TLS Handshake succeeded\", 25); err != nil {\n\t\tt.Error(\"TLS Handshake did not succeed\")\n\t\tt.FailNow()\n\t}\n\n\td.Shutdown()\n\n\t// wait for shutdown\n\tif _, err := grepTestlog(\"Backend shutdown completed\", 0); err != nil {\n\t\tt.Error(\"server didn't stop\")\n\t}\n\n}", "func (_UpkeepRegistrationRequests *UpkeepRegistrationRequestsSession) OnTokenTransfer(arg0 common.Address, amount *big.Int, data []byte) (*types.Transaction, error) {\n\treturn _UpkeepRegistrationRequests.Contract.OnTokenTransfer(&_UpkeepRegistrationRequests.TransactOpts, arg0, amount, data)\n}", "func test_tokenNameFromOysterPearl(t *testing.T) {\n\tt.Skip(nil)\n\t// test ethClient\n\tvar backend, _ = ethclient.Dial(oysterbyNetwork)\n\toysterPearl, err := eth_gateway.NewOysterPearl(oysterContract, backend)\n\tif err != nil {\n\t\tt.Fatalf(\"unable to access contract instance at :%v\", err)\n\t}\n\tname, err := oysterPearl.Name(nil)\n\tif err != nil {\n\t\tt.Fatalf(\"unable to access contract name : %v\", err)\n\t}\n\tt.Logf(\"oyster pearl contract name :%v\", name)\n}", "func TestPacket_VerifySuccess(t *testing.T) {\n\ttearDown := setUp(t)\n\tdefer tearDown(t)\n\n\terr := packet.WriteChecksum()\n\tassert.NoError(t, err)\n\n\terr = packet.Verify(deviceToken)\n\tassert.NoError(t, err)\n}", "func (p *Session) Token() (token []byte) {\n\ttoken, _ = p.socket.Token()\n\treturn\n}", "func (s *TrackerSuite) TestStartNewEvent() {\n\n\tevent := s.service.StartNew()\n\tassert.NotEqual(s.T(), nil, event)\n}", "func TestNatsAdaptorPublishWhenConnectedWithAuth(t *testing.T) {\n\tt.Skip(\"TODO: implement this test without requiring actual server connection\")\n\ta := 
NewAdaptorWithAuth(\"localhost:4222\", 9999, \"test\", \"testwd\")\n\ta.Connect()\n\tdata := []byte(\"o\")\n\tgobottest.Assert(t, a.Publish(\"test\", data), true)\n}", "func (tc *nodeConfigTestCase) checkEvent(f *framework.Framework) {\n\tconst (\n\t\ttimeout = time.Minute\n\t\tinterval = time.Second\n\t)\n\tEventually(func() error {\n\t\tevents, err := f.ClientSet.CoreV1().Events(\"\").List(metav1.ListOptions{})\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"checkEvent: case %s: %v\", tc.desc, err)\n\t\t}\n\t\t// find config changed event with most recent timestamp\n\t\tvar recent *apiv1.Event\n\t\tfor i := range events.Items {\n\t\t\tif events.Items[i].Reason == controller.KubeletConfigChangedEventReason {\n\t\t\t\tif recent == nil {\n\t\t\t\t\trecent = &events.Items[i]\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\t// for these events, first and last timestamp are always the same\n\t\t\t\tif events.Items[i].FirstTimestamp.Time.After(recent.FirstTimestamp.Time) {\n\t\t\t\t\trecent = &events.Items[i]\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t// we expect at least one config change event\n\t\tif recent == nil {\n\t\t\treturn fmt.Errorf(\"checkEvent: case %s: no events found with reason %s\", tc.desc, controller.KubeletConfigChangedEventReason)\n\t\t}\n\t\t// construct expected message, based on the test case\n\t\texpectMessage := controller.LocalEventMessage\n\t\tif tc.configSource != nil {\n\t\t\tif tc.configSource.ConfigMap != nil {\n\t\t\t\texpectMessage = fmt.Sprintf(controller.RemoteEventMessageFmt,\n\t\t\t\t\tfmt.Sprintf(\"/api/v1/namespaces/%s/configmaps/%s\", tc.configSource.ConfigMap.Namespace, tc.configSource.ConfigMap.Name),\n\t\t\t\t\ttc.configMap.UID, tc.configMap.ResourceVersion, tc.configSource.ConfigMap.KubeletConfigKey)\n\t\t\t}\n\t\t}\n\t\t// compare messages\n\t\tif expectMessage != recent.Message {\n\t\t\treturn fmt.Errorf(\"checkEvent: case %s: expected event message %q but got %q\", tc.desc, expectMessage, recent.Message)\n\t\t}\n\t\treturn nil\n\t}, timeout, 
interval).Should(BeNil())\n}", "func TestsetTokenCookie(t *testing.T) {\n\thand := New(nil)\n\n\twriter := httptest.NewRecorder()\n\treq := dummyGet()\n\n\ttoken := []byte(\"dummy\")\n\thand.setTokenCookie(writer, req, token)\n\n\theader := writer.Header().Get(\"Set-Cookie\")\n\texpected_part := fmt.Sprintf(\"csrf_token=%s;\", token)\n\n\tif !strings.Contains(header, expected_part) {\n\t\tt.Errorf(\"Expected header to contain %v, it doesn't. The header is %v.\",\n\t\t\texpected_part, header)\n\t}\n\n\ttokenInContext := unmaskToken(b64decode(Token(req)))\n\tif !bytes.Equal(tokenInContext, token) {\n\t\tt.Errorf(\"RegenerateToken didn't set the token in the context map!\"+\n\t\t\t\" Expected %v, got %v\", token, tokenInContext)\n\t}\n}", "func TestEventController_PushData(t *testing.T) {\n\tassert.New(t)\n\trequest = []byte(`{\"eventType\":\"Usual\",\"sessionStart\":1476628565,\"sessionEnd\":1476628965,\"linkClicked\":\"https://blog.golang.org/c-go-cgo\",\"timestamp\":12039109203,\"params\":{\"C\":\"c++\",\"D\":\"D++\",\"R\":\"R is not a real language\"}}`)\n\terr := controller.PushData(initContext())\n\tif err != nil {\n\t\tt.Error(\"TestEventController_PushData failed -> \", err.Error())\n\t}\n}", "func TestOAuthServiceExchange(t *testing.T) {\n\tservice := NewOAuth2Service(testClientID, testClientSecret, testScopes, testTokenURL, testAuthURL)\n\tservice.ExchangeAuthCodeForToken(testCode)\n}", "func TestNextEventCancel(t *testing.T) {\n\tmaybeSkipIntegrationTest(t)\n\n\ts, err := NewSession()\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create session: %v\", err)\n\t}\n\tdefer s.Close()\n\n\tif err := s.Subscribe(\"ike-updown\"); err != nil {\n\t\tt.Fatalf(\"Failed to start event listener: %v\", err)\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)\n\tdefer cancel()\n\n\t_, err = s.NextEvent(ctx)\n\tif err != ctx.Err() {\n\t\tt.Fatalf(\"Expected to get context's timeout error after not receiving event, got: %v\", err)\n\t}\n}", 
"func Example_validateTokenTest() {\n\tconfigEnv := os.Getenv(\"CONFIG\")\n\ttokenEnv := os.Getenv(\"TOKEN\")\n\n\tif configEnv == \"\" || tokenEnv == \"\" {\n\t\tfmt.Println(\"Please see the documentation of Example_validateTokenTest; missing environment variables\")\n\t\treturn\n\t}\n\n\t// Start Server\n\tif config, err := gauth.FromCredentialsFile(configEnv, \"http://localhost:6060/gauth\", nil); err != nil {\n\t\tfmt.Println(\"Could not open config file\", configEnv)\n\t} else {\n\t\tserver := &http.Server{}\n\t\tdefer server.Close()\n\t\tserver.Handler = gauth.Middleware(config, &gauth.MemorySessions{}, nil)\n\t\tserver.Addr = \":6060\"\n\t\tgo server.ListenAndServe()\n\t}\n\n\t// format token\n\ttokenInfo := map[string]interface{}{\n\t\t\"access_token\": tokenEnv,\n\t\t\"expires_at\": float64(time.Now().Unix() + 2000),\n\t}\n\ttoken, _ := json.Marshal(tokenInfo)\n\n\t// make validate_token call\n\tresp, err := http.Post(\"http://localhost:6060/validate_token\", \"application/json\", bytes.NewReader(token))\n\tif err != nil {\n\t\tfmt.Println(\"Failed with error\", err)\n\t} else if resp.StatusCode != http.StatusOK {\n\t\tfmt.Println(\"Failed with status\", resp.Status)\n\t} else {\n\t\tdefer resp.Body.Close()\n\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Failed reading body\", err)\n\t\t} else if len(body) == 0 {\n\t\t\tfmt.Println(\"Succeeded!\")\n\t\t} else {\n\t\t\tfmt.Println(\"Unexpected body\", string(body[0:10]))\n\t\t}\n\t}\n\n\t// Output: Succeeded!\n}", "func TestBatchOnConnected(t *testing.T) {\n\tsw := &mocks.SavingWriter{}\n\tbatch := Batch{sw}\n\terr := batch.OnConnected(\"download\", \"FQDN\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif len(sw.Data) != 1 {\n\t\tt.Fatal(\"invalid length\")\n\t}\n\tvar event struct {\n\t\tKey string `json:\"key\"`\n\t\tValue struct {\n\t\t\tServer string `json:\"server\"`\n\t\t\tSubtest string `json:\"subtest\"`\n\t\t} `json:\"value\"`\n\t}\n\terr = 
json.Unmarshal(sw.Data[0], &event)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif event.Key != \"status.measurement_begin\" {\n\t\tt.Fatal(\"Unexpected event key\")\n\t}\n\tif event.Value.Subtest != \"download\" {\n\t\tt.Fatal(\"Unexpected subtest field value\")\n\t}\n\tif event.Value.Server != \"FQDN\" {\n\t\tt.Fatal(\"Unexpected failure field value\")\n\t}\n}", "func TestRequestAuditEvents(t *testing.T) {\n\ttesthttp := httptest.NewUnstartedServer(http.HandlerFunc(func(_ http.ResponseWriter, _ *http.Request) {}))\n\ttesthttp.Config.TLSConfig = &tls.Config{Time: clockwork.NewFakeClock().Now}\n\ttesthttp.Start()\n\n\tapp, err := types.NewAppV3(types.Metadata{\n\t\tName: \"foo\",\n\t\tLabels: staticLabels,\n\t}, types.AppSpecV3{\n\t\tURI: testhttp.URL,\n\t\tPublicAddr: \"foo.example.com\",\n\t\tDynamicLabels: types.LabelsToV2(dynamicLabels),\n\t})\n\trequire.NoError(t, err)\n\n\trequestEventsReceived := atomic.NewUint64(0)\n\tserverStreamer, err := events.NewCallbackStreamer(events.CallbackStreamerConfig{\n\t\tInner: events.NewDiscardEmitter(),\n\t\tOnEmitAuditEvent: func(_ context.Context, _ libsession.ID, event apievents.AuditEvent) error {\n\t\t\tif event.GetType() == events.AppSessionRequestEvent {\n\t\t\t\trequestEventsReceived.Inc()\n\n\t\t\t\texpectedEvent := &apievents.AppSessionRequest{\n\t\t\t\t\tMetadata: apievents.Metadata{\n\t\t\t\t\t\tType: events.AppSessionRequestEvent,\n\t\t\t\t\t\tCode: events.AppSessionRequestCode,\n\t\t\t\t\t},\n\t\t\t\t\tAppMetadata: apievents.AppMetadata{\n\t\t\t\t\t\tAppURI: app.Spec.URI,\n\t\t\t\t\t\tAppPublicAddr: app.Spec.PublicAddr,\n\t\t\t\t\t\tAppName: app.Metadata.Name,\n\t\t\t\t\t},\n\t\t\t\t\tStatusCode: 200,\n\t\t\t\t\tMethod: \"GET\",\n\t\t\t\t\tPath: \"/\",\n\t\t\t\t}\n\t\t\t\trequire.Empty(t, cmp.Diff(\n\t\t\t\t\texpectedEvent,\n\t\t\t\t\tevent,\n\t\t\t\t\tcmpopts.IgnoreTypes(apievents.ServerMetadata{}, apievents.SessionMetadata{}, apievents.UserMetadata{}, 
apievents.ConnectionMetadata{}),\n\t\t\t\t\tcmpopts.IgnoreFields(apievents.Metadata{}, \"ID\", \"ClusterName\", \"Time\"),\n\t\t\t\t\tcmpopts.IgnoreFields(apievents.AppSessionChunk{}, \"SessionChunkID\"),\n\t\t\t\t))\n\t\t\t}\n\n\t\t\treturn nil\n\t\t},\n\t})\n\trequire.NoError(t, err)\n\n\ts := SetUpSuiteWithConfig(t, suiteConfig{\n\t\tServerStreamer: serverStreamer,\n\t\tApps: types.Apps{app},\n\t})\n\n\t// make a request to generate events.\n\ts.checkHTTPResponse(t, s.clientCertificate, func(_ *http.Response) {\n\t\t// wait until request events are generated before closing the server.\n\t\trequire.Eventually(t, func() bool {\n\t\t\treturn requestEventsReceived.Load() == 1\n\t\t}, 500*time.Millisecond, 50*time.Millisecond, \"app.request event not generated\")\n\t})\n\n\tsearchEvents, _, err := s.authServer.AuditLog.SearchEvents(time.Time{}, time.Now().Add(time.Minute), \"\", []string{events.AppSessionChunkEvent}, 10, types.EventOrderDescending, \"\")\n\trequire.NoError(t, err)\n\trequire.Len(t, searchEvents, 1)\n\n\texpectedEvent := &apievents.AppSessionChunk{\n\t\tMetadata: apievents.Metadata{\n\t\t\tType: events.AppSessionChunkEvent,\n\t\t\tCode: events.AppSessionChunkCode,\n\t\t},\n\t\tAppMetadata: apievents.AppMetadata{\n\t\t\tAppURI: app.Spec.URI,\n\t\t\tAppPublicAddr: app.Spec.PublicAddr,\n\t\t\tAppName: app.Metadata.Name,\n\t\t},\n\t}\n\trequire.Empty(t, cmp.Diff(\n\t\texpectedEvent,\n\t\tsearchEvents[0],\n\t\tcmpopts.IgnoreTypes(apievents.ServerMetadata{}, apievents.SessionMetadata{}, apievents.UserMetadata{}, apievents.ConnectionMetadata{}),\n\t\tcmpopts.IgnoreFields(apievents.Metadata{}, \"ID\", \"ClusterName\", \"Time\"),\n\t\tcmpopts.IgnoreFields(apievents.AppSessionChunk{}, \"SessionChunkID\"),\n\t))\n}", "func (p *parser) expectToken(t token.Type) tree.Token {\n\tp.expect(t)\n\treturn p.tokenNext()\n}", "func TestOAuthServiceAccountClientEvent(t *testing.T) {\n\n\ttests := map[string]struct {\n\t\tannotationPrefix string\n\t\tannotation 
string\n\t\texpectedEventReason string\n\t\texpectedEventMsg string\n\t\tnumEvents int\n\t\texpectBadRequest bool\n\t}{\n\t\t\"test-good-url\": {\n\t\t\tannotationPrefix: saoauth.OAuthRedirectModelAnnotationURIPrefix + \"one\",\n\t\t\tannotation: \"/oauthcallback\",\n\t\t\tnumEvents: 0,\n\t\t},\n\t\t\"test-bad-url\": {\n\t\t\tannotationPrefix: saoauth.OAuthRedirectModelAnnotationURIPrefix + \"one\",\n\t\t\tannotation: \"foo:foo\",\n\t\t\texpectedEventReason: \"NoSAOAuthRedirectURIs\",\n\t\t\texpectedEventMsg: \"system:serviceaccount:\" + projectName + \":\" + saName + \" has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.<some-value>=<redirect> or create a dynamic URI using serviceaccounts.openshift.io/oauth-redirectreference.<some-value>=<reference>\",\n\t\t\tnumEvents: 1,\n\t\t\texpectBadRequest: true,\n\t\t},\n\t\t\"test-bad-url-parse\": {\n\t\t\tannotationPrefix: saoauth.OAuthRedirectModelAnnotationURIPrefix + \"one\",\n\t\t\tannotation: \"::\",\n\t\t\texpectedEventReason: \"NoSAOAuthRedirectURIs\",\n\t\t\texpectedEventMsg: \"[parse ::: missing protocol scheme, system:serviceaccount:\" + projectName + \":\" + saName + \" has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.<some-value>=<redirect> or create a dynamic URI using serviceaccounts.openshift.io/oauth-redirectreference.<some-value>=<reference>]\",\n\t\t\tnumEvents: 1,\n\t\t\texpectBadRequest: true,\n\t\t},\n\t\t\"test-bad-redirect-annotation-kind\": {\n\t\t\tannotationPrefix: saoauth.OAuthRedirectModelAnnotationReferencePrefix + \"1\",\n\t\t\tannotation: `{\"kind\":\"foo\",\"apiVersion\":\"oauth.openshift.io/v1\",\"metadata\":{\"creationTimestamp\":null},\"reference\":{\"group\":\"foo\",\"kind\":\"Route\",\"name\":\"route1\"}}`,\n\t\t\texpectedEventReason: \"NoSAOAuthRedirectURIs\",\n\t\t\texpectedEventMsg: `[no kind \"foo\" is registered for version \"oauth.openshift.io/v1\" in scheme 
\"github.com/openshift/origin/pkg/serviceaccounts/oauthclient/oauthclientregistry.go:54\", system:serviceaccount:` + projectName + \":\" + saName + \" has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.<some-value>=<redirect> or create a dynamic URI using serviceaccounts.openshift.io/oauth-redirectreference.<some-value>=<reference>]\",\n\t\t\tnumEvents: 1,\n\t\t\texpectBadRequest: true,\n\t\t},\n\t\t\"test-bad-redirect-type-parse\": {\n\t\t\tannotationPrefix: saoauth.OAuthRedirectModelAnnotationReferencePrefix + \"1\",\n\t\t\tannotation: `{asdf\":\"adsf\"}`,\n\t\t\texpectedEventReason: \"NoSAOAuthRedirectURIs\",\n\t\t\texpectedEventMsg: `[couldn't get version/kind; json parse error: invalid character 'a' looking for beginning of object key string, system:serviceaccount:` + projectName + \":\" + saName + \" has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.<some-value>=<redirect> or create a dynamic URI using serviceaccounts.openshift.io/oauth-redirectreference.<some-value>=<reference>]\",\n\t\t\tnumEvents: 1,\n\t\t\texpectBadRequest: true,\n\t\t},\n\t\t\"test-bad-redirect-route-not-found\": {\n\t\t\tannotationPrefix: saoauth.OAuthRedirectModelAnnotationReferencePrefix + \"1\",\n\t\t\tannotation: buildRedirectObjectReferenceString(t, \"Route\", \"route1\", \"route.openshift.io\"),\n\t\t\texpectedEventReason: \"NoSAOAuthRedirectURIs\",\n\t\t\texpectedEventMsg: `[routes.route.openshift.io \"route1\" not found, system:serviceaccount:` + projectName + \":\" + saName + \" has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.<some-value>=<redirect> or create a dynamic URI using serviceaccounts.openshift.io/oauth-redirectreference.<some-value>=<reference>]\",\n\t\t\tnumEvents: 1,\n\t\t\texpectBadRequest: true,\n\t\t},\n\t\t\"test-bad-redirect-route-wrong-group\": {\n\t\t\tannotationPrefix: saoauth.OAuthRedirectModelAnnotationReferencePrefix + \"1\",\n\t\t\tannotation: buildRedirectObjectReferenceString(t, 
\"Route\", \"route1\", \"foo\"),\n\t\t\texpectedEventReason: \"NoSAOAuthRedirectURIs\",\n\t\t\texpectedEventMsg: `system:serviceaccount:` + projectName + \":\" + saName + \" has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.<some-value>=<redirect> or create a dynamic URI using serviceaccounts.openshift.io/oauth-redirectreference.<some-value>=<reference>\",\n\t\t\tnumEvents: 1,\n\t\t\texpectBadRequest: true,\n\t\t},\n\t\t\"test-bad-redirect-reference-kind\": {\n\t\t\tannotationPrefix: saoauth.OAuthRedirectModelAnnotationReferencePrefix + \"1\",\n\t\t\tannotation: buildRedirectObjectReferenceString(t, \"foo\", \"route1\", \"route.openshift.io\"),\n\t\t\texpectedEventReason: \"NoSAOAuthRedirectURIs\",\n\t\t\texpectedEventMsg: `system:serviceaccount:` + projectName + \":\" + saName + \" has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.<some-value>=<redirect> or create a dynamic URI using serviceaccounts.openshift.io/oauth-redirectreference.<some-value>=<reference>\",\n\t\t\tnumEvents: 1,\n\t\t\texpectBadRequest: true,\n\t\t},\n\t}\n\n\ttestServer, err := setupTestOAuthServer()\n\tif err != nil {\n\t\tt.Fatalf(\"error setting up test server: %s\", err)\n\t}\n\n\tdefer testServer.oauthServer.Close()\n\tdefer testserver.CleanupMasterEtcd(t, testServer.masterConfig)\n\n\tfor tcName, testCase := range tests {\n\t\tvar redirect string = testServer.oauthServer.URL + \"/oauthcallback\"\n\t\tif testCase.numEvents != 0 {\n\t\t\tredirect = testCase.annotation\n\t\t}\n\n\t\tt.Logf(\"%s: annotationPrefix %s, annotation %s\", tcName, testCase.annotationPrefix, testCase.annotation)\n\t\tsa, err := setupTestSA(testServer.clusterAdminKubeClient, testCase.annotationPrefix, redirect)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"%s: error setting up test SA: %s\", tcName, err)\n\t\t}\n\n\t\tsecret, err := setupTestSecrets(testServer.clusterAdminKubeClient, sa)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"%s: error setting up test secrets: %s\", tcName, 
err)\n\t\t}\n\n\t\trunTestOAuthFlow(t, testServer, sa, secret, redirect, testCase.expectBadRequest)\n\n\t\t// Check events with a short poll to stop flakes\n\t\tvar evList *kapi.EventList\n\t\terr = wait.Poll(time.Second, 5*time.Second, func() (bool, error) {\n\t\t\tevList, err = testServer.clusterAdminKubeClient.Core().Events(projectName).List(metav1.ListOptions{})\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tif len(evList.Items) < testCase.numEvents {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t\treturn true, nil\n\t\t})\n\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"%s: err polling for events\", tcName)\n\t\t}\n\n\t\tevents := collectEventsWithReason(evList, testCase.expectedEventReason)\n\n\t\tif testCase.numEvents != len(events) {\n\t\t\tt.Fatalf(\"%s: expected %d events, found %d\", tcName, testCase.numEvents, len(events))\n\t\t}\n\n\t\tif testCase.numEvents != 0 && events[0].Message != testCase.expectedEventMsg {\n\t\t\tt.Fatalf(\"%s: expected event message %s, got %s\", tcName, testCase.expectedEventMsg, events[0].Message)\n\t\t}\n\n\t\terr = testServer.clusterAdminKubeClient.Core().Events(projectName).DeleteCollection(&metav1.DeleteOptions{}, metav1.ListOptions{})\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"%s: error deleting events: %s\", tcName, err)\n\t\t}\n\t}\n}", "func TestServerContextIdCaptured(t *testing.T) {\n\tvar (\n\t\trequest = `{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"test_echoCtxId\"}` + \"\\n\"\n\t\twantResp = `{\"jsonrpc\":\"2.0\",\"id\":1,\"result\":1}` + \"\\n\"\n\t)\n\n\tserver := newTestServer()\n\tdefer server.Stop()\n\tlistener, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\tif err != nil {\n\t\tt.Fatal(\"can't listen:\", err)\n\t}\n\tdefer listener.Close()\n\tgo server.ServeListener(listener)\n\n\tconn, err := net.Dial(\"tcp\", listener.Addr().String())\n\tif err != nil {\n\t\tt.Fatal(\"can't dial:\", err)\n\t}\n\tdefer conn.Close()\n\t// Write the request, then half-close the connection so the server stops 
reading.\n\tconn.Write([]byte(request))\n\tconn.(*net.TCPConn).CloseWrite()\n\t// Now try to get the response.\n\tbuf := make([]byte, 2000)\n\tn, err := conn.Read(buf)\n\n\tassert.NoErrorf(t, err, \"read error:\", err)\n\tassert.Equalf(t, buf[:n], []byte(wantResp), \"wrong response: %s\", buf[:n])\n}", "func (_UpkeepRegistrationRequests *UpkeepRegistrationRequestsTransactor) OnTokenTransfer(opts *bind.TransactOpts, arg0 common.Address, amount *big.Int, data []byte) (*types.Transaction, error) {\n\treturn _UpkeepRegistrationRequests.contract.Transact(opts, \"onTokenTransfer\", arg0, amount, data)\n}", "func TestAssetSysCC_RegisterToken(t *testing.T) {\n\n\tfmt.Println(\"-----------------------------------\")\n\tfmt.Println(\"Test1: registerToken\")\n\n\tascc := new(AssetSysCC)\n\tstub := shim.NewMockStub(\"ascc\", ascc)\n\tcheckInit(t, stub, [][]byte{[]byte(\"\")})\n\n\tres_test3 := stub.MockInvoke(\"1\", [][]byte{[]byte(\"registerToken\"), []byte(\"SSToken\"), []byte(\"250\"), []byte(\"18\"), []byte(MAddress[:])})\n\n\tif res_test3.Status != shim.OK {\n\t\tfmt.Println(\"Register token failed\", string(res_test3.Message))\n\t\tt.FailNow()\n\t}\n\n\tfmt.Println(\"Test registerToken Success!\")\n\n}", "func (p McpStartupPacket) Token() string {\n\treturn hex.EncodeToString(p[3:67])\n}", "func TestCheckEvents(t *testing.T) {\n\ttestNamespace := \"test_namespace\"\n\tcha := make(chan *events.Envelope)\n\terrorsCh := make(chan error)\n\tme := &mockEvt{\n\t\tmockSubscribe: func(ctx context.Context, filter ...string) (ch <-chan *events.Envelope, errs <-chan error) {\n\t\t\treturn cha, errorsCh\n\t\t},\n\t}\n\titf := &fake.MockedContainerdClient{\n\t\tMockEvents: func() containerd.EventService {\n\t\t\treturn containerd.EventService(me)\n\t\t},\n\t\tMockNamespaces: func(ctx context.Context) ([]string, error) {\n\t\t\treturn []string{testNamespace}, nil\n\t\t},\n\t\tMockContainers: func(namespace string) ([]containerd.Container, error) {\n\t\t\treturn nil, 
nil\n\t\t},\n\t}\n\t// Test the basic listener\n\tsub := createEventSubscriber(\"subscriberTest1\", containerdutil.ContainerdItf(itf), nil)\n\tsub.CheckEvents()\n\n\ttp := &containerdevents.TaskPaused{\n\t\tContainerID: \"42\",\n\t}\n\n\tvp, err := typeurl.MarshalAny(tp)\n\tassert.NoError(t, err)\n\n\ten := events.Envelope{\n\t\tTimestamp: time.Now(),\n\t\tTopic: \"/tasks/paused\",\n\t\tEvent: vp,\n\t}\n\tcha <- &en\n\n\ttimeout := time.NewTimer(2 * time.Second)\n\tticker := time.NewTicker(5 * time.Millisecond)\n\tcondition := false\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tif !sub.isRunning() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcondition = true\n\t\tcase <-timeout.C:\n\t\t\trequire.FailNow(t, \"Timeout waiting event listener to be healthy\")\n\t\t}\n\t\tif condition {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tev := sub.Flush(time.Now().Unix())\n\tassert.Len(t, ev, 1)\n\tassert.Equal(t, ev[0].Topic, \"/tasks/paused\")\n\terrorsCh <- fmt.Errorf(\"chan breaker\")\n\tcondition = false\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tif sub.isRunning() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcondition = true\n\t\tcase <-timeout.C:\n\t\t\trequire.FailNow(t, \"Timeout waiting for error\")\n\t\t}\n\t\tif condition {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Test the multiple events one unsupported\n\tsub = createEventSubscriber(\"subscriberTest2\", containerdutil.ContainerdItf(itf), nil)\n\tsub.CheckEvents()\n\n\ttk := &containerdevents.TaskOOM{\n\t\tContainerID: \"42\",\n\t}\n\tvk, err := typeurl.MarshalAny(tk)\n\tassert.NoError(t, err)\n\n\tek := events.Envelope{\n\t\tTimestamp: time.Now(),\n\t\tTopic: \"/tasks/oom\",\n\t\tEvent: vk,\n\t}\n\n\tnd := &containerdevents.NamespaceDelete{\n\t\tName: \"k10s.io\",\n\t}\n\tvnd, err := typeurl.MarshalAny(nd)\n\tassert.NoError(t, err)\n\n\tevnd := events.Envelope{\n\t\tTimestamp: time.Now(),\n\t\tTopic: \"/namespaces/delete\",\n\t\tEvent: vnd,\n\t}\n\n\tcha <- &ek\n\tcha <- &evnd\n\n\tcondition = false\n\tfor {\n\t\tselect {\n\t\tcase 
<-ticker.C:\n\t\t\tif !sub.isRunning() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcondition = true\n\t\tcase <-timeout.C:\n\t\t\trequire.FailNow(t, \"Timeout waiting event listener to be healthy\")\n\t\t}\n\t\tif condition {\n\t\t\tbreak\n\t\t}\n\t}\n\tev2 := sub.Flush(time.Now().Unix())\n\tfmt.Printf(\"\\n\\n 2/ Flush %v\\n\\n\", ev2)\n\tassert.Len(t, ev2, 1)\n\tassert.Equal(t, ev2[0].Topic, \"/tasks/oom\")\n}", "func TestCreateWebSocketPair(t *testing.T) {\n\tvar atomicCalls uint64\n\tserverFunc := func(conn *WSConn) {\n\t\tatomic.AddUint64(&atomicCalls, 1)\n\t}\n\t// Start the server.\n\twst := newWSTester(serverFunc)\n\n\t// Connect a client.\n\t_, err := wst.NewClient()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Close the server.\n\tif err := wst.Close(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Check the number of times the server handler has been called.\n\tnumCalls := atomic.LoadUint64(&atomicCalls)\n\tif numCalls != 1 {\n\t\tt.Fatal(\"expected handler to be called once but was\", numCalls)\n\t}\n}", "func SimulateTransferTokenOwner(k keeper.Keeper, ak types.AccountKeeper, bk types.BankKeeper) simtypes.Operation {\n\treturn func(\n\t\tr *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context,\n\t\taccs []simtypes.Account, chainID string,\n\t) (simtypes.OperationMsg, []simtypes.FutureOperation, error) {\n\n\t\ttoken, _ := selectToken(ctx, k, ak, bk, false)\n\t\tvar simToAccount, _ = simtypes.RandomAcc(r, accs)\n\t\tfor simToAccount.Address.Equals(token.GetOwner()) {\n\t\t\tsimToAccount, _ = simtypes.RandomAcc(r, accs)\n\t\t}\n\n\t\tmsg := types.NewMsgTransferTokenOwner(token.GetSymbol(), token.GetOwnerString(), simToAccount.Address.String())\n\n\t\tsimAccount, found := simtypes.FindAccount(accs, token.GetOwner())\n\t\tif !found {\n\t\t\treturn simtypes.NoOpMsg(types.ModuleName, msg.Type(), fmt.Sprintf(\"account[%s] does not found\", token.GetOwnerString())), \n\t\t\t\tnil, fmt.Errorf(\"account[%s] does not found\", 
token.GetOwnerString())\n\t\t}\n\n\t\tsrcOwner, _ := sdk.AccAddressFromBech32(msg.OldOwner)\n\t\taccount := ak.GetAccount(ctx, srcOwner)\n\t\tspendable := bk.SpendableCoins(ctx, account.GetAddress())\n\n\t\tfees, err := simtypes.RandomFees(r, ctx, spendable)\n\t\tif err != nil {\n\t\t\treturn simtypes.NoOpMsg(types.ModuleName, msg.Type(), \"unable to generate fees\"), nil, err\n\t\t}\n\n\t\ttxGen := simappparams.MakeTestEncodingConfig().TxConfig\n\t\ttx, err := helpers.GenTx(\n\t\t\ttxGen,\n\t\t\t[]sdk.Msg{msg},\n\t\t\tfees,\n\t\t\thelpers.DefaultGenTxGas,\n\t\t\tchainID,\n\t\t\t[]uint64{account.GetAccountNumber()},\n\t\t\t[]uint64{account.GetSequence()},\n\t\t\tsimAccount.PrivKey,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn simtypes.NoOpMsg(types.ModuleName, msg.Type(), \"unable to generate mock tx\"), nil, err\n\t\t}\n\n\t\tif _, _, err = app.Deliver(txGen.TxEncoder(), tx); err != nil {\n\t\t\treturn simtypes.NoOpMsg(types.ModuleName, msg.Type(), \"unable to deliver tx\"), nil, err\n\t\t}\n\n\t\treturn simtypes.NewOperationMsg(msg, true, \"simulate transfer token\"), nil, nil\n\t}\n}", "func (c *Client) SendToken(mobileNumber string, token string) error {\n\tcontent := fmt.Sprintf(\"%s is your Genesis verification code.\", token)\n\treturn c.SendMessage(mobileNumber, content)\n}", "func (_UpkeepRegistrationRequests *UpkeepRegistrationRequestsTransactorSession) OnTokenTransfer(arg0 common.Address, amount *big.Int, data []byte) (*types.Transaction, error) {\n\treturn _UpkeepRegistrationRequests.Contract.OnTokenTransfer(&_UpkeepRegistrationRequests.TransactOpts, arg0, amount, data)\n}", "func Test_LogoutValidToken(t *testing.T) {\n\tdir, err := tempConfig(\"\")\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tdefer os.RemoveAll(dir)\n\n\tmockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusOK)\n\t\tw.Write([]byte(`{\"status_code\": 10007, 
\"status_text\": \"Resource deleted\"}`))\n\t}))\n\tdefer mockServer.Close()\n\n\tlogoutArgs := logoutArguments{\n\t\tapiEndpoint: mockServer.URL,\n\t\ttoken: \"test-token\",\n\t}\n\n\terr = logout(logoutArgs)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n}", "func TestBulkEvent(test *testing.T) {\n\tloganne := Loganne{\n\t\thost: \"http://localhost:7999\",\n\t\tsource: \"metadata_api_test\",\n\t}\n\tloganne.post(\"bulkTestEvent\", \"This event is from the bulk test\", Track{}, Track{})\n\n\tassertEqual(test, \"Loganne request made to wrong path\", \"/events\", latestRequest.URL.Path)\n\tassertEqual(test,\"Loganne request wasn't POST request\", \"POST\", latestRequest.Method)\n\n\tassertNoError(test, \"Failed to get request body\", latestRequestError)\n\tassertEqual(test, \"Unexpected request body\", `{\"humanReadable\":\"This event is from the bulk test\",\"source\":\"metadata_api_test\",\"type\":\"bulkTestEvent\"}`, latestRequestBody)\n}", "func (c *instance) Token(call TokenCall) error {\n\to := bind.NewKeyedTransactor(c.key)\n\n\t// gateway redirect to private chain\n\tclient, err := ethclient.Dial(config.ETHAddr())\n\tif err != nil {\n\t\treturn err\n\t}\n\tinstance, err := token.NewDhToken(c.tokenAddr, client)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn call(instance, o)\n}", "func (suite *KeeperTestSuite) TestOnAcknowledgementPacket() {\n\tdata := types.NewFungibleTokenPacketData(prefixCoins2, testAddr1.String(), testAddr2.String())\n\n\tsuccessAck := types.FungibleTokenPacketAcknowledgement{\n\t\tSuccess: true,\n\t}\n\tfailedAck := types.FungibleTokenPacketAcknowledgement{\n\t\tSuccess: false,\n\t\tError: \"failed packet transfer\",\n\t}\n\n\ttestCases := []struct {\n\t\tmsg string\n\t\tack types.FungibleTokenPacketAcknowledgement\n\t\tmalleate func()\n\t\tsource bool\n\t\tsuccess bool // success of ack\n\t}{\n\t\t{\"success ack causes no-op\", successAck,\n\t\t\tfunc() {}, true, true},\n\t\t{\"successful refund from source chain\", 
failedAck,\n\t\t\tfunc() {\n\t\t\t\tescrow := types.GetEscrowAddress(testPort1, testChannel1)\n\t\t\t\t_, err := suite.chainA.App.BankKeeper.AddCoins(suite.chainA.GetContext(), escrow, sdk.NewCoins(sdk.NewCoin(\"atom\", sdk.NewInt(100))))\n\t\t\t\tsuite.Require().NoError(err)\n\t\t\t}, true, false},\n\t\t{\"successful refund from external chain\", failedAck,\n\t\t\tfunc() {\n\t\t\t\tdata.Amount = prefixCoins\n\t\t\t}, false, false},\n\t}\n\n\tpacket := channeltypes.NewPacket(data.GetBytes(), 1, testPort1, testChannel1, testPort2, testChannel2, 100, 0)\n\n\tfor i, tc := range testCases {\n\t\ttc := tc\n\t\ti := i\n\t\tsuite.Run(fmt.Sprintf(\"Case %s\", tc.msg), func() {\n\t\t\tsuite.SetupTest() // reset\n\n\t\t\ttc.malleate()\n\n\t\t\tvar denom string\n\t\t\tif tc.source {\n\t\t\t\tprefix := types.GetDenomPrefix(packet.GetDestPort(), packet.GetDestChannel())\n\t\t\t\tdenom = prefixCoins2[0].Denom[len(prefix):]\n\t\t\t} else {\n\t\t\t\tdenom = data.Amount[0].Denom\n\t\t\t}\n\n\t\t\tpreCoin := suite.chainA.App.BankKeeper.GetBalance(suite.chainA.GetContext(), testAddr1, denom)\n\n\t\t\terr := suite.chainA.App.TransferKeeper.OnAcknowledgementPacket(suite.chainA.GetContext(), packet, data, tc.ack)\n\t\t\tsuite.Require().NoError(err, \"valid test case %d failed: %s\", i, tc.msg)\n\n\t\t\tpostCoin := suite.chainA.App.BankKeeper.GetBalance(suite.chainA.GetContext(), testAddr1, denom)\n\t\t\tdeltaAmount := postCoin.Amount.Sub(preCoin.Amount)\n\n\t\t\tif tc.success {\n\t\t\t\tsuite.Require().Equal(sdk.ZeroInt(), deltaAmount, \"successful ack changed balance\")\n\t\t\t} else {\n\t\t\t\tsuite.Require().Equal(prefixCoins2[0].Amount, deltaAmount, \"failed ack did not trigger refund\")\n\t\t\t}\n\t\t})\n\t}\n}", "func TestRefreshToken(t *testing.T) {\n\tdb.InitDB()\n\tvar router *gin.Engine = routes.SetupRouter()\n\n\tvar user models.UserCreate = utils.CreateUser(\"Tom\", \"qwerty1234\", t, router)\n\tuser.Token = utils.ConnectUser(\"Tom\", \"qwerty1234\", t, router)\n\n\tvar url 
string = \"/v1/refresh/token\"\n\tvar bearer = \"Bearer \" + user.Token\n\n\trecord := httptest.NewRecorder()\n\trequest, _ := http.NewRequest(\"POST\", url, nil)\n\trequest.Header.Add(\"Content-Type\", \"application/json\")\n\trequest.Header.Add(\"Authorization\", bearer)\n\n\trouter.ServeHTTP(record, request)\n\n\tvar refresh models.UserConnect\n\terr := json.Unmarshal([]byte(record.Body.String()), &refresh)\n\tif err != nil {\n\t\tlog.Fatal(\"Bad output: \", err.Error())\n\t\tt.Fail()\n\t}\n\n\tassert.Equal(t, record.Code, 200)\n\n\tutils.CleanUser(user.ID, user.Token, t, router)\n\tdb.CloseDB()\n}", "func (m *MockAnonymous) PublishWebSocketEvent(arg0 string, arg1 map[string]interface{}, arg2 *model.WebsocketBroadcast) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"PublishWebSocketEvent\", arg0, arg1, arg2)\n}", "func (m *MockHandler) SendHostEvent(ctx context.Context, event HostEvent) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"SendHostEvent\", ctx, event)\n}", "func (_RandomBeacon *RandomBeaconCallerSession) TToken() (common.Address, error) {\n\treturn _RandomBeacon.Contract.TToken(&_RandomBeacon.CallOpts)\n}" ]
[ "0.7312468", "0.66278255", "0.63569856", "0.6182798", "0.60594577", "0.59883547", "0.5914056", "0.58681816", "0.5785276", "0.56461126", "0.5483135", "0.54372364", "0.5330973", "0.52957696", "0.5283241", "0.52675366", "0.5232106", "0.52223915", "0.52166265", "0.52122736", "0.52059186", "0.5194641", "0.5178894", "0.5176259", "0.51758575", "0.5172187", "0.514693", "0.51341885", "0.51128834", "0.508287", "0.50812846", "0.5069435", "0.5065086", "0.50490814", "0.50464976", "0.50433123", "0.50408584", "0.5033996", "0.50236285", "0.501607", "0.49942556", "0.49840036", "0.49830878", "0.49823427", "0.49793887", "0.49706203", "0.4970318", "0.4961359", "0.4954246", "0.49506572", "0.49248683", "0.49117506", "0.4902354", "0.4897484", "0.48896676", "0.48719227", "0.4857176", "0.48475826", "0.48475528", "0.48465297", "0.4845532", "0.48428664", "0.4842699", "0.48353398", "0.48319867", "0.48214948", "0.48112515", "0.48072737", "0.47994187", "0.4794715", "0.47937384", "0.47895202", "0.4784103", "0.47824743", "0.47779804", "0.47710449", "0.47654054", "0.47642878", "0.47624132", "0.4761167", "0.47600928", "0.474982", "0.47392476", "0.4738188", "0.47317246", "0.4724149", "0.47163028", "0.47097403", "0.470839", "0.47034615", "0.4703247", "0.46906975", "0.46905294", "0.4685455", "0.4684473", "0.46831486", "0.46804693", "0.46791726", "0.4677846", "0.46775764" ]
0.7044898
1
Test that TokenEvent with nil sends a connection token event with a nil token.
func TestServiceTokenEvent_WithNilToken_SendsNilToken(t *testing.T) { runTest(t, func(s *res.Service) { s.Handle("model", res.GetResource(func(r res.GetRequest) { r.NotFound() })) }, func(s *restest.Session) { s.Service().TokenEvent(mock.CID, nil) s.GetMsg().AssertTokenEvent(mock.CID, nil) }) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestServiceTokenEventWithID_WithNilToken_SendsNilToken(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.GetResource(func(r res.GetRequest) { r.NotFound() }))\n\t}, func(s *restest.Session) {\n\t\ts.Service().TokenEventWithID(mock.CID, \"foo\", nil)\n\t\ts.GetMsg().AssertTokenEventWithID(mock.CID, \"foo\", nil)\n\t})\n}", "func TestAuthRequestNilTokenEvent(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.Auth(\"method\", func(r res.AuthRequest) {\n\t\t\tr.TokenEvent(nil)\n\t\t\tr.OK(nil)\n\t\t}))\n\t}, func(s *restest.Session) {\n\t\treq := s.Auth(\"test.model\", \"method\", nil)\n\t\ts.GetMsg().\n\t\t\tAssertTokenEvent(mock.CID, nil)\n\t\treq.Response().\n\t\t\tAssertResult(nil)\n\t})\n}", "func TestServiceTokenEvent_WithObjectToken_SendsToken(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.GetResource(func(r res.GetRequest) { r.NotFound() }))\n\t}, func(s *restest.Session) {\n\t\ts.Service().TokenEvent(mock.CID, mock.Token)\n\t\ts.GetMsg().AssertTokenEvent(mock.CID, mock.Token)\n\t})\n}", "func TestAuthRequestTokenEvent(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.Auth(\"method\", func(r res.AuthRequest) {\n\t\t\tr.TokenEvent(mock.Token)\n\t\t\tr.OK(nil)\n\t\t}))\n\t}, func(s *restest.Session) {\n\t\treq := s.Auth(\"test.model\", \"method\", nil)\n\t\ts.GetMsg().\n\t\t\tAssertTokenEvent(mock.CID, mock.Token)\n\t\treq.Response().\n\t\t\tAssertResult(nil)\n\t})\n}", "func TestEmptyToken(t *testing.T) {\n\tassert := assert.New(t)\n\n\tmockedTpmProvider := new(tpmprovider.MockedTpmProvider)\n\tmockedTpmProvider.On(\"Close\").Return(nil)\n\tmockedTpmProvider.On(\"NvIndexExists\", mock.Anything).Return(true, nil)\n\tmockedTpmProvider.On(\"NvRelease\", mock.Anything, mock.Anything).Return(nil)\n\tmockedTpmProvider.On(\"NvDefine\", mock.Anything, mock.Anything, mock.Anything).Return(nil)\n\tmockedTpmProvider.On(\"NvWrite\", 
mock.Anything, mock.Anything, mock.Anything).Return(nil)\n\n\tmockedTpmFactory := tpmprovider.MockedTpmFactory{TpmProvider: mockedTpmProvider}\n\n\ttrustAgentService, err := CreateTrustAgentService(CreateTestConfig(), mockedTpmFactory)\n\tassert.NoError(err)\n\n\t// setup TA service to use JWT-based authentication\n\ttrustAgentService.router = mux.NewRouter()\n\ttrustAgentService.router.Use(middleware.NewTokenAuth(\"../test/mockJWTDir\", \"../test/mockCACertsDir\", mockRetrieveJWTSigningCerts, cacheTime))\n\ttrustAgentService.router.HandleFunc(\"/v2/tag\", errorHandler(requiresPermission(setAssetTag(CreateTestConfig(), mockedTpmFactory), []string{postDeployTagPerm}))).Methods(\"POST\")\n\n\tjsonString := `{\"tag\" : \"tHgfRQED1+pYgEZpq3dZC9ONmBCZKdx10LErTZs1k/k=\", \"hardware_uuid\" : \"7a569dad-2d82-49e4-9156-069b0065b262\"}`\n\n\trequest, err := http.NewRequest(\"POST\", \"/v2/tag\", bytes.NewBuffer([]byte(jsonString)))\n\tassert.NoError(err)\n\n\trequest.Header.Set(\"Content-Type\", \"application/json\")\n\trequest.Header.Add(\"Authorization\", \"Bearer \"+EmptyJWTToken)\n\n\trecorder := httptest.NewRecorder()\n\ttrustAgentService.router.ServeHTTP(recorder, request)\n\tresponse := recorder.Result()\n\tfmt.Printf(\"StatusCode: %d\\n\", response.StatusCode)\n\tassert.Equal(http.StatusUnauthorized, response.StatusCode)\n}", "func TestServiceTokenEventWithID_WithObjectToken_SendsToken(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.GetResource(func(r res.GetRequest) { r.NotFound() }))\n\t}, func(s *restest.Session) {\n\t\ts.Service().TokenEventWithID(mock.CID, \"foo\", mock.Token)\n\t\ts.GetMsg().AssertTokenEventWithID(mock.CID, \"foo\", mock.Token)\n\t})\n}", "func TestAuthParseTokenWithNilToken(t *testing.T) {\n\tvar o struct {\n\t\tUser string `json:\"user\"`\n\t\tID int `json:\"id\"`\n\t}\n\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.Auth(\"method\", func(r res.AuthRequest) 
{\n\t\t\tr.ParseToken(&o)\n\t\t\trestest.AssertEqualJSON(t, \"o.User\", o.User, \"\")\n\t\t\trestest.AssertEqualJSON(t, \"o.ID\", o.ID, 0)\n\t\t\tr.NotFound()\n\t\t}))\n\t}, func(s *restest.Session) {\n\t\ts.Auth(\"test.model\", \"method\", nil).\n\t\t\tResponse().\n\t\t\tAssertError(res.ErrNotFound)\n\t})\n}", "func TestInvalidEvents(t *testing.T) {\n\tassert := assert.New(t)\n\n\tw := webhook.W{\n\t\tUntil: time.Now().Add(60 * time.Second),\n\t}\n\tw.Config.URL = \"http://localhost:9999/foo\"\n\tw.Config.ContentType = \"application/json\"\n\n\tobs, err := OutboundSenderFactory{\n\t\tListener: w,\n\t\tClient: &http.Client{},\n\t\tCutOffPeriod: time.Second,\n\t\tNumWorkers: 10,\n\t\tQueueSize: 10,\n\t\tProfilerFactory: testServerProfilerFactory,\n\t\tLogger: getLogger(),\n\t}.New()\n\tassert.Nil(obs)\n\tassert.NotNil(err)\n\n\tw2 := webhook.W{\n\t\tUntil: time.Now().Add(60 * time.Second),\n\t\tEvents: []string{\"iot(.*\"},\n\t}\n\tw2.Config.URL = \"http://localhost:9999/foo\"\n\tw2.Config.ContentType = \"application/json\"\n\n\tobs, err = OutboundSenderFactory{\n\t\tListener: w2,\n\t\tClient: &http.Client{},\n\t\tCutOffPeriod: time.Second,\n\t\tNumWorkers: 10,\n\t\tQueueSize: 10,\n\t\tLogger: getLogger(),\n\t\tProfilerFactory: testServerProfilerFactory,\n\t}.New()\n\n\tassert.Nil(obs)\n\tassert.NotNil(err)\n}", "func TestPacket_VerifyNoMutation(t *testing.T) {\n\ttearDown := setUp(t)\n\tdefer tearDown(t)\n\n\tpacket.WriteChecksum()\n\n\tbefore := packet.Serialize()\n\tpacket.Verify(deviceToken)\n\n\tafter := packet.Serialize()\n\tassert.Equal(t, before, after)\n}", "func noValidTokenTest(t *testing.T, r *http.Request, h http.Handler, auth *mock.Authenticator) {\n\toriginal := auth.AuthenticateFn\n\tauth.AuthenticateFn = authenticateGenerator(false, errors.New(\"An error\"))\n\tw := httptest.NewRecorder()\n\th.ServeHTTP(w, r)\n\ttest.Equals(t, http.StatusBadRequest, w.Result().StatusCode)\n\tauth.AuthenticateFn = authenticateGenerator(false, nil)\n\tw = 
httptest.NewRecorder()\n\th.ServeHTTP(w, r)\n\ttest.Equals(t, http.StatusUnauthorized, w.Result().StatusCode)\n\tauth.AuthenticateFn = original\n}", "func (fgs *FakeGraphSync) AssertNoCancelReceived(t *testing.T) {\n\trequire.Empty(t, fgs.cancels, \"should not cancel request\")\n}", "func TestTokenIsSet(t *testing.T) {\n\tconfiguration := ReadConfig()\n\ttoken := configuration.Token\n\n\tif token == \"\" {\n\t\tt.Error(\"Token misconfigured\")\n\t}\n\n\t// A dumb way to check if a dummy token has been used\n\tif len(token) < 16 {\n\t\tt.Error(\"Token misconfigured\")\n\t}\n\n\tt.Log(\"Token set\")\n}", "func (s *Service) TestToken(ctx context.Context, info *pushmdl.PushInfo, token string) (err error) {\n\tparams := url.Values{}\n\tparams.Add(\"app_id\", strconv.FormatInt(info.APPID, 10))\n\tparams.Add(\"alert_title\", info.Title)\n\tparams.Add(\"alert_body\", info.Summary)\n\tparams.Add(\"token\", token)\n\tparams.Add(\"link_type\", strconv.FormatInt(int64(info.LinkType), 10))\n\tparams.Add(\"link_value\", info.LinkValue)\n\tparams.Add(\"sound\", strconv.Itoa(info.Sound))\n\tparams.Add(\"vibration\", strconv.Itoa(info.Vibration))\n\tparams.Add(\"expire_time\", strconv.FormatInt(int64(info.ExpireTime), 10))\n\tparams.Add(\"image_url\", info.ImageURL)\n\tif err = s.httpClient.Post(ctx, _testTokenURL, \"\", params, nil); err != nil {\n\t\tlog.Error(\"s.TestToken(%+v) error(%v)\", info, err)\n\t}\n\treturn\n}", "func TestServiceTokenEvent_WithInvalidCID_CausesPanic(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.GetResource(func(r res.GetRequest) { r.NotFound() }))\n\t}, func(s *restest.Session) {\n\t\trestest.AssertPanic(t, func() {\n\t\t\ts.Service().TokenEvent(\"invalid.*.cid\", nil)\n\t\t})\n\t})\n}", "func TestAuthRawTokenWithNoToken(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.Auth(\"method\", func(r res.AuthRequest) {\n\t\t\trestest.AssertEqualJSON(t, \"RawToken\", r.RawToken(), 
nil)\n\t\t\tr.NotFound()\n\t\t}))\n\t}, func(s *restest.Session) {\n\t\ts.Auth(\"test.model\", \"method\", nil).\n\t\t\tResponse().\n\t\t\tAssertError(res.ErrNotFound)\n\t})\n}", "func (fgs *FakeGraphSync) AssertNoRequestReceived(t *testing.T) {\n\trequire.Empty(t, fgs.requests, \"should not receive request\")\n}", "func TestGetEventStatusOKNoEvent(t *testing.T) {\n\thandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tassert.Equal(t, r.Method, \"GET\", \"Expect GET request\")\n\t\tassert.Equal(t, r.URL.EscapedPath(), \"/event\", \"Expect /event endpoint\")\n\t\tw.WriteHeader(http.StatusOK)\n\t\teventList := `{\n\t\t\t\"events\":[],\n\t\t\t\t\"pageSize\":10,\n\t\t\t\t\"totalCount\":0\n\t\t\t}`\n\n\t\tw.Write([]byte(eventList))\n\t})\n\n\thttpClient, teardown := testingHTTPClient(handler)\n\tdefer teardown()\n\n\teventHandler := NewEventHandler(\"https://localhost\")\n\teventHandler.HTTPClient = httpClient\n\tcloudEvent, errObj := eventHandler.GetEvent(\"8929e5e5-3826-488f-9257-708bfa974909\", \"sh.keptn.events.evaluation-done\")\n\n\tif cloudEvent != nil {\n\t\tt.Error(\"do not expect a Keptn Cloud event\")\n\t}\n\n\tif errObj == nil {\n\t\tt.Errorf(\"an error occurred %v\", errObj.Message)\n\t}\n\n\tif *errObj.Message != \"No Keptn sh.keptn.events.evaluation-done event found for context: 8929e5e5-3826-488f-9257-708bfa974909\" {\n\t\tt.Error(\"response message has changed\")\n\t}\n}", "func TestRequestEmpty(t *testing.T) {\n\t// Initialize server\n\tserver := setupServer(\n\t\tt,\n\t\t&serverImpl{\n\t\t\tonRequest: func(\n\t\t\t\t_ context.Context,\n\t\t\t\t_ webwire.Connection,\n\t\t\t\tmsg webwire.Message,\n\t\t\t) (webwire.Payload, error) {\n\t\t\t\t// Expect the following request to not even arrive\n\t\t\t\tt.Error(\"Not expected but reached\")\n\t\t\t\treturn nil, nil\n\t\t\t},\n\t\t},\n\t\twebwire.ServerOptions{},\n\t)\n\n\t// Initialize client\n\tclient := 
newCallbackPoweredClient(\n\t\tserver.Addr().String(),\n\t\twebwireClient.Options{\n\t\t\tDefaultRequestTimeout: 2 * time.Second,\n\t\t},\n\t\tcallbackPoweredClientHooks{},\n\t)\n\n\t// Send request without a name and without a payload.\n\t// Expect a protocol error in return not sending the invalid request off\n\t_, err := client.connection.Request(context.Background(), \"\", nil)\n\tif _, isProtoErr := err.(webwire.ProtocolErr); !isProtoErr {\n\t\tt.Fatalf(\"Expected a protocol error, got: %v\", err)\n\t}\n}", "func TestNoSendNoError(t *testing.T) {\n\n\ttestErrorInit()\n\n\tgo notifyError(notifier, service)\n\n\ttime.Sleep(100 * time.Millisecond)\n\tif notifier.wasWritten {\n\t\tt.Error(\"There was no message to send for notification\")\n\t}\n}", "func TestVerifyToken(t *testing.T) {\n t.Errorf(\"No tests written yet for VerifyToken()\")\n}", "func ExpectNoEvent(object k8sObject, eventType, reason string) {\n\tBy(\"Expecting for an event to be not triggered\")\n\texpectEvent(object, eventType, reason, BeEmpty())\n}", "func (o *Venda) SetTokenNil() {\n\to.Token.Set(nil)\n}", "func (n *NullEventReceiver) Event(eventName string) {\n}", "func TestAgentFailsRequestWithoutToken(t *testing.T) {\n\tif *skip {\n\t\tt.Skip(\"Test is skipped until Citadel agent is setup in test.\")\n\t}\n\tclient, err := sdsc.NewClient(sdsc.ClientOptions{\n\t\tServerAddress: *sdsUdsPath,\n\t})\n\tif err != nil {\n\t\tt.Errorf(\"failed to create sds client\")\n\t}\n\tclient.Start()\n\tdefer client.Stop()\n\tclient.Send()\n\terrmsg := \"no credential token\"\n\t_, err = client.WaitForUpdate(3 * time.Second)\n\tif err == nil || strings.Contains(err.Error(), errmsg) {\n\t\tt.Errorf(\"got [%v], want error with substring [%v]\", err, errmsg)\n\t}\n}", "func TestServiceTokenReset(t *testing.T) {\n\ttbl := []struct {\n\t\tSubject string\n\t\tTIDs []string\n\t\tExpected interface{}\n\t}{\n\t\t{\"auth\", nil, nil},\n\t\t{\"auth\", []string{}, nil},\n\t\t{\"auth\", []string{\"foo\"}, 
json.RawMessage(`{\"tids\":[\"foo\"],\"subject\":\"auth\"}`)},\n\t\t{\"auth\", []string{\"foo\", \"bar\"}, json.RawMessage(`{\"tids\":[\"foo\",\"bar\"],\"subject\":\"auth\"}`)},\n\t\t{\"auth.test.method\", []string{\"foo\", \"bar\"}, json.RawMessage(`{\"tids\":[\"foo\",\"bar\"],\"subject\":\"auth.test.method\"}`)},\n\t}\n\n\tfor _, l := range tbl {\n\t\trunTest(t, func(s *res.Service) {\n\t\t\ts.Handle(\"model\", res.GetResource(func(r res.GetRequest) { r.NotFound() }))\n\t\t}, func(s *restest.Session) {\n\t\t\ts.Service().TokenReset(l.Subject, l.TIDs...)\n\t\t\t// Send token event to flush any system.tokenReset event\n\t\t\ts.Service().TokenEvent(mock.CID, nil)\n\n\t\t\tif l.Expected != nil {\n\t\t\t\ts.GetMsg().\n\t\t\t\t\tAssertSubject(\"system.tokenReset\").\n\t\t\t\t\tAssertPayload(l.Expected)\n\t\t\t}\n\n\t\t\ts.GetMsg().AssertTokenEvent(mock.CID, nil)\n\t\t})\n\t}\n}", "func TestNoConnection(t *testing.T) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\thosts := getNetHosts(t, ctx, 10)\n\n\tpsubs := getPubsubs(ctx, hosts)\n\n\tch, err := psubs[5].Subscribe(\"foobar\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\terr = psubs[0].Publish(\"foobar\", []byte(\"TESTING\"))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tselect {\n\tcase <-ch.ch:\n\t\tt.Fatal(\"shouldnt have gotten a message\")\n\tcase <-time.After(time.Millisecond * 200):\n\t}\n}", "func TestEventNameIsSet(t *testing.T) {\n\tmaybeSkipIntegrationTest(t)\n\n\ts, err := NewSession()\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create session: %v\", err)\n\t}\n\tdefer s.Close()\n\n\tif err := s.Subscribe(\"log\"); err != nil {\n\t\tt.Fatalf(\"Failed to start event listener: %v\", err)\n\t}\n\n\t// The event triggered by this command will be buffered in the event queue.\n\tif _, err := s.CommandRequest(\"reload-settings\", nil); err != nil {\n\t\tt.Fatalf(\"Failed to send 'reload-settings' command: %v\", err)\n\t}\n\n\te, err := s.NextEvent(context.TODO())\n\tif err 
!= nil {\n\t\tt.Fatalf(\"Unexpected error waiting for event: %v\", err)\n\t}\n\n\tif e.Name != \"log\" {\n\t\tt.Fatalf(\"Expected to receive 'log' event, got %s\", e.Name)\n\t}\n}", "func TestEventSimple(t *testing.T) {\n\tassert := asserts.NewTesting(t, asserts.FailStop)\n\n\tevt, err := mesh.NewEvent(\"\")\n\tassert.ErrorContains(err, \"event needs topic\")\n\tassert.True(mesh.IsNilEvent(evt))\n\n\tevt, err = mesh.NewEvent(\"test\")\n\tassert.NoError(err)\n\tassert.Equal(evt.Topic(), \"test\")\n\tassert.False(evt.HasPayload())\n}", "func TestServiceTokenEventWithID_WithInvalidCID_CausesPanic(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.GetResource(func(r res.GetRequest) { r.NotFound() }))\n\t}, func(s *restest.Session) {\n\t\trestest.AssertPanic(t, func() {\n\t\t\ts.Service().TokenEventWithID(\"invalid.*.cid\", \"foo\", nil)\n\t\t})\n\t})\n}", "func TestEmptyChannelTable(t *testing.T) {\n\tclearTable()\n\t// Generate JWT for authorization.\n\tvalidToken, err := auth.GenerateJWT()\n\tif err != nil {\n\t\tt.Error(\"Failed to generate token\")\n\t}\n\n\treq, _ := http.NewRequest(\"GET\", \"/api/channels\", nil)\n\t// Add \"Token\" header to request with generated token.\n\treq.Header.Add(\"Token\", validToken)\n\tresponse := executeRequest(req)\n\n\tcheckResponseCode(t, http.StatusOK, response.Code)\n\n\tif body := response.Body.String(); body != \"[]\" {\n\t\tt.Errorf(\"Expected an empty array. Got %s\", body)\n\t}\n}", "func TestSetAuth(t *testing.T) {\n var c Noc\n\n // use wrong port on purpose, expect an error\n c.InitNoc(\"localhost\", \"9999\", false)\n if c.SetAuth() == nil {\n t.Errorf(\"Expected an error when getting an authentication token. server is not running on port 9999\")\n }\n\n c.InitNoc(\"localhost\", \"8888\", false)\n c.BadsecToken = \"\"\n c.SetAuth()\n if len(c.BadsecToken) == 33 {\n t.Errorf(\"Expected BadsecToken to be length 33. 
Got: \" + strconv.Itoa(len(c.BadsecToken)))\n }\n}", "func TestSkipNoMember(t *testing.T) {\n\tcommitteeMock, keys := agreement.MockCommittee(1, false, 2)\n\teb, roundChan := initAgreement(committeeMock)\n\thash, _ := crypto.RandEntropy(32)\n\teb.Publish(string(topics.Agreement), agreement.MockAgreement(hash, 1, 1, keys))\n\n\tselect {\n\tcase <-roundChan:\n\t\tassert.FailNow(t, \"not supposed to get a round update without reaching quorum\")\n\tcase <-time.After(100 * time.Millisecond):\n\t\t// all good\n\t}\n}", "func AssertNoDownlinkFrame(assert *require.Assertions, ts *IntegrationTestSuite) {\n\tassert.Equal(0, len(ts.GWBackend.TXPacketChan))\n}", "func NewZeroEvent(x, y float64) Event {\n\treturn NewEvent(x, y, \"\", \"\")\n}", "func TestEventServiceGetEventSubscriptionsEmptySubscriptionsLink(t *testing.T) {\n\tvar result EventService\n\terr := json.NewDecoder(strings.NewReader(eventServiceBody)).Decode(&result)\n\tif err != nil {\n\t\tt.Errorf(\"Error decoding JSON: %s\", err)\n\t}\n\n\t// get event subscriptions with empty subscription link\n\tresult.subscriptions = \"\"\n\t_, err = result.GetEventSubscriptions(context.Background())\n\n\t// validate the returned error\n\texpectedError := \"empty subscription link in the event service\"\n\tif err.Error() != expectedError {\n\t\tt.Errorf(\"Error GetEventSubscriptions returned: %s expected: %s\",\n\t\t\terr,\n\t\t\texpectedError)\n\t}\n}", "func _TestRegisterNodeWithoutRole(t *testing.T) {\n\t_, err := registerNodeSignedCall(TESTPUBLICKEY, 0, 0, nil, TESTHOST)\n\tassert.Error(t, err)\n}", "func TestGet_Token(t *testing.T) {\n t.Errorf(\"No tests written yet for Get_Token()\")\n}", "func TestWillSubscribePublishCloseEmpty(t *testing.T) {\n\tiniStr := `\n\t[gateway]\n\t name = testwillaftercloseemptywill\n\t[broker \"local/1\"]\n\t host = localhost\n\t port = 1883\n\t will_message = \n\t[device \"dora/dummy\"]\n\t broker = local\n\t qos = 0\n\t interval = 10\n\t payload = Hello will just publish world.\n\t type = 
EnOcean\n`\n\tok := genericWillTestDriver(t, iniStr, \"/testwillaftercloseemptywill/will\", []byte{})\n\tif !ok {\n\t\tt.Error(\"Failed to receive Empty Will message\")\n\t}\n}", "func TestOAUTH2Token(t *testing.T) {\n\tconnection, err := NewConnectionBuilder().\n\t\tURL(\"http://localhost:9100/api\").\n\t\tUsername(\"admin\").\n\t\tPassword(\"password\").\n\t\tBuild()\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tdefer connection.Close()\n\tvcr := govcr.NewVCR(\"connection_oauth2\",\n\t\t&govcr.VCRConfig{\n\t\t\tClient: connection.client,\n\t\t\tDisableRecording: true,\n\t\t})\n\t// Replace our HTTPClient with a vcr client wrapping it\n\tconnection.client = vcr.Client\n\tprojectsResource := connection.Projects()\n\n\t// Trigger the auth flow.\n\tgetProjectsRequest := projectsResource.Get()\n\tif len(connection.token) != 0 || len(connection.bearer) != 0 {\n\t\tt.Errorf(\"Connection should have no tokens. token: '%s', bearer: '%s'\",\n\t\t\tconnection.token,\n\t\t\tconnection.bearer)\n\t}\n\t_, err = getProjectsRequest.Send()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif len(connection.token) != 0 || len(connection.bearer) == 0 {\n\t\tt.Errorf(\"Connection should have only a bearer token. 
token: '%s', bearer: '%s'\",\n\t\t\tconnection.token,\n\t\t\tconnection.bearer)\n\t}\n}", "func (v Client) MustRevokeToken() {\n\tif err := v.RevokeToken(); err != nil {\n\t\tlog.Entry().WithError(err).Fatal(\"Could not revoke token\")\n\t}\n}", "func TestInitToken_Ensure_NoExpectedToken_NotExisting(t *testing.T) {\n\tfv := NewFakeVault(t)\n\tdefer fv.Finish()\n\n\tfv.ExpectWrite()\n\n\ti := &InitToken{\n\t\tRole: \"etcd\",\n\t\tPolicies: []string{\"etcd\"},\n\t\tkubernetes: fv.Kubernetes(),\n\t\tExpectedToken: \"\",\n\t}\n\n\t// expects a read and vault says secret is not existing\n\tinitTokenPath := \"test-cluster-inside/secrets/init_token_etcd\"\n\tfv.fakeLogical.EXPECT().Read(initTokenPath).Return(\n\t\tnil,\n\t\tnil,\n\t)\n\n\t// expect a create new orphan\n\tfv.fakeToken.EXPECT().CreateOrphan(&tokenCreateRequestMatcher{}).Return(&vault.Secret{\n\t\tAuth: &vault.SecretAuth{\n\t\t\tClientToken: \"my-new-random-token\",\n\t\t},\n\t}, nil)\n\n\t// expect a write of the new token\n\tfv.fakeLogical.EXPECT().Write(initTokenPath, map[string]interface{}{\"init_token\": \"my-new-random-token\"}).Return(\n\t\tnil,\n\t\tnil,\n\t)\n\n\tfv.fakeToken.EXPECT().Lookup(\"my-new-random-token\").Return(\n\t\tnil,\n\t\tnil,\n\t)\n\n\tfv.fakeToken.EXPECT().Renew(\"my-new-random-token\", 0).Return(\n\t\tnil,\n\t\tnil,\n\t)\n\n\tInitTokenEnsure_EXPECTs(fv)\n\n\terr := i.Ensure()\n\tif err != nil {\n\t\tt.Error(\"unexpected error: \", err)\n\t}\n\n\ttoken, err := i.InitToken()\n\tif err != nil {\n\t\tt.Error(\"unexpected error: \", err)\n\t}\n\n\tif exp, act := \"my-new-random-token\", token; exp != act {\n\t\tt.Errorf(\"unexpected token: act=%s exp=%s\", act, exp)\n\t}\n\n\treturn\n}", "func TestMockOnEvent(t *testing.T) {\n\tmockServer := &MockRailsServer{T: t, Behaviour: MockEvent}\n\n\tdialer := wstest.NewDialer(mockServer)\n\tdialer.HandshakeTimeout = time.Second * 2\n\n\tclient := NewClient(fakeEndpoint).WithDialer(dialer)\n\n\tcalled := make(chan 
struct{})\n\n\tclient.OnEvent(\"AgentChannel\", func(conn *websocket.Conn, payload *Payload, error error) {\n\t\tcalled <- struct{}{}\n\t\treturn\n\t})\n\n\terr := client.Serve()\n\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\treceiveSleepMs(2000, called, t)\n}", "func Test_Onu_StateMachine_eapol_no_flow(t *testing.T) {\n\tt.Skip(\"Needs to be moved in the Service struct\")\n\tonu := createTestOnu()\n\n\tonu.InternalState.SetState(OnuStateEnabled)\n\tassert.Equal(t, onu.InternalState.Current(), OnuStateEnabled)\n\n\t// fail as no EapolFlow has been received\n\terr := onu.InternalState.Event(\"start_auth\")\n\tif err == nil {\n\t\tt.Fatal(\"can't start EAPOL without EapolFlow\")\n\t}\n\tassert.Equal(t, onu.InternalState.Current(), OnuStateEnabled)\n\tassert.Equal(t, err.Error(), \"transition canceled with error: cannot-go-to-auth-started-as-eapol-flow-is-missing\")\n}", "func TestInvalidClient(t *testing.T) {\n\tassert := assert.New(t)\n\n\tw := webhook.W{\n\t\tUntil: time.Now().Add(60 * time.Second),\n\t\tEvents: []string{\"iot\"},\n\t}\n\tw.Config.URL = \"http://localhost:9999/foo\"\n\tw.Config.ContentType = \"application/json\"\n\n\tobs, err := OutboundSenderFactory{\n\t\tListener: w,\n\t\tCutOffPeriod: time.Second,\n\t\tNumWorkers: 10,\n\t\tQueueSize: 10,\n\t\tProfilerFactory: testServerProfilerFactory,\n\t\tLogger: getLogger(),\n\t}.New()\n\tassert.Nil(obs)\n\tassert.NotNil(err)\n}", "func TestEmptyAuh(t *testing.T) {\n\tdb.InitDB()\n\tvar router *gin.Engine = routes.SetupRouter()\n\n\tvar user models.UserCreate = utils.CreateUser(\"Tom\", \"qwerty1234\", t, router)\n\tuser.Token = utils.ConnectUser(\"Tom\", \"qwerty1234\", t, router)\n\n\tvar url string = \"/v1/user/\" + strconv.Itoa(user.ID)\n\trecord := httptest.NewRecorder()\n\trequest, _ := http.NewRequest(\"GET\", url, nil)\n\trequest.Header.Add(\"Content-Type\", \"application/json\")\n\trequest.Header.Add(\"Authorization\", \"\")\n\n\trouter.ServeHTTP(record, request)\n\n\tvar message Message\n\terr := 
json.Unmarshal([]byte(record.Body.String()), &message)\n\tif err != nil {\n\t\tlog.Fatal(\"Bad output: \", err.Error())\n\t\tt.Fail()\n\t}\n\n\tassert.Equal(t, record.Code, 403)\n\tassert.Equal(t, message.Message, \"Bad token\")\n\n\tuser.Token = utils.ConnectUser(\"Tom\", \"qwerty1234\", t, router)\n\tutils.CleanUser(user.ID, user.Token, t, router)\n\tdb.CloseDB()\n}", "func InitMQTTNull() {\n\tmqtt.SetHandler(nullHandler)\n}", "func _TestRegisterNodeWithoutHost(t *testing.T) {\n\t_, err := registerNodeSignedCall(TESTPUBLICKEY, 0, 0, \"virtual\", \"\")\n\tassert.Error(t, err)\n}", "func TestNonexistingMessage(t *testing.T) {\n\tda := NewFrameOutputBuffer()\n\td := &model.Device{DeviceEUI: makeRandomEUI(), DevAddr: makeRandomDevAddr()}\n\tif _, err := da.GetPHYPayloadForDevice(d, &context); err == nil {\n\t\tt.Fatal(\"Did not expect to get PHYPayload for unknown device\")\n\t}\n}", "func TestFetchNullNotNullToken(t *testing.T) {\n\tinput := \"ull$_abc\"\n\texpected := \"null$_abc\"\n\treader := bytes.NewReader([]byte(input))\n\tlex := NewLexer(reader)\n\tif err := lex.fetchNull(); err != nil {\n\t\tt.Error(err.Error())\n\t\treturn\n\t}\n\n\tif len(lex.tokens) != 1 {\n\t\tt.Error(\"expecting 1 token to be fetched\")\n\t\treturn\n\t}\n\n\ttoken := lex.tokens[0]\n\tif token.t != TokenIdentifier {\n\t\tt.Errorf(\"unexpected token type %d (%s), expecting token type %d (%s)\", token.t, tokenTypeMap[token.t], TokenIdentifier, tokenTypeMap[TokenIdentifier])\n\t\treturn\n\t}\n\n\tif token.String() != expected {\n\t\tt.Errorf(\"unexpected %s, expecting %s\", token.String(), expected)\n\t}\n}", "func TestToken(t *testing.T) {\n\tkey := []byte(\"26BF237B95964852625A2C27988C3\")\n\tSetSecret(key)\n\tc := NewClaims(1, 15*time.Minute)\n\tc.SetIssuer(\"token_test\")\n\tc.SetSubject(\"test\")\n\ttok, err := c.Token()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tc, err = Decode(tok)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func TestInvalidEventRegex(t *testing.T) 
{\n\n\tassert := assert.New(t)\n\n\tw := webhook.W{\n\t\tUntil: time.Now().Add(60 * time.Second),\n\t\tEvents: []string{\"[[:123\"},\n\t}\n\tw.Config.URL = \"http://localhost:9999/foo\"\n\tw.Config.ContentType = \"application/json\"\n\n\tobs, err := OutboundSenderFactory{\n\t\tListener: w,\n\t\tClient: &http.Client{},\n\t\tNumWorkers: 10,\n\t\tQueueSize: 10,\n\t\tProfilerFactory: testServerProfilerFactory,\n\t\tLogger: getLogger(),\n\t}.New()\n\tassert.Nil(obs)\n\tassert.NotNil(err)\n\n}", "func (_Univ2 *Univ2Session) Token0() (common.Address, error) {\n\treturn _Univ2.Contract.Token0(&_Univ2.CallOpts)\n}", "func Test_LogoutInvalidToken(t *testing.T) {\n\tdir, err := tempConfig(\"\")\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tdefer os.RemoveAll(dir)\n\n\tmockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\tw.Write([]byte(``))\n\t}))\n\tdefer mockServer.Close()\n\n\tlogoutArgs := logoutArguments{\n\t\tapiEndpoint: mockServer.URL,\n\t\ttoken: \"test-token\",\n\t}\n\n\terr = logout(logoutArgs)\n\tif !IsNotAuthorizedError(err) {\n\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t}\n}", "func AssertASNoHandleTxAckRequest() Assertion {\n\treturn func(assert *require.Assertions, ts *IntegrationTestSuite) {\n\t\ttime.Sleep(100 * time.Millisecond)\n\t\tselect {\n\t\tcase <-ts.ASClient.HandleTxAckChan:\n\t\t\tassert.Fail(\"unexpected tx ack request\")\n\t\tdefault:\n\t\t}\n\t}\n}", "func Test_CanSign_NilInput(t *testing.T) {\n\n\t// prepare input\n\tvar transactionInput *TransactionInput\n\tvar unspentTransactions []*UnspentTransactionOutput\n\tvar publicKey = \"\"\n\n\t// call can sign\n\tresult := CanSign(unspentTransactions, transactionInput, publicKey)\n\n\t// result should false\n\tif result {\n\t\tt.Errorf(\"result of nil transaction should be false.\")\n\t}\n}", "func TestNextEventCancel(t *testing.T) 
{\n\tmaybeSkipIntegrationTest(t)\n\n\ts, err := NewSession()\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create session: %v\", err)\n\t}\n\tdefer s.Close()\n\n\tif err := s.Subscribe(\"ike-updown\"); err != nil {\n\t\tt.Fatalf(\"Failed to start event listener: %v\", err)\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)\n\tdefer cancel()\n\n\t_, err = s.NextEvent(ctx)\n\tif err != ctx.Err() {\n\t\tt.Fatalf(\"Expected to get context's timeout error after not receiving event, got: %v\", err)\n\t}\n}", "func TestBearerEmpty(t *testing.T) {\n\tdb.InitDB()\n\tvar router *gin.Engine = routes.SetupRouter()\n\n\tvar user models.UserCreate = utils.CreateUser(\"Tom\", \"qwerty1234\", t, router)\n\tuser.Token = utils.ConnectUser(\"Tom\", \"qwerty1234\", t, router)\n\n\tvar url string = \"/v1/user/\" + strconv.Itoa(user.ID)\n\tvar bearer = \"Bearer \"\n\trecord := httptest.NewRecorder()\n\trequest, _ := http.NewRequest(\"GET\", url, nil)\n\trequest.Header.Add(\"Content-Type\", \"application/json\")\n\trequest.Header.Add(\"Authorization\", bearer)\n\n\trouter.ServeHTTP(record, request)\n\n\tvar message Message\n\terr := json.Unmarshal([]byte(record.Body.String()), &message)\n\tif err != nil {\n\t\tlog.Fatal(\"Bad output: \", err.Error())\n\t\tt.Fail()\n\t}\n\n\tassert.Equal(t, record.Code, 403)\n\tassert.Equal(t, message.Message, \"Bad token\")\n\n\tuser.Token = utils.ConnectUser(\"Tom\", \"qwerty1234\", t, router)\n\tutils.CleanUser(user.ID, user.Token, t, router)\n\tdb.CloseDB()\n}", "func NewZeroEvent(x, y float64) Event {\n\treturn NewEvent(x, y, ButtonNone, \"\")\n}", "func TestBadToken(t *testing.T) {\n\tassert := assert.New(t)\n\n\tmockedTpmProvider := new(tpmprovider.MockedTpmProvider)\n\tmockedTpmProvider.On(\"Close\").Return(nil)\n\tmockedTpmProvider.On(\"NvIndexExists\", mock.Anything).Return(true, nil)\n\tmockedTpmProvider.On(\"NvRelease\", mock.Anything, mock.Anything).Return(nil)\n\tmockedTpmProvider.On(\"NvDefine\", mock.Anything, 
mock.Anything, mock.Anything).Return(nil)\n\tmockedTpmProvider.On(\"NvWrite\", mock.Anything, mock.Anything, mock.Anything).Return(nil)\n\n\tmockedTpmFactory := tpmprovider.MockedTpmFactory{TpmProvider: mockedTpmProvider}\n\n\ttrustAgentService, err := CreateTrustAgentService(CreateTestConfig(), mockedTpmFactory)\n\tassert.NoError(err)\n\n\t// setup TA service to use JWT-based authentication\n\ttrustAgentService.router = mux.NewRouter()\n\ttrustAgentService.router.Use(middleware.NewTokenAuth(\"../test/mockJWTDir\", \"../test/mockCACertsDir\", mockRetrieveJWTSigningCerts, cacheTime))\n\ttrustAgentService.router.HandleFunc(\"/v2/tag\", errorHandler(requiresPermission(setAssetTag(CreateTestConfig(), mockedTpmFactory), []string{postDeployTagPerm}))).Methods(\"POST\")\n\n\tjsonString := `{\"tag\" : \"tHgfRQED1+pYgEZpq3dZC9ONmBCZKdx10LErTZs1k/k=\", \"hardware_uuid\" : \"7a569dad-2d82-49e4-9156-069b0065b262\"}`\n\n\trequest, err := http.NewRequest(\"POST\", \"/v2/tag\", bytes.NewBuffer([]byte(jsonString)))\n\tassert.NoError(err)\n\n\trequest.Header.Set(\"Content-Type\", \"application/json\")\n\trequest.Header.Add(\"Authorization\", \"Bearer \"+TestBadJWTAuthToken)\n\n\trecorder := httptest.NewRecorder()\n\ttrustAgentService.router.ServeHTTP(recorder, request)\n\tresponse := recorder.Result()\n\tfmt.Printf(\"StatusCode: %d\\n\", response.StatusCode)\n\tassert.Equal(http.StatusUnauthorized, response.StatusCode)\n}", "func TestNatsAdaptorOnWhenConnectedWithAuth(t *testing.T) {\n\tt.Skip(\"TODO: implement this test without requiring actual server connection\")\n\ta := NewAdaptorWithAuth(\"localhost:4222\", 9999, \"test\", \"testwd\")\n\ta.Connect()\n\tgobottest.Assert(t, a.On(\"hola\", func(msg Message) {\n\t\tfmt.Println(\"hola\")\n\t}), true)\n}", "func TokenNoSession(token string) (string, error) {\n\tclient := gorequest.New().Get(tokenUrlWithNoSession()).\n\t\tAppendHeader(\"Authorization\", \"Bearer \"+token).\n\t\tTimeout(HttpTimeout * time.Second). 
//.\n\t\tQuery(\"from=opsportal\")\n\n\tresp, body, ierrors := client.End()\n\tif len(ierrors) != 0 {\n\t\treturn \"\", ierrors[0]\n\t}\n\n\tif !HttpOK(resp.StatusCode) {\n\t\treturn \"\", errors.Errorf(\"http code:%d body:%s\", resp.StatusCode, body)\n\t}\n\n\tvar lg TpaasTokenResp\n\terr := json.Unmarshal([]byte(body), &lg)\n\tif err != nil {\n\t\tfmt.Printf(\"jsnbody:%s\", body)\n\t\treturn \"\", errors.WithMessage(err, \"token verification response from tpaas is not json\")\n\t}\n\n\tif !HttpOK(lg.Code) {\n\t\treturn \"\", errors.Errorf(\"tpaas code:%d body:%s\", lg.Code, body)\n\t}\n\n\treturn lg.Data, nil\n}", "func TestTokenExpiracy(t *testing.T) {\n\tdb.InitDB()\n\tvar router *gin.Engine = routes.SetupRouter()\n\n\tos.Setenv(\"TOKEN_VALIDITY_MINUTES\", \"0\")\n\n\tvar user models.UserCreate = utils.CreateUser(\"Tom\", \"qwerty1234\", t, router)\n\tuser.Token = utils.ConnectUser(\"Tom\", \"qwerty1234\", t, router)\n\ttime.Sleep(1 * time.Second)\n\n\tvar url string = \"/v1/user/\" + strconv.Itoa(user.ID)\n\tvar bearer = \"Bearer \" + user.Token\n\trecord := httptest.NewRecorder()\n\trequest, _ := http.NewRequest(\"GET\", url, nil)\n\trequest.Header.Add(\"Content-Type\", \"application/json\")\n\trequest.Header.Add(\"Authorization\", bearer)\n\n\trouter.ServeHTTP(record, request)\n\n\tvar message Message\n\terr := json.Unmarshal([]byte(record.Body.String()), &message)\n\tif err != nil {\n\t\tlog.Fatal(\"Bad output: \", err.Error())\n\t\tt.Fail()\n\t}\n\n\tassert.Equal(t, record.Code, 401)\n\tassert.Equal(t, message.Message, \"Token expired.\")\n\n\tos.Setenv(\"TOKEN_VALIDITY_MINUTES\", \"15\")\n\n\tuser.Token = utils.ConnectUser(\"Tom\", \"qwerty1234\", t, router)\n\n\tutils.CleanUser(user.ID, user.Token, t, router)\n\tdb.CloseDB()\n}", "func sendEvent(client runner.RunnerClient, token string, key string) {\n\tlog.Println(\"sending event:\", key)\n\tif _, err := client.Event(context.Background(), &runner.EventRequest{\n\t\tKey: key,\n\t}); err != nil 
{\n\t\tlog.Fatal(err)\n\t}\n}", "func (p *EventProber) AssertReceivedNone(fromPrefix, toPrefix string) feature.StepFn {\n\treturn func(ctx context.Context, t feature.T) {\n\t\tevents := p.ReceivedBy(ctx, toPrefix)\n\t\tif len(events) > 0 {\n\t\t\tt.Errorf(\"expected %q to not have received any events from %s, actually received %d\",\n\t\t\t\ttoPrefix, fromPrefix, len(events))\n\t\t}\n\t}\n}", "func Test_LogoutValidToken(t *testing.T) {\n\tdir, err := tempConfig(\"\")\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tdefer os.RemoveAll(dir)\n\n\tmockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusOK)\n\t\tw.Write([]byte(`{\"status_code\": 10007, \"status_text\": \"Resource deleted\"}`))\n\t}))\n\tdefer mockServer.Close()\n\n\tlogoutArgs := logoutArguments{\n\t\tapiEndpoint: mockServer.URL,\n\t\ttoken: \"test-token\",\n\t}\n\n\terr = logout(logoutArgs)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n}", "func TestClearKeyEncodeSucceedIfNilId(t *testing.T) {\n\tmockClearKey := MakeClearKeyDecoded().SetNilContent().RandomizeValidValue().Get()\n\n\t_, err := mockClearKey.Encode()\n\n\tif err != nil {\n\t\tt.Errorf(\"this should not fail as id is nil\")\n\t}\n}", "func TestUserTokenPingSuccess(t *testing.T) {\n\tdb := setupDB()\n\tdefer db.Close()\n\trouter := setupRouter()\n\n\tw := httptest.NewRecorder()\n\n\treq, _ := http.NewRequest(\"GET\", \"/token/ping\", nil)\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\treq.Header.Add(\"Authorization\", \"Bearer \" + Token)\n\trouter.ServeHTTP(w, req)\n\n\tassert.Equal(t, http.StatusOK, w.Code)\n\t//\tsomething like {\"claim_id\":\"test001\",\"message\":\"pong\",\"username\":\"test001\"}\n\tassert.Contains(t, w.Body.String(), \"pong\")\n\tassert.Contains(t, w.Body.String(), kTestUserUsername)\n}", "func (s *TrackerSuite) TestStartNewEvent() {\n\n\tevent := 
s.service.StartNew()\n\tassert.NotEqual(s.T(), nil, event)\n}", "func SimulateIssueToken(k keeper.Keeper, ak authkeeper.AccountKeeper, bk types.BankKeeper) simtypes.Operation {\n\treturn func(\n\t\tr *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context,\n\t\taccs []simtypes.Account, chainID string,\n\t) (simtypes.OperationMsg, []simtypes.FutureOperation, error) {\n\n\t\ttoken, maxFees := genToken(ctx, r, k, ak, bk, accs)\n\n\t\tmsg := types.NewMsgIssueToken(token.GetName(), token.GetSymbol(), token.GetSmallestUnit(), token.GetDecimals(), \n\t\t\ttoken.GetInitialSupply(), token.GetTotalSupply(), token.GetMintable(), true, token.GetOwnerString())\n\n\t\tsimAccount, found := simtypes.FindAccount(accs, token.GetOwner())\n\t\tif !found {\n\t\t\treturn simtypes.NoOpMsg(types.ModuleName, msg.Type(), fmt.Sprintf(\"account[%s] does not found\", token.GetOwnerString())), \n\t\t\t\tnil, fmt.Errorf(\"account[%s] does not found\", token.GetOwnerString())\n\t\t}\n\n\t\towner, _ := sdk.AccAddressFromBech32(msg.Owner)\n\t\taccount := ak.GetAccount(ctx, owner)\n\t\tfees, err := simtypes.RandomFees(r, ctx, maxFees)\n\t\tif err != nil {\n\t\t\treturn simtypes.NoOpMsg(types.ModuleName, msg.Type(), \"unable to generate fees\"), nil, err\n\t\t}\n\n\t\ttxGen := simappparams.MakeTestEncodingConfig().TxConfig\n\t\ttx, err := helpers.GenTx(\n\t\t\ttxGen,\n\t\t\t[]sdk.Msg{msg},\n\t\t\tfees,\n\t\t\thelpers.DefaultGenTxGas,\n\t\t\tchainID,\n\t\t\t[]uint64{account.GetAccountNumber()},\n\t\t\t[]uint64{account.GetSequence()},\n\t\t\tsimAccount.PrivKey,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn simtypes.NoOpMsg(types.ModuleName, msg.Type(), \"unable to generate mock tx\"), nil, err\n\t\t}\n\n\t\tif _, _, err = app.Deliver(txGen.TxEncoder(), tx); err != nil {\n\t\t\treturn simtypes.NoOpMsg(types.ModuleName, msg.Type(), \"unable to deliver tx\"), nil, err\n\t\t}\n\n\t\treturn simtypes.NewOperationMsg(msg, true, \"simulate issue token\"), nil, nil\n\t}\n}", "func 
TestInitToken_Ensure_ExpectedToken_NotExisting(t *testing.T) {\n\tfv := NewFakeVault(t)\n\tdefer fv.Finish()\n\n\tfv.ExpectWrite()\n\n\ti := &InitToken{\n\t\tRole: \"etcd\",\n\t\tPolicies: []string{\"etcd\"},\n\t\tkubernetes: fv.Kubernetes(),\n\t\tExpectedToken: \"expected-token\",\n\t}\n\n\t// expect a new token creation\n\tfv.fakeToken.EXPECT().CreateOrphan(&tokenCreateRequestMatcher{ID: \"expected-token\"}).Return(&vault.Secret{\n\t\tAuth: &vault.SecretAuth{\n\t\t\tClientToken: \"expected-token\",\n\t\t},\n\t}, nil)\n\n\t// expect a read and vault says secret is not existing, then after it is written to return token\n\tinitTokenPath := \"test-cluster-inside/secrets/init_token_etcd\"\n\tgomock.InOrder(\n\t\tfv.fakeLogical.EXPECT().Read(initTokenPath).Return(\n\t\t\tnil,\n\t\t\tnil,\n\t\t).MinTimes(1),\n\t\t// expect a write of the new token from user flag\n\t\tfv.fakeLogical.EXPECT().Write(initTokenPath, map[string]interface{}{\"init_token\": \"expected-token\"}).Return(\n\t\t\tnil,\n\t\t\tnil,\n\t\t),\n\t\t// allow read out of token from user\n\t\tfv.fakeLogical.EXPECT().Read(initTokenPath).AnyTimes().Return(\n\t\t\t&vault.Secret{\n\t\t\t\tData: map[string]interface{}{\"init_token\": \"expected-token\"},\n\t\t\t},\n\t\t\tnil,\n\t\t),\n\t)\n\n\tfv.fakeToken.EXPECT().Lookup(\"expected-token\").Return(\n\t\tnil,\n\t\tnil,\n\t)\n\n\tfv.fakeToken.EXPECT().Renew(\"expected-token\", 0).Return(\n\t\tnil,\n\t\tnil,\n\t)\n\n\tInitTokenEnsure_EXPECTs(fv)\n\n\terr := i.Ensure()\n\tif err != nil {\n\t\tt.Error(\"unexpected error: \", err)\n\t}\n\n\ttoken, err := i.InitToken()\n\tif err != nil {\n\t\tt.Error(\"unexpected error: \", err)\n\t}\n\n\tif exp, act := \"expected-token\", token; exp != act {\n\t\tt.Errorf(\"unexpected token: act=%s exp=%s\", act, exp)\n\t}\n\n\treturn\n}", "func _TestRegisterNodeWithoutPulicKey(t *testing.T) {\n\t_, err := registerNodeSignedCall(\"\", 0, 0, \"virtual\", TESTHOST)\n\tassert.Error(t, err)\n}", "func TestNextEventAfterFailedSubscribe(t 
*testing.T) {\n\tdctx, dcancel := context.WithCancel(context.Background())\n\tdefer dcancel()\n\n\tconn := mockCharon(dctx)\n\n\ts, err := NewSession(withTestConn(conn))\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create session: %v\", err)\n\t}\n\tdefer s.Close()\n\n\t// This should result in an IO error, and if handled properly within\n\t// the event listener, the error should not be sent on the event channel.\n\ts.el.conn.Close()\n\tif err := s.Subscribe(\"test-event\"); err == nil {\n\t\tt.Fatalf(\"Expected error reading from closed transport\")\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)\n\tdefer cancel()\n\n\t_, err = s.NextEvent(ctx)\n\tif err != ctx.Err() {\n\t\tt.Fatalf(\"Expected to get timeout error, got: %v\", err)\n\t}\n}", "func (s *TrackerSuite) TestStartNil() {\n\n\tassert.Equal(s.T(), ErrorNil, s.service.Start(nil))\n}", "func TestFetchNull(t *testing.T) {\n\treader := bytes.NewReader([]byte(\"ull\"))\n\tlex := NewLexer(reader)\n\tif err := lex.fetchNull(); err != nil {\n\t\tt.Error(err.Error())\n\t\treturn\n\t}\n\n\texpected := \"null\"\n\ttoken := lex.tokens[0]\n\tif string(token.chars) != expected {\n\t\tt.Errorf(\"unexpected %s, expecting %s\", string(token.chars), expected)\n\t}\n}", "func (s *RingpopOptionsTestSuite) TestClockNil() {\n\trp, err := New(\"test\", Clock(nil))\n\ts.Nil(rp)\n\ts.Error(err)\n}", "func (_Univ2 *Univ2CallerSession) Token0() (common.Address, error) {\n\treturn _Univ2.Contract.Token0(&_Univ2.CallOpts)\n}", "func TestUploadCannotAcquireToken(t *testing.T) {\n\t// prepare uploader parameter\n\tlocalNodeId := uuid.New()\n\tsenderChan := make(chan data.ShareCommand)\n\tsender := NewShareSender(senderChan)\n\n\t// create uploader\n\tmaxUploads := 0\n\tuploader := NewShareUploader(localNodeId, maxUploads, sender)\n\n\t// prepare dirs\n\tdownloadDir, base := prepareDirs(t)\n\tdefer os.RemoveAll(downloadDir)\n\tdefer os.RemoveAll(base)\n\t// prepare shared file\n\tsf := 
createSharedFile(t, base)\n\tdefer os.Remove(sf.FilePath())\n\n\t// prepare download request of unknown chunk\n\tnodeId := uuid.New().String()\n\tchunkChecksum := sf.LocalChunksChecksums()[0]\n\trequest := data.NewDownloadRequest(sf.FileId(), nodeId, chunkChecksum)\n\n\t// start message reader for deny message\n\tdone := make(chan bool)\n\tgo readDenyUpload(t, done, senderChan, request)\n\n\t// start upload\n\tuploader.Upload(sf, chunkChecksum, nodeId, filepath.Join(downloadDir, sf.FileRelativePath()))\n\n\t// wait for message\n\t<-done\n}", "func TestSendENIStateChangeUnmanaged(t *testing.T) {\n\tmockCtrl := gomock.NewController(t)\n\tdefer mockCtrl.Finish()\n\n\tmockStateManager := mock_dockerstate.NewMockTaskEngineState(mockCtrl)\n\teventChannel := make(chan statechange.Event)\n\tctx := context.TODO()\n\n\tgomock.InOrder(\n\t\tmockStateManager.EXPECT().ENIByMac(randomMAC).Return(nil, false),\n\t)\n\n\twatcher := setupWatcher(ctx, nil, mockStateManager, eventChannel, primaryMAC)\n\n\tassert.Error(t, watcher.sendENIStateChange(randomMAC))\n}", "func (c *Client) MustGetToken() string {\n\ttoken, err := c.GetToken()\n\tif err != nil {\n\t\tfmt.Printf(\"ERROR: Could not get auth token. 
%s\\n\", err.Error())\n\t}\n\n\treturn token\n}", "func TestOAuthServiceAccountClientEvent(t *testing.T) {\n\n\ttests := map[string]struct {\n\t\tannotationPrefix string\n\t\tannotation string\n\t\texpectedEventReason string\n\t\texpectedEventMsg string\n\t\tnumEvents int\n\t\texpectBadRequest bool\n\t}{\n\t\t\"test-good-url\": {\n\t\t\tannotationPrefix: saoauth.OAuthRedirectModelAnnotationURIPrefix + \"one\",\n\t\t\tannotation: \"/oauthcallback\",\n\t\t\tnumEvents: 0,\n\t\t},\n\t\t\"test-bad-url\": {\n\t\t\tannotationPrefix: saoauth.OAuthRedirectModelAnnotationURIPrefix + \"one\",\n\t\t\tannotation: \"foo:foo\",\n\t\t\texpectedEventReason: \"NoSAOAuthRedirectURIs\",\n\t\t\texpectedEventMsg: \"system:serviceaccount:\" + projectName + \":\" + saName + \" has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.<some-value>=<redirect> or create a dynamic URI using serviceaccounts.openshift.io/oauth-redirectreference.<some-value>=<reference>\",\n\t\t\tnumEvents: 1,\n\t\t\texpectBadRequest: true,\n\t\t},\n\t\t\"test-bad-url-parse\": {\n\t\t\tannotationPrefix: saoauth.OAuthRedirectModelAnnotationURIPrefix + \"one\",\n\t\t\tannotation: \"::\",\n\t\t\texpectedEventReason: \"NoSAOAuthRedirectURIs\",\n\t\t\texpectedEventMsg: \"[parse ::: missing protocol scheme, system:serviceaccount:\" + projectName + \":\" + saName + \" has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.<some-value>=<redirect> or create a dynamic URI using serviceaccounts.openshift.io/oauth-redirectreference.<some-value>=<reference>]\",\n\t\t\tnumEvents: 1,\n\t\t\texpectBadRequest: true,\n\t\t},\n\t\t\"test-bad-redirect-annotation-kind\": {\n\t\t\tannotationPrefix: saoauth.OAuthRedirectModelAnnotationReferencePrefix + \"1\",\n\t\t\tannotation: `{\"kind\":\"foo\",\"apiVersion\":\"oauth.openshift.io/v1\",\"metadata\":{\"creationTimestamp\":null},\"reference\":{\"group\":\"foo\",\"kind\":\"Route\",\"name\":\"route1\"}}`,\n\t\t\texpectedEventReason: 
\"NoSAOAuthRedirectURIs\",\n\t\t\texpectedEventMsg: `[no kind \"foo\" is registered for version \"oauth.openshift.io/v1\" in scheme \"github.com/openshift/origin/pkg/serviceaccounts/oauthclient/oauthclientregistry.go:54\", system:serviceaccount:` + projectName + \":\" + saName + \" has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.<some-value>=<redirect> or create a dynamic URI using serviceaccounts.openshift.io/oauth-redirectreference.<some-value>=<reference>]\",\n\t\t\tnumEvents: 1,\n\t\t\texpectBadRequest: true,\n\t\t},\n\t\t\"test-bad-redirect-type-parse\": {\n\t\t\tannotationPrefix: saoauth.OAuthRedirectModelAnnotationReferencePrefix + \"1\",\n\t\t\tannotation: `{asdf\":\"adsf\"}`,\n\t\t\texpectedEventReason: \"NoSAOAuthRedirectURIs\",\n\t\t\texpectedEventMsg: `[couldn't get version/kind; json parse error: invalid character 'a' looking for beginning of object key string, system:serviceaccount:` + projectName + \":\" + saName + \" has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.<some-value>=<redirect> or create a dynamic URI using serviceaccounts.openshift.io/oauth-redirectreference.<some-value>=<reference>]\",\n\t\t\tnumEvents: 1,\n\t\t\texpectBadRequest: true,\n\t\t},\n\t\t\"test-bad-redirect-route-not-found\": {\n\t\t\tannotationPrefix: saoauth.OAuthRedirectModelAnnotationReferencePrefix + \"1\",\n\t\t\tannotation: buildRedirectObjectReferenceString(t, \"Route\", \"route1\", \"route.openshift.io\"),\n\t\t\texpectedEventReason: \"NoSAOAuthRedirectURIs\",\n\t\t\texpectedEventMsg: `[routes.route.openshift.io \"route1\" not found, system:serviceaccount:` + projectName + \":\" + saName + \" has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.<some-value>=<redirect> or create a dynamic URI using serviceaccounts.openshift.io/oauth-redirectreference.<some-value>=<reference>]\",\n\t\t\tnumEvents: 1,\n\t\t\texpectBadRequest: true,\n\t\t},\n\t\t\"test-bad-redirect-route-wrong-group\": 
{\n\t\t\tannotationPrefix: saoauth.OAuthRedirectModelAnnotationReferencePrefix + \"1\",\n\t\t\tannotation: buildRedirectObjectReferenceString(t, \"Route\", \"route1\", \"foo\"),\n\t\t\texpectedEventReason: \"NoSAOAuthRedirectURIs\",\n\t\t\texpectedEventMsg: `system:serviceaccount:` + projectName + \":\" + saName + \" has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.<some-value>=<redirect> or create a dynamic URI using serviceaccounts.openshift.io/oauth-redirectreference.<some-value>=<reference>\",\n\t\t\tnumEvents: 1,\n\t\t\texpectBadRequest: true,\n\t\t},\n\t\t\"test-bad-redirect-reference-kind\": {\n\t\t\tannotationPrefix: saoauth.OAuthRedirectModelAnnotationReferencePrefix + \"1\",\n\t\t\tannotation: buildRedirectObjectReferenceString(t, \"foo\", \"route1\", \"route.openshift.io\"),\n\t\t\texpectedEventReason: \"NoSAOAuthRedirectURIs\",\n\t\t\texpectedEventMsg: `system:serviceaccount:` + projectName + \":\" + saName + \" has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.<some-value>=<redirect> or create a dynamic URI using serviceaccounts.openshift.io/oauth-redirectreference.<some-value>=<reference>\",\n\t\t\tnumEvents: 1,\n\t\t\texpectBadRequest: true,\n\t\t},\n\t}\n\n\ttestServer, err := setupTestOAuthServer()\n\tif err != nil {\n\t\tt.Fatalf(\"error setting up test server: %s\", err)\n\t}\n\n\tdefer testServer.oauthServer.Close()\n\tdefer testserver.CleanupMasterEtcd(t, testServer.masterConfig)\n\n\tfor tcName, testCase := range tests {\n\t\tvar redirect string = testServer.oauthServer.URL + \"/oauthcallback\"\n\t\tif testCase.numEvents != 0 {\n\t\t\tredirect = testCase.annotation\n\t\t}\n\n\t\tt.Logf(\"%s: annotationPrefix %s, annotation %s\", tcName, testCase.annotationPrefix, testCase.annotation)\n\t\tsa, err := setupTestSA(testServer.clusterAdminKubeClient, testCase.annotationPrefix, redirect)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"%s: error setting up test SA: %s\", tcName, err)\n\t\t}\n\n\t\tsecret, err := 
setupTestSecrets(testServer.clusterAdminKubeClient, sa)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"%s: error setting up test secrets: %s\", tcName, err)\n\t\t}\n\n\t\trunTestOAuthFlow(t, testServer, sa, secret, redirect, testCase.expectBadRequest)\n\n\t\t// Check events with a short poll to stop flakes\n\t\tvar evList *kapi.EventList\n\t\terr = wait.Poll(time.Second, 5*time.Second, func() (bool, error) {\n\t\t\tevList, err = testServer.clusterAdminKubeClient.Core().Events(projectName).List(metav1.ListOptions{})\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tif len(evList.Items) < testCase.numEvents {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t\treturn true, nil\n\t\t})\n\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"%s: err polling for events\", tcName)\n\t\t}\n\n\t\tevents := collectEventsWithReason(evList, testCase.expectedEventReason)\n\n\t\tif testCase.numEvents != len(events) {\n\t\t\tt.Fatalf(\"%s: expected %d events, found %d\", tcName, testCase.numEvents, len(events))\n\t\t}\n\n\t\tif testCase.numEvents != 0 && events[0].Message != testCase.expectedEventMsg {\n\t\t\tt.Fatalf(\"%s: expected event message %s, got %s\", tcName, testCase.expectedEventMsg, events[0].Message)\n\t\t}\n\n\t\terr = testServer.clusterAdminKubeClient.Core().Events(projectName).DeleteCollection(&metav1.DeleteOptions{}, metav1.ListOptions{})\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"%s: error deleting events: %s\", tcName, err)\n\t\t}\n\t}\n}", "func Null(local *data.Local) error {\n\tif local.WS == nil {\n\t\treturn errors.New(\"websocket not connected\")\n\t}\n\n\tchatNull := chatctl.New()\n\tchatNull.Type = chat.ChatCtlType_CTL_TYPE_NULL\n\tchatNull.Userid = local.User\n\n\tdataNull, err := chatNull.Pack()\n\tmsgTrack := uuid.NewV4().String()\n\n\tmsgWrap, err := mMsgwrap.Pack(pbmsgwrap.MsgType_MSG_CHAT_CTL, pbmsgwrap.MsgSec_SEC_SIGN, local.Key, local.Sign, local.Cipher, msgTrack, &dataNull)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttracker := 
local.WS.AddTrack(msgTrack)\n\n\tif err := local.WS.Write(gwebsock.BinaryMessage, *msgWrap); err != nil {\n\t\treturn err\n\t}\n\n\tmsgData, err := tracker.ReadBlock()\n\tif err != nil {\n\t\treturn err\n\t}\n\t//msgData := <-tracker.Reply\n\n\twsmsg, err := mMsgwrap.Unpack(&msgData.MsgData, local.Sign, local.Cipher)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tchatNullReply := chatctl.New()\n\n\tif err := chatNullReply.Unpack(wsmsg.GetMsgData()); err != nil {\n\t\treturn err\n\t}\n\n\tif chatNullReply.Status != mChat.ChatCtlStatus_CTL_STATUS_OKAY {\n\t\treturn errors.New(\"chat null returned failed status\")\n\t}\n\n\tlocal.Peers[local.WorkerID].Online = true\n\n\treturn nil\n}", "func (fgs *FakeGraphSync) AssertNoPauseReceived(t *testing.T) {\n\trequire.Empty(t, fgs.pauses, \"should not receive pause request\")\n}", "func TestCorrectTokenPasses(t *testing.T) {\n\thand := New(http.HandlerFunc(succHand))\n\thand.SetFailureHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tt.Errorf(\"Test failed. Reason: %v\", Reason(r))\n\t}))\n\n\tserver := httptest.NewServer(hand)\n\tdefer server.Close()\n\n\t// issue the first request to get the token\n\tresp, err := http.Get(server.URL)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tcookie := getRespCookie(resp, CookieName)\n\tif cookie == nil {\n\t\tt.Fatal(\"Cookie was not found in the response.\")\n\t}\n\n\tfinalToken := b64encode(maskToken(b64decode(cookie.Value)))\n\n\tvals := [][]string{\n\t\t{\"name\", \"Jolene\"},\n\t\t{FormFieldName, finalToken},\n\t}\n\n\t// Constructing a custom request is suffering\n\treq, err := http.NewRequest(\"POST\", server.URL, formBodyR(vals))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\treq.Header.Set(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\treq.AddCookie(cookie)\n\n\tresp, err = http.DefaultClient.Do(req)\n\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif resp.StatusCode != 200 {\n\t\tt.Errorf(\"The request should have succeeded, but it didn't. 
Instead, the code was %d\",\n\t\t\tresp.StatusCode)\n\t}\n}", "func Test_SyncConsumersAndSubscriptions_ForEmptyTypes(t *testing.T) {\n\t// given\n\tcallback := func(m *nats.Msg) {}\n\tsubWithOneType := NewSubscriptionWithEmptyTypes()\n\n\t// when\n\tjs := JetStream{}\n\terr := js.syncConsumerAndSubscription(subWithOneType, callback)\n\n\t// then\n\tassert.NoError(t, err)\n}", "func TestInitToken_Ensure_NoExpectedToken_AlreadyExisting(t *testing.T) {\n\tfv := NewFakeVault(t)\n\tdefer fv.Finish()\n\n\tfv.ExpectWrite()\n\n\ti := &InitToken{\n\t\tRole: \"etcd\",\n\t\tPolicies: []string{\"etcd\"},\n\t\tkubernetes: fv.Kubernetes(),\n\t\tExpectedToken: \"\",\n\t}\n\n\t// expect a read and vault says secret is existing\n\tinitTokenPath := \"test-cluster-inside/secrets/init_token_etcd\"\n\tfv.fakeLogical.EXPECT().Read(initTokenPath).Return(\n\t\t&vault.Secret{\n\t\t\tData: map[string]interface{}{\"init_token\": \"existing-token\"},\n\t\t},\n\t\tnil,\n\t)\n\n\tfv.fakeToken.EXPECT().Lookup(\"existing-token\").Return(\n\t\tnil,\n\t\tnil,\n\t)\n\n\tfv.fakeToken.EXPECT().Renew(\"existing-token\", 0).Return(\n\t\tnil,\n\t\tnil,\n\t)\n\n\tInitTokenEnsure_EXPECTs(fv)\n\n\terr := i.Ensure()\n\tif err != nil {\n\t\tt.Error(\"unexpected error: \", err)\n\t}\n\n\ttoken, err := i.InitToken()\n\tif err != nil {\n\t\tt.Error(\"unexpected error: \", err)\n\t}\n\n\tif exp, act := \"existing-token\", token; exp != act {\n\t\tt.Errorf(\"unexpected token: act=%s exp=%s\", act, exp)\n\t}\n\n\treturn\n}", "func (fgs *FakeGraphSync) AssertNoResumeReceived(t *testing.T) {\n\trequire.Empty(t, fgs.resumes, \"should not receive resume request\")\n}", "func TestEmittingMessage(t *testing.T) {\n\tsink := make(chan bool, 1)\n\tclient := NewClient()\n\n\ttimeout, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)\n\tdefer cancel()\n\n\tclient.Subscribe(Before, func(ctx context.Context, message interface{}) {\n\t\tsink <- true\n\t})\n\n\tclient.Emit(context.Background(), Before, 
nil)\n\n\tselect {\n\tcase <-timeout.Done():\n\t\tt.Fatal(\"Timeout reached\")\n\tcase <-sink:\n\t}\n}", "func Test_Onu_StateMachine_dhcp_no_auth(t *testing.T) {\n\tt.Skip(\"Needs to be moved in the Service struct\")\n\tonu := createTestOnu()\n\n\tonu.InternalState.SetState(OnuStateEnabled)\n\tassert.Equal(t, onu.InternalState.Current(), OnuStateEnabled)\n\n\terr := onu.InternalState.Event(\"start_dhcp\")\n\tif err == nil {\n\t\tt.Fail()\n\t}\n\tassert.Equal(t, onu.InternalState.Current(), OnuStateEnabled)\n\tassert.Equal(t, err.Error(), \"transition canceled with error: cannot-go-to-dhcp-started-as-authentication-is-required\")\n}", "func TestEnumNull(t *testing.T) {\n\tclient := newQueriesClient(t)\n\tresult, err := client.EnumNull(context.Background(), nil)\n\trequire.NoError(t, err)\n\trequire.Zero(t, result)\n}", "func TestValidateBasicMsgCreateInvalidTokenArgumentGivesError(t *testing.T) {\n\tmessage := newValidMsgCreateBond()\n\tmessage.Token = \"123abc\" // starts with number\n\terr := message.ValidateBasic()\n\trequire.NotNil(t, err)\n\n\tmessage.Token = \"a\" // too short\n\terr = message.ValidateBasic()\n\trequire.NotNil(t, err)\n}", "func (_IUniswapV2Pair *IUniswapV2PairSession) Token0() (common.Address, error) {\r\n\treturn _IUniswapV2Pair.Contract.Token0(&_IUniswapV2Pair.CallOpts)\r\n}", "func (m *MockWebsocketAppInterface) CheckToken(userID int, csrfToken string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CheckToken\", userID, csrfToken)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func Test_GetCommentToken(t *testing.T) {\n\tparser := &Parser{}\n\trequire.Equal(t, \"\", parser.GetCommentToken())\n}", "func (_IUniswapV2Pair *IUniswapV2PairCallerSession) Token0() (common.Address, error) {\r\n\treturn _IUniswapV2Pair.Contract.Token0(&_IUniswapV2Pair.CallOpts)\r\n}", "func TestNilAccept(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\ttpt, err := createTpoolTester(t.Name())\n\tif err != nil 
{\n\t\tt.Fatal(err)\n\t}\n\tdefer tpt.Close()\n\n\terr = tpt.tpool.AcceptTransactionSet(nil)\n\tif err == nil {\n\t\tt.Error(\"no error returned when submitting nothing to the transaction pool\")\n\t}\n\terr = tpt.tpool.AcceptTransactionSet([]types.Transaction{})\n\tif err == nil {\n\t\tt.Error(\"no error returned when submitting nothing to the transaction pool\")\n\t}\n}", "func ParseEmptyToken(c *cli.Context) (*vela.Client, error) {\n\tlogrus.Debug(\"parsing tokenless Vela client from provided configuration\")\n\n\t// capture the address from the context\n\taddress := c.String(internal.FlagAPIAddress)\n\n\t// check if client address is set\n\tif len(address) == 0 {\n\t\treturn nil, fmt.Errorf(\"no client address provided\")\n\t}\n\n\tlogrus.Tracef(\"creating Vela client for %s\", address)\n\n\t// create a vela client from the provided address\n\treturn vela.NewClient(address, nil)\n}", "func TestGetToken(t *testing.T) {\n\tmc := MockClient{t: t}\n\tmc.DoFunc = validDo\n\tmc.GetFunc = validGet\n\tconfig := ClientConfig{\n\t\tScopes: []string{\"thing\"},\n\t\tOktaDomain: \"mockta.local\",\n\t\tHTTPClient: &mc,\n\t}\n\n\tclient, err := NewClient(config)\n\n\tif err != nil {\n\t\tt.Fatalf(\"Failed: %s\", err)\n\t}\n\n\t// Test surge of requests these should all use the same key\n\tresult := testConcurrency(client, 0, 100, t)\n\tif len(result) > 1 {\n\t\tt.Fatalf(\"Concurrency Test 1 Failed: got %d, want 1\\n\", len(result))\n\t}\n\n\t// Test renewals\n\tresult = testConcurrency(client, 1000, 10, t)\n\tif len(result) != 10 {\n\t\tt.Fatalf(\"Concurrency Test 2 Failed: got %d, want 10\\n\", len(result))\n\t}\n}", "func (suite *AuthSuite) TestAuthUnknownServiceMember() {\n\t// Set up: Prepare the session, goth.User, callback handler, http response\n\t// and request, landing URL, and pass them into authorizeUnknownUser\n\n\thandlerConfig := suite.HandlerConfig()\n\tappnames := handlerConfig.AppNames()\n\n\t// Prepare the session and session manager\n\tfakeToken := 
\"some_token\"\n\tsession := auth.Session{\n\t\tApplicationName: auth.MilApp,\n\t\tIDToken: fakeToken,\n\t\tHostname: appnames.MilServername,\n\t}\n\tsessionManager := handlerConfig.SessionManagers().Mil\n\tmockSender := setUpMockNotificationSender() // We should get an email for this activity\n\n\t// Prepare the goth.User to simulate the UUID and email that login.gov would\n\t// provide\n\tfakeUUID, _ := uuid.NewV4()\n\tuser := goth.User{\n\t\tUserID: fakeUUID.String(),\n\t\tEmail: \"[email protected]\",\n\t}\n\tctx := suite.SetupSessionContext(context.Background(), &session, sessionManager)\n\n\t// Call the function under test\n\tresult := authorizeUnknownUser(ctx, suite.AppContextWithSessionForTest(&session), user,\n\t\tsessionManager, mockSender)\n\tsuite.Equal(authorizationResultAuthorized, result)\n\tmockSender.(*mocks.NotificationSender).AssertNumberOfCalls(suite.T(), \"SendNotification\", 1)\n\n\t// Look up the user and service member in the test DB\n\tfoundUser, _ := models.GetUserFromEmail(suite.DB(), user.Email)\n\tserviceMemberID := session.ServiceMemberID\n\tserviceMember, _ := models.FetchServiceMemberForUser(suite.DB(), &session, serviceMemberID)\n\t// Look up the session token in the session store (this test uses the memory store)\n\tsessionStore := sessionManager.Store()\n\t_, existsBefore, _ := sessionStore.Find(foundUser.CurrentMilSessionID)\n\n\t// Verify service member exists and its ID is populated in the session\n\tsuite.NotEmpty(session.ServiceMemberID)\n\n\t// Verify session contains UserID that points to the newly-created user\n\tsuite.Equal(foundUser.ID, session.UserID)\n\n\t// Verify user's LoginGovEmail and LoginGovUUID match the values passed in\n\tsuite.Equal(user.Email, foundUser.LoginGovEmail)\n\tsuite.Equal(user.UserID, foundUser.LoginGovUUID.String())\n\n\t// Verify that the user's CurrentMilSessionID is not empty. The value is\n\t// generated randomly, so we can't test for a specific string. 
Any string\n\t// except an empty string is acceptable.\n\tsuite.NotEqual(\"\", foundUser.CurrentMilSessionID)\n\n\t// Verify the session token also exists in the session store\n\tsuite.Equal(true, existsBefore)\n\n\t// Verify the service member that was created is associated with the user\n\t// that was created\n\tsuite.Equal(foundUser.ID, serviceMember.UserID)\n}", "func RegisterToken(username string, token string) error {\n return nil;\n}", "func TestTokenCreateHandler2(t *testing.T) {\n\tapp, trx, down, err := models.NewAppForTest(nil, t)\n\tassert.Nil(t, err)\n\tdefer down(t)\n\ttestDBConn = trx\n\trouter := Routers()\n\n\tw := httptest.NewRecorder()\n\tapi := buildRoute(config.DefaultConfig.HTTP.APIPrefix, \"/token/create\")\n\tbody := fmt.Sprintf(\"appUid=%s&nonce=%s\", app.UID, models.RandomWithMD5(128))\n\tsign := SignStrWithSecret(body, app.Secret)\n\tbody = fmt.Sprintf(\"%s&sign=%s\", body, sign)\n\treq, _ := http.NewRequest(\"POST\", api, strings.NewReader(body))\n\treq.Header.Set(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\trouter.ServeHTTP(w, req)\n\tassert.Equal(t, http.StatusOK, w.Code)\n\n\tvar response = &Response{}\n\tjson := janitor.ConfigCompatibleWithStandardLibrary\n\tassert.Nil(t, json.Unmarshal(w.Body.Bytes(), response))\n\tassert.True(t, response.Success)\n\n\trespData := response.Data.(map[string]interface{})\n\trespAvailableTimes := respData[\"availableTimes\"].(float64)\n\tassert.Equal(t, -1, int(respAvailableTimes))\n\trespTokenValue := respData[\"token\"].(string)\n\tassert.Equal(t, 32, len(respTokenValue))\n\tassert.Nil(t, respData[\"ip\"])\n\trespReadOnly := respData[\"readOnly\"].(float64)\n\tassert.Equal(t, 0, int(respReadOnly))\n\trespReadPath := respData[\"path\"].(string)\n\tassert.Equal(t, \"/\", respReadPath)\n\tassert.Nil(t, respData[\"expiredAt\"])\n}" ]
[ "0.77936465", "0.7668459", "0.6160332", "0.6125415", "0.5994007", "0.5927848", "0.5891091", "0.56836843", "0.56730366", "0.56381816", "0.5634394", "0.55937874", "0.5510154", "0.54910815", "0.54667795", "0.5448074", "0.5422616", "0.5422521", "0.5380838", "0.535725", "0.53154534", "0.5314575", "0.5302139", "0.5295674", "0.5227538", "0.52138805", "0.5156368", "0.5135609", "0.5115686", "0.5112836", "0.50992", "0.5090915", "0.5088588", "0.50455755", "0.5044237", "0.50286883", "0.5010107", "0.49908218", "0.49841034", "0.49799126", "0.49758202", "0.49702987", "0.49700075", "0.49588296", "0.49119177", "0.48891166", "0.4883503", "0.48820776", "0.48764017", "0.48708737", "0.4864278", "0.48484293", "0.48453045", "0.484006", "0.48399183", "0.48397863", "0.48370537", "0.48368654", "0.48359475", "0.48319077", "0.4830379", "0.48259872", "0.4811245", "0.48071727", "0.47971988", "0.47916123", "0.4788431", "0.47853774", "0.47834256", "0.4778941", "0.4778303", "0.47737902", "0.47732276", "0.47712937", "0.4763022", "0.4761351", "0.47609973", "0.47604778", "0.47485608", "0.47438425", "0.47371343", "0.47338873", "0.4730984", "0.47275275", "0.47044075", "0.46976924", "0.46868846", "0.4686027", "0.4680231", "0.46733057", "0.46697277", "0.46661654", "0.46593717", "0.46372092", "0.4634166", "0.46322605", "0.4630972", "0.46295345", "0.46212032", "0.46205115" ]
0.8175172
0
Test that TokenEvent with an invalid cid causes panic.
func TestServiceTokenEventWithID_WithInvalidCID_CausesPanic(t *testing.T) { runTest(t, func(s *res.Service) { s.Handle("model", res.GetResource(func(r res.GetRequest) { r.NotFound() })) }, func(s *restest.Session) { restest.AssertPanic(t, func() { s.Service().TokenEventWithID("invalid.*.cid", "foo", nil) }) }) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestServiceTokenEvent_WithInvalidCID_CausesPanic(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.GetResource(func(r res.GetRequest) { r.NotFound() }))\n\t}, func(s *restest.Session) {\n\t\trestest.AssertPanic(t, func() {\n\t\t\ts.Service().TokenEvent(\"invalid.*.cid\", nil)\n\t\t})\n\t})\n}", "func TestInvalidConsensusChangeSubscription(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\tt.Parallel()\n\tcst, err := createConsensusSetTester(t.Name())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer cst.Close()\n\n\tms := newMockSubscriber()\n\tbadCCID := modules.ConsensusChangeID{255, 255, 255}\n\terr = cst.cs.ConsensusSetSubscribe(&ms, badCCID, cst.cs.tg.StopChan())\n\tif err != modules.ErrInvalidConsensusChangeID {\n\t\tt.Error(\"consensus set returning the wrong error during an invalid subscription:\", err)\n\t}\n\n\tcst.cs.mu.Lock()\n\tfor i := range cst.cs.subscribers {\n\t\tif cst.cs.subscribers[i] == &ms {\n\t\t\tt.Fatal(\"subscriber was not removed from subscriber list after an erroneus subscription\")\n\t\t}\n\t}\n\tcst.cs.mu.Unlock()\n}", "func TestServiceTokenEventWithID_WithNilToken_SendsNilToken(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.GetResource(func(r res.GetRequest) { r.NotFound() }))\n\t}, func(s *restest.Session) {\n\t\ts.Service().TokenEventWithID(mock.CID, \"foo\", nil)\n\t\ts.GetMsg().AssertTokenEventWithID(mock.CID, \"foo\", nil)\n\t})\n}", "func TestVnicContainer_Invalid(t *testing.T) {\n\tvnic, err := newContainerVnic(\"testcvnic\")\n\n\tif err = vnic.getDevice(); err == nil {\n\t\tt.Errorf(\"Non existent device: %v\", vnic)\n\t}\n\tif !strings.HasPrefix(err.Error(), \"vnic error\") {\n\t\tt.Errorf(\"Invalid error format %v\", err)\n\t}\n\n\tif err = vnic.enable(); err == nil {\n\t\tt.Errorf(\"Non existent device: %v\", vnic)\n\t}\n\tif !strings.HasPrefix(err.Error(), \"vnic error\") {\n\t\tt.Errorf(\"Invalid error format %v\", err)\n\t}\n\n\tif 
err = vnic.disable(); err == nil {\n\t\tt.Errorf(\"Non existent device: %v\", vnic)\n\t}\n\tif !strings.HasPrefix(err.Error(), \"vnic error\") {\n\t\tt.Errorf(\"Invalid error format %v\", err)\n\t}\n\n\tif err = vnic.destroy(); err == nil {\n\t\tt.Errorf(\"Non existent device: %v\", vnic)\n\t}\n\tif !strings.HasPrefix(err.Error(), \"vnic error\") {\n\t\tt.Errorf(\"Invalid error format %v\", err)\n\t}\n\n}", "func TestAuthRequestNilTokenEvent(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.Auth(\"method\", func(r res.AuthRequest) {\n\t\t\tr.TokenEvent(nil)\n\t\t\tr.OK(nil)\n\t\t}))\n\t}, func(s *restest.Session) {\n\t\treq := s.Auth(\"test.model\", \"method\", nil)\n\t\ts.GetMsg().\n\t\t\tAssertTokenEvent(mock.CID, nil)\n\t\treq.Response().\n\t\t\tAssertResult(nil)\n\t})\n}", "func TestInvalidCollectInterval(t *testing.T) {\n\tensureProcessExit(t, \"TestNoDatadogAPIKey\",\n\t\tfalse, \"invalid duration\",\n\t\t\"C2D_COLLECT_INTERVAL=some_bogus_value\")\n}", "func TestBadToken(t *testing.T) {\n\n\t// Run the command with a bad token value\n\toutput := executeCommand(\"123\")\n\n\t// We should have a subcommand required command and a complete usage dump\n\trequire.NotNil(t, executeError, \"there should have been an error\")\n\trequire.Condition(t, func() bool {\n\t\treturn checkForExpectedSTSCallFailure(executeError)\n\t}, \"Error should have complained about nonexistent credentials file or invalid MFA token length\")\n\n\trequire.Empty(t, output, \"Output for an error condition should have been empty\")\n}", "func TestInvalidToValidSubscription(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\tt.Parallel()\n\tcst, err := createConsensusSetTester(t.Name())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer cst.Close()\n\n\t// Start by performing a bad subscribe.\n\tms := newMockSubscriber()\n\tbadCCID := modules.ConsensusChangeID{255, 255, 255}\n\terr = cst.cs.ConsensusSetSubscribe(&ms, badCCID, 
cst.cs.tg.StopChan())\n\tif err != modules.ErrInvalidConsensusChangeID {\n\t\tt.Error(\"consensus set returning the wrong error during an invalid subscription:\", err)\n\t}\n\n\t// Perform a correct subscribe.\n\terr = cst.cs.ConsensusSetSubscribe(&ms, modules.ConsensusChangeBeginning, cst.cs.tg.StopChan())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Mine a block and check that the mock subscriber only got a single\n\t// consensus change.\n\tnumPrevUpdates := len(ms.updates)\n\t_, err = cst.miner.AddBlock()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif len(ms.updates) != numPrevUpdates+1 {\n\t\tt.Error(\"subscriber received two consensus changes for a single block\")\n\t}\n}", "func TestVnic_Invalid(t *testing.T) {\n\tvnic, err := newVnic(\"testvnic\")\n\n\tif err = vnic.getDevice(); err == nil {\n\t\tt.Errorf(\"Non existent device: %v\", vnic)\n\t}\n\tif !strings.HasPrefix(err.Error(), \"vnic error\") {\n\t\tt.Errorf(\"Invalid error format %v\", err)\n\t}\n\n\tif err = vnic.enable(); err == nil {\n\t\tt.Errorf(\"Non existent device: %v\", vnic)\n\t}\n\tif !strings.HasPrefix(err.Error(), \"vnic error\") {\n\t\tt.Errorf(\"Invalid error format %v\", err)\n\t}\n\n\tif err = vnic.disable(); err == nil {\n\t\tt.Errorf(\"Non existent device: %v\", vnic)\n\t}\n\tif !strings.HasPrefix(err.Error(), \"vnic error\") {\n\t\tt.Errorf(\"Invalid error format %v\", err)\n\t}\n\n\tif err = vnic.destroy(); err == nil {\n\t\tt.Errorf(\"Non existent device: %v\", vnic)\n\t}\n\tif !strings.HasPrefix(err.Error(), \"vnic error\") {\n\t\tt.Errorf(\"Invalid error format %v\", err)\n\t}\n\n}", "func TestConnectInvalidAddr(t *testing.T) {\n\t// connect\n\tctx := createContext(t, time.Second*20)\n\n\t_, errConnect := base.NewMilvusClient(ctx, client.Config{Address: \"aa\"})\n\tcommon.CheckErr(t, errConnect, false, \"context deadline exceeded\")\n}", "func TestServiceTokenEvent_WithNilToken_SendsNilToken(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", 
res.GetResource(func(r res.GetRequest) { r.NotFound() }))\n\t}, func(s *restest.Session) {\n\t\ts.Service().TokenEvent(mock.CID, nil)\n\t\ts.GetMsg().AssertTokenEvent(mock.CID, nil)\n\t})\n}", "func contextInvalid(context string) bool {\n\tif context != \".\" && !strings.Contains(context, \"cx-\") {\n\t\tlog.Warn(\"Context is malformed.\", \"context\", context)\n\t\treturn true\n\t}\n\treturn false\n}", "func testBatchCTXInvalidAddenda(t testing.TB) {\n\tmockBatch := NewBatchCTX(mockBatchCTXHeader())\n\tmockBatch.AddEntry(mockCTXEntryDetail())\n\taddenda05 := mockAddenda05()\n\taddenda05.TypeCode = \"63\"\n\tmockBatch.GetEntries()[0].AddAddenda05(addenda05)\n\tmockBatch.Entries[0].AddendaRecordIndicator = 1\n\terr := mockBatch.Create()\n\tif !base.Match(err, ErrAddendaTypeCode) {\n\t\tt.Errorf(\"%T: %s\", err, err)\n\t}\n}", "func TestNewIDAllocatorInvalidArgs(t *testing.T) {\n\tdefer leaktest.AfterTest(t)()\n\targs := [][]uint32{\n\t\t{0, 10}, // minID <= 0\n\t\t{2, 0}, // blockSize < 1\n\t}\n\tfor i := range args {\n\t\tif _, err := newIDAllocator(nil, nil, args[i][0], args[i][1], nil); err == nil {\n\t\t\tt.Errorf(\"expect to have error return, but got nil\")\n\t\t}\n\t}\n}", "func CheckTheValidityOfTheToken(token string) (newToken string, err error) {\n\n err = checkInit()\n if err != nil {\n return\n }\n\n err = createError(011)\n\n if v, ok := tokens[token]; ok {\n var expires = v.(map[string]interface{})[\"expires\"].(time.Time)\n var userID = v.(map[string]interface{})[\"id\"].(string)\n\n if expires.Sub(time.Now().Local()) < 0 {\n return\n }\n\n newToken = setToken(userID, token)\n\n err = nil\n\n } else {\n return\n }\n\n return\n}", "func ErrInvalidVin(codespace sdk.CodespaceType) sdk.Error {\n\treturn sdk.NewError(codespace, InvalidVin, InvalidVinMessage)\n}", "func TestAuthResource_WithInvalidRID_CausesPanic(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.Auth(\"method\", func(r res.AuthRequest) 
{\n\t\t\trestest.AssertPanicNoRecover(t, func() {\n\t\t\t\tr.Resource(\"test..foo\")\n\t\t\t})\n\t\t}))\n\t}, func(s *restest.Session) {\n\t\ts.Auth(\"test.model\", \"method\", nil).\n\t\t\tResponse().\n\t\t\tAssertErrorCode(res.CodeInternalError)\n\t})\n}", "func (s *SocketModeAdapter) onInvalidAuth(info *adapter.Info) *adapter.ProviderEvent {\n\treturn s.wrapEvent(\n\t\tadapter.EventAuthenticationError,\n\t\tinfo,\n\t\t&adapter.AuthenticationErrorEvent{\n\t\t\tMsg: fmt.Sprintf(\"Connection failed to %s: invalid credentials\", s.provider.Name),\n\t\t},\n\t)\n}", "func checkInvalidTx(t *testing.T, anteHandler sdk.AnteHandler, ctx sdk.Context, tx sdk.Tx, simulate bool, code sdk.CodeType) {\n\tnewCtx, result, abort := anteHandler(ctx, tx, simulate)\n\trequire.True(t, abort)\n\trequire.Equal(t, code, result.Code, fmt.Sprintf(\"Expected %v, got %v\", code, result))\n\trequire.Equal(t, sdk.CodespaceRoot, result.Codespace)\n\n\tif code == sdk.CodeOutOfGas {\n\t\tstdTx, ok := tx.(StdTx)\n\t\trequire.True(t, ok, \"tx must be in form auth.StdTx\")\n\t\t// GasWanted set correctly\n\t\trequire.Equal(t, stdTx.Fee.Gas, result.GasWanted, \"Gas wanted not set correctly\")\n\t\trequire.True(t, result.GasUsed > result.GasWanted, \"GasUsed not greated than GasWanted\")\n\t\t// Check that context is set correctly\n\t\trequire.Equal(t, result.GasUsed, newCtx.GasMeter().GasConsumed(), \"Context not updated correctly\")\n\t}\n}", "func TestInvalidDatadogAPIKey(t *testing.T) {\n\tensureProcessExit(t, \"TestNoDatadogAPIKey\",\n\t\tfalse, \"Invalid Datadog API key\",\n\t\t\"DATADOG_API_KEY=consul2dogstats_bogus_key\")\n}", "func TestWithContractAuthErrors(t *testing.T) {\n\tvar expSTType errors.StackTrace\n\n\targs := []string{mock.Anything}\n\n\ttests := []struct {\n\t\tcRef string\n\t\tc rbac.ContractFunc\n\t\texpSC int32\n\t\texpC int32\n\t\tmsg string\n\t\tcidRoles string\n\t\tcidFound bool\n\t\tcidErr error\n\t}{\n\t\t{\n\t\t\tcRef: contractCreateTransfer,\n\t\t\tc: 
mockContract,\n\t\t\texpSC: http.StatusUnauthorized,\n\t\t\texpC: rbac.CodeErrAuthentication,\n\t\t\tmsg: \"when an error is returned from the CID\",\n\t\t\tcidRoles: mock.Anything,\n\t\t\tcidFound: false,\n\t\t\tcidErr: errors.New(\"some err from cid\"),\n\t\t},\n\t\t{\n\t\t\tcRef: contractCreateTransfer,\n\t\t\tc: mockContract,\n\t\t\texpSC: http.StatusForbidden,\n\t\t\texpC: rbac.CodeErrRoles,\n\t\t\tmsg: \"when the roleAttr is not found in the identity\",\n\t\t\tcidRoles: mock.Anything,\n\t\t\tcidFound: false,\n\t\t\tcidErr: nil,\n\t\t},\n\t\t{\n\t\t\tcRef: contractCreateTransfer,\n\t\t\tc: mockContract,\n\t\t\texpSC: http.StatusForbidden,\n\t\t\texpC: rbac.CodeErrContract,\n\t\t\tmsg: \"when the role is not found in the permissions map\",\n\t\t\tcidRoles: \"anUnknownRole\",\n\t\t\tcidFound: true,\n\t\t\tcidErr: nil,\n\t\t},\n\t\t{\n\t\t\tcRef: contractCreateTransfer,\n\t\t\tc: mockContract,\n\t\t\texpSC: http.StatusForbidden,\n\t\t\texpC: rbac.CodeErrContract,\n\t\t\tmsg: \"when contract invocation is not allowed\",\n\t\t\tcidRoles: \"user\",\n\t\t\tcidFound: true,\n\t\t\tcidErr: nil,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tstub := initEmptyStub()\n\t\tcid := new(mockCID)\n\t\tcid.On(\"GetAttributeValue\", mock.Anything).Return(tt.cidRoles, tt.cidFound, tt.cidErr)\n\t\tcid.On(\"GetID\", mock.Anything).Return(mock.Anything)\n\n\t\tappAuth, err := rbac.New(stub, cid, getRolePerms(), \"roles\")\n\t\t// If the New constructor didn't fail\n\t\tif err == nil {\n\t\t\t_, err = appAuth.WithContractAuth(tt.cRef, args, tt.c)\n\t\t}\n\n\t\tassert.Implements(t, (*error)(nil), err)\n\t\tassert.Implements(t, (*rbac.AuthErrorInterface)(nil), err)\n\t\tassert.IsType(t, (string)(\"\"), err.Error())\n\n\t\tif assert.Error(t, err) {\n\t\t\tt.Logf(\"Should return an error with code %v and HTTP status code %v %v\\nmsg: %v\", tt.expC, tt.expSC, tt.msg, err)\n\n\t\t\tif e, ok := err.(rbac.AuthErrorInterface); ok {\n\t\t\t\tassert.Equal(t, tt.expC, 
e.Code())\n\t\t\t\tassert.Equal(t, tt.expSC, e.StatusCode())\n\t\t\t\tassert.IsType(t, expSTType, e.StackTrace())\n\t\t\t}\n\t\t}\n\t}\n}", "func TestBadKVDef(t *testing.T) {\n\tinput := \"badentry\"\n\tbr := bufio.NewReader(strings.NewReader(input))\n\tp := newParser(br)\n\t_, err := p.NextValue()\n\tif err.(*ParseError).Code() != ErrInvalidEntry {\n\t\tt.Fatalf(\"expected err=ErrInvalidEntry actual=%s\", err)\n\t}\n}", "func checkInvalidTx(t *testing.T, anteHandler sdk.AnteHandler, ctx sdk.Context, tx sdk.Tx, simulate bool, code sdk.CodeType) {\n\t_, result, abort := anteHandler(ctx, tx, simulate)\n\trequire.True(t, abort, \"abort, expected: true, got: false\")\n\n\trequire.Equal(t, code, result.Code, fmt.Sprintf(\"Expected %v, got %v\", code, result))\n\trequire.Equal(t, sdk.CodespaceRoot, result.Codespace, \"code not match\")\n\n\t// if code == sdk.CodeOutOfGas {\n\t// stdTx, ok := tx.(auth.StdTx)\n\t// require.True(t, ok, \"tx must be in form auth.StdTx\")\n\t// GasWanted set correctly\n\t// require.Equal(t, stdTx.Fee.GasWanted, result.GasWanted, \"Gas wanted not set correctly\")\n\t// require.True(t, result.GasUsed > result.GasWanted, \"GasUsed not greated than GasWanted\")\n\t// Check that context is set correctly\n\t// require.Equal(t, result.GasUsed, newCtx.GasMeter().GasConsumed(), \"Context not updated correctly\")\n\t// }\n}", "func TestClientAuthInvalidPublickey(t *testing.T) {\n\tkc := new(keychain)\n\tkc.keys = append(kc.keys, dsakey)\n\tconfig := &ClientConfig{\n\t\tUser: \"testuser\",\n\t\tAuth: []ClientAuth{\n\t\t\tClientAuthKeyring(kc),\n\t\t},\n\t}\n\n\tc, err := Dial(\"tcp\", newMockAuthServer(t), config)\n\tif err == nil {\n\t\tc.Close()\n\t\tt.Fatalf(\"dsa private key should not have authenticated with rsa public key\")\n\t}\n}", "func TestAgentFailsRequestWithoutToken(t *testing.T) {\n\tif *skip {\n\t\tt.Skip(\"Test is skipped until Citadel agent is setup in test.\")\n\t}\n\tclient, err := 
sdsc.NewClient(sdsc.ClientOptions{\n\t\tServerAddress: *sdsUdsPath,\n\t})\n\tif err != nil {\n\t\tt.Errorf(\"failed to create sds client\")\n\t}\n\tclient.Start()\n\tdefer client.Stop()\n\tclient.Send()\n\terrmsg := \"no credential token\"\n\t_, err = client.WaitForUpdate(3 * time.Second)\n\tif err == nil || strings.Contains(err.Error(), errmsg) {\n\t\tt.Errorf(\"got [%v], want error with substring [%v]\", err, errmsg)\n\t}\n}", "func (err BadCrtcError) BadId() uint32 {\n\treturn 0\n}", "func testValidateEDTransactionCode(t testing.TB) {\n\ted := mockEntryDetail()\n\ted.TransactionCode = 63\n\terr := ed.Validate()\n\tif !base.Match(err, ErrTransactionCode) {\n\t\tt.Errorf(\"%T: %s\", err, err)\n\t}\n}", "func TestCorruptedTokenLogin(t *testing.T) {\n\t// @todo this test is disabled now because it was\n\t// built on internal assumptions that no longer hold and not so easy to access anymore\n\t// TrySuite(t, testCorruptedLogin, retryCount)\n}", "func TestConsulStateDriverInitInvalidConfig(t *testing.T) {\n\tdriver := &ConsulStateDriver{}\n\tcommonTestStateDriverInitInvalidConfig(t, driver)\n}", "func TestInvalidClient(t *testing.T) {\n\tassert := assert.New(t)\n\n\tw := webhook.W{\n\t\tUntil: time.Now().Add(60 * time.Second),\n\t\tEvents: []string{\"iot\"},\n\t}\n\tw.Config.URL = \"http://localhost:9999/foo\"\n\tw.Config.ContentType = \"application/json\"\n\n\tobs, err := OutboundSenderFactory{\n\t\tListener: w,\n\t\tCutOffPeriod: time.Second,\n\t\tNumWorkers: 10,\n\t\tQueueSize: 10,\n\t\tProfilerFactory: testServerProfilerFactory,\n\t\tLogger: getLogger(),\n\t}.New()\n\tassert.Nil(obs)\n\tassert.NotNil(err)\n}", "func TestProcessTokenWithBadSignNew(t *testing.T) {\n\tconfig.SetConfigurationFromFile(\"../../../config/config-test.json\")\n\n\t// build the information of the course to be created (in a simplified way)\n\tjsonBody := simplejson.New()\n\tjsonBody.Set(\"name\", \"corso\")\n\n\t// generate a token to be appended to the course creation request\n\tuser := 
microservice.User{Name: \"nome\", Surname: \"cognome\", Username: \"username\", Password: \"password\", Type: \"teacher\", Mail: \"[email protected]\"}\n\ttoken, _ := microservice.GenerateAccessToken(user, []byte(\"wrong-signing-key\"))\n\n\t// make the POST request for the course creation\n\trequestBody, _ := jsonBody.MarshalJSON()\n\trequest, _ := http.NewRequest(http.MethodPost, \"/didattica-mobile/api/v1.0/courses\", bytes.NewBuffer(requestBody))\n\trequest.Header.Set(\"Content-Type\", \"application/json\")\n\trequest.AddCookie(&http.Cookie{Name: \"token\", Value: token})\n\n\tresponse := httptest.NewRecorder()\n\thandler := createTestGatewayCreateCourse()\n\t// Goroutines represent the micro-services listening to the requests coming from the api gateway\n\tgo mock.LaunchCourseManagementMock()\n\tgo mock.LaunchNotificationManagementMock()\n\t// simulates a request-response interaction between client and api gateway\n\thandler.ServeHTTP(response, request)\n\n\tif response.Code != http.StatusUnauthorized {\n\t\tt.Error(\"Expected 401 Unauthorized but got \" + strconv.Itoa(response.Code) + \" \" + http.StatusText(response.Code))\n\t}\n}", "func TestNewMovieErrorUuid(t *testing.T) {\n\tt.Helper()\n\n\tu := newValidUser()\n\twantError := errs.E(errs.Validation, errs.Parameter(\"ID\"), errors.New(errs.MissingField(\"ID\").Error()))\n\tif gotMovie, gotError := movie.NewMovie(uuid.UUID{}, \"randomExternalId\", u); !reflect.DeepEqual(wantError.Error(), gotError.Error()) && gotMovie != nil {\n\t\tt.Errorf(\"Want: %v\\nGot: %v\", wantError, gotError)\n\t}\n}", "func ErrRegisterExpiredEvent(unixTime int64) sdk.Error {\n\treturn types.NewError(types.CodeRegisterExpiredEvent, fmt.Sprintf(\"register event at expired time %v\", unixTime))\n}", "func TestBatchCTXInvalidAddenda(t *testing.T) {\n\ttestBatchCTXInvalidAddenda(t)\n}", "func TestInvalidAddress(t *testing.T) {\n\tt.Run(\"FetchChain\", func(t *testing.T) {\n\t\t_, err := FetchChain(\"iamateapot:418\")\n\t\tif err == 
nil {\n\t\t\tt.Errorf(\"Expected failure when calling with an invalid address, err is nil\")\n\t\t}\n\t})\n\n\tt.Run(\"Expired\", func(t *testing.T) {\n\t\t_, err := Expired(\"iamateapot:418\")\n\t\tif err == nil {\n\t\t\tt.Errorf(\"Expected failure when calling with an invalid address, err is nil\")\n\t\t}\n\t})\n\n\tt.Run(\"ExiresWithinDays\", func(t *testing.T) {\n\t\t_, err := ExpiresWithinDays(\"iamateapot:418\", 30)\n\t\tif err == nil {\n\t\t\tt.Errorf(\"Expected failure when calling with an invalid address, err is nil\")\n\t\t}\n\t})\n\n\tt.Run(\"ExpiresBeforeDate\", func(t *testing.T) {\n\t\t_, err := ExpiresBeforeDate(\"iamateapot:418\", time.Now())\n\t\tif err == nil {\n\t\t\tt.Errorf(\"Expected failure when calling with an invalid address, err is nil\")\n\t\t}\n\t})\n}", "func TestInvalidJSON(t *testing.T) {\n\tjson := `{\"action\":jump\", \"time\":100}`\n\t_, err := ParseEventJSON(json)\n\tif err == nil {\n\t\tt.Errorf(\"JSON parsing of %v should have generated error, but didn't\", json)\n\t}\n}", "func TestAuthRequestTokenEvent(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.Auth(\"method\", func(r res.AuthRequest) {\n\t\t\tr.TokenEvent(mock.Token)\n\t\t\tr.OK(nil)\n\t\t}))\n\t}, func(s *restest.Session) {\n\t\treq := s.Auth(\"test.model\", \"method\", nil)\n\t\ts.GetMsg().\n\t\t\tAssertTokenEvent(mock.CID, mock.Token)\n\t\treq.Response().\n\t\t\tAssertResult(nil)\n\t})\n}", "func TestMalformedEvent(t *testing.T) {\n\ttestCases := []string{\n\t\t\"\",\n\t\t\"HTTP/1.1 200 OK\",\n\t\t\" \",\n\t\t\"\\x00\",\n\t}\n\n\tfor i, testCase := range testCases {\n\t\t_, kw, body := splitEvent(testCase)\n\t\tevent := upgradeEvent(kw, body)\n\n\t\tvar malformed MalformedEvent\n\t\tvar ok bool\n\t\tif malformed, ok = event.(MalformedEvent); !ok {\n\t\t\tt.Errorf(\"test %d got %T; want %T\", i, event, malformed)\n\t\t\tcontinue\n\t\t}\n\n\t\twantString := fmt.Sprintf(\"Malformed Event %q\", testCase)\n\t\tif gotString := 
malformed.String(); gotString != wantString {\n\t\t\tt.Errorf(\"test %d String returned %q; want %q\", i, gotString, wantString)\n\t\t}\n\t}\n}", "func TestGetUserIDInvalid(t *testing.T) {\n\tts := initAPITestServer(t)\n\tdefer test.CloseServer(ts)\n\n\tinvalidUsername := \"not_\" + username\n\tid, err := GetUserID(invalidUsername)\n\tif err == nil || err.Error() != \"Username not found\" {\n\t\tt.Fatalf(\"Expected error\")\n\t}\n\tif id != \"\" {\n\t\tt.Fatalf(\"Expected empty userID\")\n\t}\n}", "func testBatchCTXInvalidBuild(t testing.TB) {\n\tmockBatch := mockBatchCTX(t)\n\tmockBatch.GetHeader().ServiceClassCode = 3\n\terr := mockBatch.Create()\n\tif !base.Match(err, ErrServiceClass) {\n\t\tt.Errorf(\"%T: %s\", err, err)\n\t}\n}", "func TestEditWithInvalidToken(t *testing.T) {\n\tcrud := moc.NewLoadedCRUD()\n\thandler := createGqlHandler(crud)\n\tassert := assert.New(t)\n\n\t// prepare query\n\tquery := fmt.Sprintf(`\n\t\tmutation{\n\t\t\tedit(token: \"%s\"){\n\t\t\t\t... on Editor{}\n\t\t\t}\n\t\t}\n\t`, \"bad_token\")\n\n\t// request\n\tresponse, err := gqlRequestAndRespond(handler, query, nil)\n\tfailOnError(assert, err)\n\tassert.Contains(response, \"errors\", msgNoError)\n}", "func TestTokenExpiracy(t *testing.T) {\n\tdb.InitDB()\n\tvar router *gin.Engine = routes.SetupRouter()\n\n\tos.Setenv(\"TOKEN_VALIDITY_MINUTES\", \"0\")\n\n\tvar user models.UserCreate = utils.CreateUser(\"Tom\", \"qwerty1234\", t, router)\n\tuser.Token = utils.ConnectUser(\"Tom\", \"qwerty1234\", t, router)\n\ttime.Sleep(1 * time.Second)\n\n\tvar url string = \"/v1/user/\" + strconv.Itoa(user.ID)\n\tvar bearer = \"Bearer \" + user.Token\n\trecord := httptest.NewRecorder()\n\trequest, _ := http.NewRequest(\"GET\", url, nil)\n\trequest.Header.Add(\"Content-Type\", \"application/json\")\n\trequest.Header.Add(\"Authorization\", bearer)\n\n\trouter.ServeHTTP(record, request)\n\n\tvar message Message\n\terr := json.Unmarshal([]byte(record.Body.String()), &message)\n\tif err != nil 
{\n\t\tlog.Fatal(\"Bad output: \", err.Error())\n\t\tt.Fail()\n\t}\n\n\tassert.Equal(t, record.Code, 401)\n\tassert.Equal(t, message.Message, \"Token expired.\")\n\n\tos.Setenv(\"TOKEN_VALIDITY_MINUTES\", \"15\")\n\n\tuser.Token = utils.ConnectUser(\"Tom\", \"qwerty1234\", t, router)\n\n\tutils.CleanUser(user.ID, user.Token, t, router)\n\tdb.CloseDB()\n}", "func TestValidateBasicMsgCreateInvalidTokenArgumentGivesError(t *testing.T) {\n\tmessage := newValidMsgCreateBond()\n\tmessage.Token = \"123abc\" // starts with number\n\terr := message.ValidateBasic()\n\trequire.NotNil(t, err)\n\n\tmessage.Token = \"a\" // too short\n\terr = message.ValidateBasic()\n\trequire.NotNil(t, err)\n}", "func verifyCtr() error {\n\tif CtrdClient == nil {\n\t\treturn fmt.Errorf(\"verifyCtr: Container client is nil\")\n\t}\n\n\tif ctrdCtx == nil {\n\t\treturn fmt.Errorf(\"verifyCtr: Container context is nil\")\n\t}\n\treturn nil\n}", "func TestValidEvents(t *testing.T) {\n\ttestCases := []struct {\n\t\tevents []string\n\t\terrCode APIErrorCode\n\t}{\n\t\t// Return error for unknown event element.\n\t\t{\n\t\t\tevents: []string{\n\t\t\t\t\"s3:UnknownAPI\",\n\t\t\t},\n\t\t\terrCode: ErrEventNotification,\n\t\t},\n\t\t// Return success for supported event.\n\t\t{\n\t\t\tevents: []string{\n\t\t\t\t\"s3:ObjectCreated:Put\",\n\t\t\t},\n\t\t\terrCode: ErrNone,\n\t\t},\n\t\t// Return success for supported events.\n\t\t{\n\t\t\tevents: []string{\n\t\t\t\t\"s3:ObjectCreated:*\",\n\t\t\t\t\"s3:ObjectRemoved:*\",\n\t\t\t},\n\t\t\terrCode: ErrNone,\n\t\t},\n\t\t// Return error for empty event list.\n\t\t{\n\t\t\tevents: []string{\"\"},\n\t\t\terrCode: ErrEventNotification,\n\t\t},\n\t}\n\n\tfor i, testCase := range testCases {\n\t\terrCode := checkEvents(testCase.events)\n\t\tif testCase.errCode != errCode {\n\t\t\tt.Errorf(\"Test %d: Expected \\\"%d\\\", got \\\"%d\\\"\", i+1, testCase.errCode, errCode)\n\t\t}\n\t}\n}", "func AssertValidKey(key []byte) {\n\tif key == nil {\n\t\tpanic(\"key is 
nil\")\n\t}\n}", "func TestInvalidEvents(t *testing.T) {\n\tassert := assert.New(t)\n\n\tw := webhook.W{\n\t\tUntil: time.Now().Add(60 * time.Second),\n\t}\n\tw.Config.URL = \"http://localhost:9999/foo\"\n\tw.Config.ContentType = \"application/json\"\n\n\tobs, err := OutboundSenderFactory{\n\t\tListener: w,\n\t\tClient: &http.Client{},\n\t\tCutOffPeriod: time.Second,\n\t\tNumWorkers: 10,\n\t\tQueueSize: 10,\n\t\tProfilerFactory: testServerProfilerFactory,\n\t\tLogger: getLogger(),\n\t}.New()\n\tassert.Nil(obs)\n\tassert.NotNil(err)\n\n\tw2 := webhook.W{\n\t\tUntil: time.Now().Add(60 * time.Second),\n\t\tEvents: []string{\"iot(.*\"},\n\t}\n\tw2.Config.URL = \"http://localhost:9999/foo\"\n\tw2.Config.ContentType = \"application/json\"\n\n\tobs, err = OutboundSenderFactory{\n\t\tListener: w2,\n\t\tClient: &http.Client{},\n\t\tCutOffPeriod: time.Second,\n\t\tNumWorkers: 10,\n\t\tQueueSize: 10,\n\t\tLogger: getLogger(),\n\t\tProfilerFactory: testServerProfilerFactory,\n\t}.New()\n\n\tassert.Nil(obs)\n\tassert.NotNil(err)\n}", "func (tc *testContext) testInvalidServicesCM(cmName string, expected *servicescm.Data) error {\n\t// Scale down the WMCO deployment to 0\n\tif err := tc.scaleWMCODeployment(0); err != nil {\n\t\treturn err\n\t}\n\t// Delete existing services CM\n\terr := tc.client.K8s.CoreV1().ConfigMaps(wmcoNamespace).Delete(context.TODO(), cmName, meta.DeleteOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Generate and create a service CM with incorrect data\n\tinvalidServicesCM, err := servicescm.Generate(cmName, wmcoNamespace,\n\t\t&servicescm.Data{Services: []servicescm.Service{{Name: \"fakeservice\", Bootstrap: true}},\n\t\t\tFiles: []servicescm.FileInfo{}})\n\tif err != nil {\n\t\treturn err\n\t}\n\tif _, err := tc.client.K8s.CoreV1().ConfigMaps(wmcoNamespace).Create(context.TODO(), invalidServicesCM,\n\t\tmeta.CreateOptions{}); err != nil {\n\t\treturn err\n\t}\n\n\t// Restart the operator pod\n\tif err := tc.scaleWMCODeployment(1); err != nil 
{\n\t\treturn err\n\t}\n\t// Try to retrieve newly created ConfigMap and validate its contents\n\t_, err = tc.waitForValidWindowsServicesConfigMap(cmName, expected)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error waiting for valid ConfigMap %s: %w\", cmName, err)\n\t}\n\treturn nil\n}", "func InvalidID(c *gin.Context, err error) {\n\tc.JSON(http.StatusBadRequest, &Result{\n\t\tMessage: \"Invalid ID\",\n\t\tCode: 1400,\n\t\tError: err.Error(),\n\t})\n}", "func TestBadToken(t *testing.T) {\n\tassert := assert.New(t)\n\n\tmockedTpmProvider := new(tpmprovider.MockedTpmProvider)\n\tmockedTpmProvider.On(\"Close\").Return(nil)\n\tmockedTpmProvider.On(\"NvIndexExists\", mock.Anything).Return(true, nil)\n\tmockedTpmProvider.On(\"NvRelease\", mock.Anything, mock.Anything).Return(nil)\n\tmockedTpmProvider.On(\"NvDefine\", mock.Anything, mock.Anything, mock.Anything).Return(nil)\n\tmockedTpmProvider.On(\"NvWrite\", mock.Anything, mock.Anything, mock.Anything).Return(nil)\n\n\tmockedTpmFactory := tpmprovider.MockedTpmFactory{TpmProvider: mockedTpmProvider}\n\n\ttrustAgentService, err := CreateTrustAgentService(CreateTestConfig(), mockedTpmFactory)\n\tassert.NoError(err)\n\n\t// setup TA service to use JWT-based authentication\n\ttrustAgentService.router = mux.NewRouter()\n\ttrustAgentService.router.Use(middleware.NewTokenAuth(\"../test/mockJWTDir\", \"../test/mockCACertsDir\", mockRetrieveJWTSigningCerts, cacheTime))\n\ttrustAgentService.router.HandleFunc(\"/v2/tag\", errorHandler(requiresPermission(setAssetTag(CreateTestConfig(), mockedTpmFactory), []string{postDeployTagPerm}))).Methods(\"POST\")\n\n\tjsonString := `{\"tag\" : \"tHgfRQED1+pYgEZpq3dZC9ONmBCZKdx10LErTZs1k/k=\", \"hardware_uuid\" : \"7a569dad-2d82-49e4-9156-069b0065b262\"}`\n\n\trequest, err := http.NewRequest(\"POST\", \"/v2/tag\", bytes.NewBuffer([]byte(jsonString)))\n\tassert.NoError(err)\n\n\trequest.Header.Set(\"Content-Type\", \"application/json\")\n\trequest.Header.Add(\"Authorization\", \"Bearer 
\"+TestBadJWTAuthToken)\n\n\trecorder := httptest.NewRecorder()\n\ttrustAgentService.router.ServeHTTP(recorder, request)\n\tresponse := recorder.Result()\n\tfmt.Printf(\"StatusCode: %d\\n\", response.StatusCode)\n\tassert.Equal(http.StatusUnauthorized, response.StatusCode)\n}", "func (n *NullEventReceiver) EventErrKv(eventName string, err error, kvs map[string]string) error {\n\treturn err\n}", "func TestInvalidDSN(t *testing.T) {\n\tuser, pwd, host := parseDsn(\"\")\n\tstringsEqual(t, \"\", user)\n\tstringsEqual(t, \"\", pwd)\n\tstringsEqual(t, \"\", host)\n}", "func cardReportError(context *Context, err error) {\n\tif context == nil {\n\t\treturn\n\t}\n\tif context.Debug {\n\t\tfmt.Printf(\"*** %s\\n\", err)\n\t}\n\tif IoErrorIsRecoverable {\n\t\ttime.Sleep(500 * time.Millisecond)\n\t\tcontext.reopenRequired = true\n\t}\n}", "func TestNonExistantRequestID(t *testing.T) {\n\ta := wolf.New()\n\n\tvar run bool\n\ta.Get(\"/\", func(ctx context.Context, w http.ResponseWriter, r *http.Request) {\n\t\trun = true\n\t\tassert.Equal(t, \"\", GetReqID(ctx))\n\t})\n\n\tvar w http.ResponseWriter = httptest.NewRecorder()\n\tr, err := http.NewRequest(\"GET\", \"/\", nil)\n\tassert.NoError(t, err)\n\ta.ServeHTTP(w, r)\n\n\tassert.True(t, run)\n}", "func noValidTokenTest(t *testing.T, r *http.Request, h http.Handler, auth *mock.Authenticator) {\n\toriginal := auth.AuthenticateFn\n\tauth.AuthenticateFn = authenticateGenerator(false, errors.New(\"An error\"))\n\tw := httptest.NewRecorder()\n\th.ServeHTTP(w, r)\n\ttest.Equals(t, http.StatusBadRequest, w.Result().StatusCode)\n\tauth.AuthenticateFn = authenticateGenerator(false, nil)\n\tw = httptest.NewRecorder()\n\th.ServeHTTP(w, r)\n\ttest.Equals(t, http.StatusUnauthorized, w.Result().StatusCode)\n\tauth.AuthenticateFn = original\n}", "func TestBatchCTXInvalidBuild(t *testing.T) {\n\ttestBatchCTXInvalidBuild(t)\n}", "func TestNewMovieErrorInvalidUser(t *testing.T) {\n\tt.Helper()\n\n\tu := newInvalidUser()\n\tuid, _ := 
uuid.NewUUID()\n\n\twantError := errs.E(errs.Validation, errs.Parameter(\"User\"), errors.New(\"User is invalid\"))\n\n\tif gotMovie, gotError := movie.NewMovie(uid, \"externalID\", u); !reflect.DeepEqual(wantError.Error(), gotError.Error()) && gotMovie != nil {\n\t\tt.Errorf(\"Want: %v\\nGot: %v\", wantError, gotError)\n\t}\n}", "func (aio *AsyncIO) verifyEvent(evt event) error {\n\tif evt.obj == nil {\n\t\treturn ErrNilCallback\n\t}\n\tre, ok := aio.running.Get(pointer2string(unsafe.Pointer(evt.obj)))\n\tif !ok {\n\t\treturn ErrUntrackedEventKey\n\t}\n\trevt, ok := re.(*runningEvent)\n\tif !ok {\n\t\treturn ErrInvalidEventPtr\n\t}\n\tif revt.iocb != evt.obj {\n\t\treturn ErrInvalidEventPtr\n\t}\n\t// an error occured with this event, remove the running event and set error code.\n\tif evt.res < 0 {\n\t\treturn aio.freeEvent(revt, evt.obj, lookupErrNo(int(evt.res)))\n\t}\n\t//we have an active event returned and its one we are tracking\n\t//ensure it wrote our entire buffer, res is > 0 at this point\n\tif evt.res > 0 && uint(count(revt.data)) != (uint(evt.res)+revt.wrote) {\n\t\trevt.wrote += uint(evt.res)\n\t\tif err := aio.resubmit(revt); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\trevt.wrote += uint(evt.res)\n\n\treturn aio.freeEvent(revt, evt.obj, nil)\n}", "func TestInvalidInput(t *testing.T) {\n\ttests := []struct {\n\t\tinput string\n\t}{\n\t\t//too short\n\t\t{\"12\"},\n\t\t{\"12/14\"},\n\t\t//bad month\n\t\t{\"13/14/1989\"},\n\t\t//bad day\n\t\t{\"12/32/1989\"},\n\t}\n\tfor _, test := range tests {\n\t\t//Create and assign test input\n\t\tobj := DOB{}\n\t\tobj.DOB = test.input\n\n\t\t//Create buffer to catch validateInput() text so we don't spam up the terminal\n\t\tvar b bytes.Buffer\n\n\t\t//Test if test input incorrect sets obj.Validated == true\n\t\t//Should be false if working correctly\n\t\tobj.validateInput(&b)\n\n\t\t//Bad inputs should not have set obj.Validated == true\n\t\t//FAIL if true\n\t\tif obj.Validated == true 
{\n\t\t\tt.Errorf(\"validateInput() = have: %s & '%v', want: false \", test.input, obj.Validated)\n\t\t}\n\t}\n}", "func testEDFieldInclusionTransactionCode(t testing.TB) {\n\tentry := mockEntryDetail()\n\tentry.TransactionCode = 0\n\terr := entry.Validate()\n\tif !base.Match(err, ErrConstructor) {\n\t\tt.Errorf(\"%T: %s\", err, err)\n\t}\n}", "func mockErr(mockErrOpts *MockErrOptions, n apns.Packet) error {\n\ti := rand.Intn(101-1) + 1\n\tif i < mockErrOpts.fail {\n\t\tif en, isEN := n.(*apns.EnhancedNotification); isEN {\n\t\t\tresp := &apns.ErrorResponse{\n\t\t\t\tStatus: apns.InvalidTokenStatus,\n\t\t\t\tIdentifier: en.Identifier,\n\t\t\t}\n\t\t\treturn resp\n\t\t}\n\t\treturn io.EOF\n\t}\n\treturn nil\n}", "func TestSubscriptionUnsubscribeError(t *testing.T) {\n\tmockTransport := new(mockFScopeTransport)\n\terr := errors.New(\"error\")\n\tmockTransport.On(\"Unsubscribe\").Return(err)\n\tsub := NewFSubscription(\"foo\", mockTransport)\n\tassert.Equal(t, err, sub.Unsubscribe())\n\tmockTransport.AssertExpectations(t)\n}", "func CheckInvalid(tb testing.TB, funcName string, err error) {\n\ttb.Helper()\n\n\tif err != os.ErrInvalid {\n\t\ttb.Errorf(\"%s : want error to be %v, got %v\", funcName, os.ErrInvalid, err)\n\t}\n}", "func TestMsgParseTortureRegBadCt(t *testing.T) {\n\tstr := \"OPTIONS sip:[email protected] SIP/2.0\\r\\n\" +\n\t\t\"Via: SIP/2.0/UDP host4.example.com:5060;branch=z9hG4bKkdju43234\\r\\n\" +\n\t\t\"Max-Forwards: 70\\r\\n\" +\n\t\t\"From: \\\"Bell, Alexander\\\" <sip:[email protected]>;tag=433423\\r\\n\" +\n\t\t\"To: \\\"Watson, Thomas\\\" < sip:[email protected] >\\r\\n\" +\n\t\t\"Call-ID: badaspec.sdf0234n2nds0a099u23h3hnnw009cdkne3\\r\\n\" +\n\t\t\"Accept: application/sdp\\r\\n\" +\n\t\t\"CSeq: 3923239 OPTIONS\\r\\n\" +\n\t\t\"l: 0\\r\\n\\r\\n\"\n\t_, err := MsgParse([]byte(str))\n\tassert.NotNil(t, err)\n\tassert.Contains(t, err.Error(), \"< sip:[email protected] >\")\n}", "func cannotBeCalledFromContracts(ctx isc.SandboxBase) {\n\tcaller := 
ctx.Caller()\n\tif caller != nil && caller.Kind() == isc.AgentIDKindContract {\n\t\tpanic(vm.ErrIllegalCall)\n\t}\n}", "func TestNewMovieErrorExtlID(t *testing.T) {\n\tt.Helper()\n\n\tu := newValidUser()\n\tuid, _ := uuid.NewUUID()\n\twantError := errs.E(errs.Validation, errs.Parameter(\"ID\"), errors.New(errs.MissingField(\"ID\").Error()))\n\tif gotMovie, gotError := movie.NewMovie(uid, \"\", u); !reflect.DeepEqual(wantError.Error(), gotError.Error()) && gotMovie != nil {\n\t\tt.Errorf(\"Want: %v\\nGot: %v\", wantError, gotError)\n\t}\n}", "func (s *PartitionCsmSuite) TestOfferInvalid(c *C) {\n\ts.kh.SetOffsetValues(group, topic, s.kh.GetOldestOffsets(topic))\n\tpc := Spawn(s.ns, group, topic, partition, s.cfg, s.groupMember, s.msgFetcherF, s.offsetMgrF)\n\tdefer pc.Stop()\n\n\tmsg, ok := <-pc.Messages()\n\tc.Assert(ok, Equals, true)\n\n\t// When\n\tmsg.EventsCh <- consumer.Event{T: consumer.EvOffered, Offset: msg.Offset + 1}\n\tmsg.EventsCh <- consumer.Event{T: consumer.EvOffered, Offset: msg.Offset - 1}\n\n\t// Then\n\tmsg.EventsCh <- consumer.Event{T: consumer.EvOffered, Offset: msg.Offset}\n\tmsg2, ok := <-pc.Messages()\n\tc.Assert(msg2.Offset, Equals, msg.Offset+1)\n\tc.Assert(ok, Equals, true)\n}", "func ErrInvalidVout(codespace sdk.CodespaceType) sdk.Error {\n\treturn sdk.NewError(codespace, InvalidVout, InvalidVoutMessage)\n}", "func IsMockInvalid(cc ContractCall) bool {\n\treturn false\n}", "func TestUploadCannotAcquireToken(t *testing.T) {\n\t// prepare uploader parameter\n\tlocalNodeId := uuid.New()\n\tsenderChan := make(chan data.ShareCommand)\n\tsender := NewShareSender(senderChan)\n\n\t// create uploader\n\tmaxUploads := 0\n\tuploader := NewShareUploader(localNodeId, maxUploads, sender)\n\n\t// prepare dirs\n\tdownloadDir, base := prepareDirs(t)\n\tdefer os.RemoveAll(downloadDir)\n\tdefer os.RemoveAll(base)\n\t// prepare shared file\n\tsf := createSharedFile(t, base)\n\tdefer os.Remove(sf.FilePath())\n\n\t// prepare download request of unknown 
chunk\n\tnodeId := uuid.New().String()\n\tchunkChecksum := sf.LocalChunksChecksums()[0]\n\trequest := data.NewDownloadRequest(sf.FileId(), nodeId, chunkChecksum)\n\n\t// start message reader for deny message\n\tdone := make(chan bool)\n\tgo readDenyUpload(t, done, senderChan, request)\n\n\t// start upload\n\tuploader.Upload(sf, chunkChecksum, nodeId, filepath.Join(downloadDir, sf.FileRelativePath()))\n\n\t// wait for message\n\t<-done\n}", "func (id InvalidContainerIDError) BadRequest() {}", "func AssertValidKey(key []byte) {\n\tif len(key) == 0 {\n\t\tpanic(\"key is nil or empty\")\n\t}\n\tif len(key) > MaxKeyLength {\n\t\tpanic(\"key is too large\")\n\t}\n}", "func TestInvalidTime(t *testing.T) {\n\ttestCases := []struct {\n\t\th, m, s int\n\t}{\n\t\t{-1, 13, 16},\n\t\t{24, 13, 16},\n\t\t{12, -1, 16},\n\t\t{12, 60, 16},\n\t\t{12, 13, -1},\n\t\t{12, 13, 60},\n\t}\n\tfor _, tc := range testCases {\n\t\tt.Run(fmt.Sprintf(\"input %v\", tc), func(t *testing.T) {\n\n\t\t\tif _, err := NewTime(tc.h, tc.m, tc.s); err == nil {\n\t\t\t\tt.Errorf(\"Expected an error for invalid time %02d:%02d:%02d\", tc.h, tc.m, tc.s)\n\t\t\t}\n\t\t})\n\t}\n}", "func TestInvalidEventRegex(t *testing.T) {\n\n\tassert := assert.New(t)\n\n\tw := webhook.W{\n\t\tUntil: time.Now().Add(60 * time.Second),\n\t\tEvents: []string{\"[[:123\"},\n\t}\n\tw.Config.URL = \"http://localhost:9999/foo\"\n\tw.Config.ContentType = \"application/json\"\n\n\tobs, err := OutboundSenderFactory{\n\t\tListener: w,\n\t\tClient: &http.Client{},\n\t\tNumWorkers: 10,\n\t\tQueueSize: 10,\n\t\tProfilerFactory: testServerProfilerFactory,\n\t\tLogger: getLogger(),\n\t}.New()\n\tassert.Nil(obs)\n\tassert.NotNil(err)\n\n}", "func (s *ServerTestSuite) TestNewServerWithBadSigningKey() {\n\tts := NewServer(\"test-server\", \":9999\", \"9.99.999\", s.info, nil, true, \"\", 37 * time.Minute)\n\tassert.Nil(s.T(), ts)\n}", "func TestInvalidCutOffPeriod(t *testing.T) {\n\n\tassert := assert.New(t)\n\n\ttrans := &transport{}\n\n\tobs, 
err := simpleSetup(trans, 0*time.Second, nil)\n\tassert.Nil(obs)\n\tassert.NotNil(err)\n}", "func TestFlow_InvalidPacket(t *testing.T) {\n\tinvalidIPpacket := []byte{0xab, 0xbc}\n\n\t_, err := FindFlow(invalidIPpacket)\n\tif err == nil {\n\t\tt.Errorf(\"Unable to detect invalid flow from %v\\n\", invalidIPpacket)\n\t}\n}", "func testBatchXCKInvalidBuild(t testing.TB) {\n\tmockBatch := mockBatchXCK(t)\n\tmockBatch.GetHeader().ServiceClassCode = 3\n\terr := mockBatch.Create()\n\tif !base.Match(err, ErrServiceClass) {\n\t\tt.Errorf(\"%T: %s\", err, err)\n\t}\n}", "func TestValidAuth(t *testing.T) {\n\tt.Parallel()\n\ta, err := getAuth()\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif !ValidAuth(a) {\n\t\tt.Error(ErrInvalidAuth)\n\t}\n}", "func TestContactAddInvalidData(t *testing.T) {\n\tdb := database.Connect()\n\tu := models.User{\n\t\tEmail: \"[email protected]\",\n\t}\n\tu.Create(db)\n\tut, _ := u.AddToken(db)\n\n\ttype Data struct {\n\t\tID int64\n\t}\n\td := Data{ID: 321}\n\tj, _ := json.Marshal(d)\n\tb := bytes.NewBuffer(j)\n\n\tr, err := http.NewRequest(\"POST\", \"/\", b)\n\tr.Header.Add(\"Content-Type\", \"application/json\")\n\tr.Header.Add(\"X-Access-Token\", ut.Token)\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error\", err)\n\t}\n\n\tw := httptest.NewRecorder()\n\tc := SetupWebContext()\n\tContactAdd(c, w, r)\n\tif w.Code != http.StatusBadRequest {\n\t\tt.Errorf(\"%v expected, got %v instead\", http.StatusBadRequest, w.Code)\n\t}\n}", "func verifyCustomToken(t *testing.T, ct, uid string) *auth.Token {\n\tidt, err := signInWithCustomToken(ct)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer deleteUser(uid)\n\n\tvt, err := client.VerifyIDToken(context.Background(), idt)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif vt.UID != uid {\n\t\tt.Errorf(\"UID = %q; want UID = %q\", vt.UID, uid)\n\t}\n\tif vt.Firebase.Tenant != \"\" {\n\t\tt.Errorf(\"Tenant = %q; want = %q\", vt.Firebase.Tenant, \"\")\n\t}\n\treturn vt\n}", "func 
testInvalidRootCertWithClientAuth(t *testing.T) {\n\tsrv := getServer(rootPort, testdataDir, \"\", 0, t)\n\tsrv = getTLSConfig(srv, \"RequireAndVerifyClientCert\", []string{\"../testdata/root.pem\", \"../testdata/root2.pem\"})\n\n\terr := srv.Start()\n\tif err == nil {\n\t\tt.Error(\"Root2.pem does not exists, server should have failed to start\")\n\t}\n}", "func (e StreamEventsRequest_IdentifierValidationError) Cause() error { return e.cause }", "func (s *EventSuite) TestDeleteEvent_WrongID(c *C) {\n\taccounts := CorrectDeploy(1, 0, 1, 1, 1, true, true)\n\tapplication := accounts[0].Applications[0]\n\tuser := application.Users[0]\n\tevent := user.Events[0]\n\n\trouteName := \"deleteCurrentUserEvent\"\n\troute := getComposedRoute(routeName, event.ID+1)\n\tcode, _, err := runRequest(routeName, route, \"\", signApplicationRequest(application, user, true, true))\n\tc.Assert(err, IsNil)\n\n\tc.Assert(err, IsNil)\n\tc.Assert(code, Equals, http.StatusNotFound)\n}", "func (n *EventReceiver) EventErrKv(eventName string, err error, kvs map[string]string) error {\n\tlogger.Errorf(\"%+v\", err)\n\tlogger.Errorf(\"%s: %+v\", eventName, kvs)\n\treturn err\n}", "func TestRegisterByDeviceFailWithInvalidContentType(t *testing.T) {\n\t// initialize\n\tapiTest.T = t\n\ttestCaseStatusError := []struct {\n\t\tname string\n\t\tparamRequest map[string][]string\n\t}{\n\t\t{\n\t\t\tname: \"invalid content type\",\n\t\t\tparamRequest: map[string][]string{\n\t\t\t\t\"device_id\": {\"123e4567-e89b-12d3-a456-123456789123\"},\n\t\t\t},\n\t\t},\n\t}\n\tt.Run(testCaseStatusError[0].name, func(t *testing.T) {\n\t\tresp := sendRequest(testCaseStatusError[0].paramRequest, \"application/x-www-form-urlencoded, test\", apiTest)\n\t\t// check status bad request.\n\t\tcheckStatusCodeResponse(t, resp, http.StatusBadRequest)\n\t\t// check response data.\n\t\tcheckJSONResponeMessage(t, resp, \"Parse request error.\")\n\t\t// check user is not created in user_app table\n\t\tassert.False(t, 
checkUserExisted(testCaseStatusError[0].paramRequest[\"device_id\"][0]))\n\t})\n}", "func secp256k1GoPanicIllegal(msg *C.char, data unsafe.Pointer) {\n\tpanic(\"illegal argument: \" + C.GoString(msg))\n}", "func TestNextEventAfterFailedSubscribe(t *testing.T) {\n\tdctx, dcancel := context.WithCancel(context.Background())\n\tdefer dcancel()\n\n\tconn := mockCharon(dctx)\n\n\ts, err := NewSession(withTestConn(conn))\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create session: %v\", err)\n\t}\n\tdefer s.Close()\n\n\t// This should result in an IO error, and if handled properly within\n\t// the event listener, the error should not be sent on the event channel.\n\ts.el.conn.Close()\n\tif err := s.Subscribe(\"test-event\"); err == nil {\n\t\tt.Fatalf(\"Expected error reading from closed transport\")\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)\n\tdefer cancel()\n\n\t_, err = s.NextEvent(ctx)\n\tif err != ctx.Err() {\n\t\tt.Fatalf(\"Expected to get timeout error, got: %v\", err)\n\t}\n}", "func ErrInvalidMarketplaceID(id string) sdk.Error {\r\n\treturn sdk.NewError(\r\n\t\tDefaultCodespace,\r\n\t\tErrorCodeClaimsWithMarketplaceNotFound,\r\n\t\tfmt.Sprintf(\"Invalid marketplace id: %s\", id))\r\n}", "func TestSignContractFailure(t *testing.T) {\n\tsignatureHelper(t, true)\n}", "func TestNextEventCancel(t *testing.T) {\n\tmaybeSkipIntegrationTest(t)\n\n\ts, err := NewSession()\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create session: %v\", err)\n\t}\n\tdefer s.Close()\n\n\tif err := s.Subscribe(\"ike-updown\"); err != nil {\n\t\tt.Fatalf(\"Failed to start event listener: %v\", err)\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)\n\tdefer cancel()\n\n\t_, err = s.NextEvent(ctx)\n\tif err != ctx.Err() {\n\t\tt.Fatalf(\"Expected to get context's timeout error after not receiving event, got: %v\", err)\n\t}\n}", "func TestToken(t *testing.T) {\n\tkey := 
[]byte(\"26BF237B95964852625A2C27988C3\")\n\tSetSecret(key)\n\tc := NewClaims(1, 15*time.Minute)\n\tc.SetIssuer(\"token_test\")\n\tc.SetSubject(\"test\")\n\ttok, err := c.Token()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tc, err = Decode(tok)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func ERROR_AUTH_TOKEN_INVALID(w http.ResponseWriter) {\n\tbuildForeignError(w, http.StatusForbidden, \"ERROR_AUTH_TOKEN_INVALID\", \"\")\n}", "func ErrCorruptUserID(err error) *influxdb.Error {\n\treturn &influxdb.Error{\n\t\tCode: influxdb.EInvalid,\n\t\tMsg: \"corrupt ID provided\",\n\t\tErr: err,\n\t}\n}", "func TestInvalidFingerprintCausesFailed(t *testing.T) {\n\treport := test.CheckRoutines(t)\n\tdefer report()\n\n\tpcOffer, err := NewPeerConnection(Configuration{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tpcAnswer, err := NewPeerConnection(Configuration{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tdefer closePairNow(t, pcOffer, pcAnswer)\n\n\tofferChan := make(chan SessionDescription)\n\tpcOffer.OnICECandidate(func(candidate *ICECandidate) {\n\t\tif candidate == nil {\n\t\t\tofferChan <- *pcOffer.PendingLocalDescription()\n\t\t}\n\t})\n\n\tconnectionHasFailed, closeFunc := context.WithCancel(context.Background())\n\tpcAnswer.OnConnectionStateChange(func(connectionState PeerConnectionState) {\n\t\tif connectionState == PeerConnectionStateFailed {\n\t\t\tcloseFunc()\n\t\t}\n\t})\n\n\tif _, err = pcOffer.CreateDataChannel(\"unusedDataChannel\", nil); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\toffer, err := pcOffer.CreateOffer(nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if err := pcOffer.SetLocalDescription(offer); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tselect {\n\tcase offer := <-offerChan:\n\t\t// Replace with invalid fingerprint\n\t\tre := regexp.MustCompile(`sha-256 (.*?)\\r`)\n\t\toffer.SDP = re.ReplaceAllString(offer.SDP, \"sha-256 AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA\\r\")\n\n\t\tif err := 
pcAnswer.SetRemoteDescription(offer); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tanswer, err := pcAnswer.CreateAnswer(nil)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tif err = pcAnswer.SetLocalDescription(answer); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\terr = pcOffer.SetRemoteDescription(answer)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\tcase <-time.After(5 * time.Second):\n\t\tt.Fatal(\"timed out waiting to receive offer\")\n\t}\n\n\tselect {\n\tcase <-connectionHasFailed.Done():\n\tcase <-time.After(30 * time.Second):\n\t\tt.Fatal(\"timed out waiting for connection to fail\")\n\t}\n}", "func (mt *Mytoken) Valid() error {\n\tstandardClaims := jwt.StandardClaims{\n\t\tAudience: mt.Audience,\n\t\tExpiresAt: int64(mt.ExpiresAt),\n\t\tId: mt.ID.String(),\n\t\tIssuedAt: int64(mt.IssuedAt),\n\t\tIssuer: mt.Issuer,\n\t\tNotBefore: int64(mt.NotBefore),\n\t\tSubject: mt.Subject,\n\t}\n\tif err := errors.WithStack(standardClaims.Valid()); err != nil {\n\t\treturn err\n\t}\n\tif ok := standardClaims.VerifyIssuer(config.Get().IssuerURL, true); !ok {\n\t\treturn errors.New(\"invalid issuer\")\n\t}\n\tif ok := standardClaims.VerifyAudience(config.Get().IssuerURL, true); !ok {\n\t\treturn errors.New(\"invalid Audience\")\n\t}\n\tif ok := mt.verifyID(); !ok {\n\t\treturn errors.New(\"invalid id\")\n\t}\n\tif ok := mt.verifySubject(); !ok {\n\t\treturn errors.New(\"invalid subject\")\n\t}\n\treturn nil\n}", "func (mt *Mytoken) Valid() error {\n\tstandardClaims := jwt.StandardClaims{\n\t\tAudience: mt.Audience,\n\t\tExpiresAt: int64(mt.ExpiresAt),\n\t\tId: mt.ID.String(),\n\t\tIssuedAt: int64(mt.IssuedAt),\n\t\tIssuer: mt.Issuer,\n\t\tNotBefore: int64(mt.NotBefore),\n\t\tSubject: mt.Subject,\n\t}\n\tif err := errors.WithStack(standardClaims.Valid()); err != nil {\n\t\treturn err\n\t}\n\tif ok := standardClaims.VerifyIssuer(config.Get().IssuerURL, true); !ok {\n\t\treturn errors.New(\"invalid issuer\")\n\t}\n\tif ok := 
standardClaims.VerifyAudience(config.Get().IssuerURL, true); !ok {\n\t\treturn errors.New(\"invalid Audience\")\n\t}\n\tif ok := mt.verifyID(); !ok {\n\t\treturn errors.New(\"invalid id\")\n\t}\n\tif ok := mt.verifySubject(); !ok {\n\t\treturn errors.New(\"invalid subject\")\n\t}\n\treturn nil\n}", "func ErrAddressNotAuthorised() sdk.Error {\r\n\treturn sdk.NewError(\r\n\t\tDefaultCodespace,\r\n\t\tErrorCodeAddressNotAuthorised,\r\n\t\t\"This address is not authorised to perform this action.\")\r\n}", "func TestGetByEmailInvalid(t *testing.T) {\n\tdb := database.Connect()\n\tu := User{\n\t\tEmail: \"[email protected]\",\n\t}\n\tu.GetByEmail(db)\n\tif u.ID != 0 {\n\t\tt.Errorf(\"Expected no result, got %v\", u)\n\t}\n}", "func (authSvc *AuthService) sessionIdIsValid(sessionId string) bool {\n\t\n\treturn authSvc.validateSessionId(sessionId)\n}" ]
[ "0.8047649", "0.57519215", "0.5298864", "0.5272522", "0.52437335", "0.52140474", "0.5171461", "0.5149031", "0.5140155", "0.5113895", "0.507982", "0.5015881", "0.5013838", "0.50101703", "0.5003114", "0.49705926", "0.49312204", "0.49038598", "0.48926267", "0.48593882", "0.4817481", "0.48161712", "0.4809991", "0.4783413", "0.47555634", "0.47521812", "0.47516456", "0.4743322", "0.4741775", "0.47413108", "0.47236833", "0.47225896", "0.47194484", "0.47111478", "0.46988517", "0.46926653", "0.46877533", "0.4670378", "0.4670263", "0.46655822", "0.4660756", "0.4660487", "0.4658299", "0.46563554", "0.46552292", "0.4654358", "0.46494874", "0.4649131", "0.46454832", "0.46407655", "0.46166286", "0.46166134", "0.46149424", "0.46068364", "0.4588312", "0.45853552", "0.4583361", "0.45777154", "0.45647952", "0.4563109", "0.45621327", "0.45612487", "0.4560855", "0.4560311", "0.45584664", "0.45566335", "0.45502636", "0.45424813", "0.45422265", "0.4538887", "0.45318735", "0.45272648", "0.45263565", "0.45223126", "0.45065093", "0.45044184", "0.44989488", "0.44982877", "0.44971004", "0.4477733", "0.4477414", "0.44719258", "0.4470889", "0.44665232", "0.44589147", "0.44577467", "0.44557813", "0.4444894", "0.44440225", "0.4440934", "0.44352612", "0.44342822", "0.4434196", "0.44333825", "0.4430979", "0.44299215", "0.44299215", "0.4426906", "0.44247782", "0.44214916" ]
0.78260386
1
Test that TokenEvent sends a connection token event.
func TestServiceTokenEventWithID_WithObjectToken_SendsToken(t *testing.T) { runTest(t, func(s *res.Service) { s.Handle("model", res.GetResource(func(r res.GetRequest) { r.NotFound() })) }, func(s *restest.Session) { s.Service().TokenEventWithID(mock.CID, "foo", mock.Token) s.GetMsg().AssertTokenEventWithID(mock.CID, "foo", mock.Token) }) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestAuthRequestTokenEvent(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.Auth(\"method\", func(r res.AuthRequest) {\n\t\t\tr.TokenEvent(mock.Token)\n\t\t\tr.OK(nil)\n\t\t}))\n\t}, func(s *restest.Session) {\n\t\treq := s.Auth(\"test.model\", \"method\", nil)\n\t\ts.GetMsg().\n\t\t\tAssertTokenEvent(mock.CID, mock.Token)\n\t\treq.Response().\n\t\t\tAssertResult(nil)\n\t})\n}", "func TestServiceTokenEvent_WithObjectToken_SendsToken(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.GetResource(func(r res.GetRequest) { r.NotFound() }))\n\t}, func(s *restest.Session) {\n\t\ts.Service().TokenEvent(mock.CID, mock.Token)\n\t\ts.GetMsg().AssertTokenEvent(mock.CID, mock.Token)\n\t})\n}", "func TestServiceTokenEvent_WithNilToken_SendsNilToken(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.GetResource(func(r res.GetRequest) { r.NotFound() }))\n\t}, func(s *restest.Session) {\n\t\ts.Service().TokenEvent(mock.CID, nil)\n\t\ts.GetMsg().AssertTokenEvent(mock.CID, nil)\n\t})\n}", "func (s *Service) TestToken(ctx context.Context, info *pushmdl.PushInfo, token string) (err error) {\n\tparams := url.Values{}\n\tparams.Add(\"app_id\", strconv.FormatInt(info.APPID, 10))\n\tparams.Add(\"alert_title\", info.Title)\n\tparams.Add(\"alert_body\", info.Summary)\n\tparams.Add(\"token\", token)\n\tparams.Add(\"link_type\", strconv.FormatInt(int64(info.LinkType), 10))\n\tparams.Add(\"link_value\", info.LinkValue)\n\tparams.Add(\"sound\", strconv.Itoa(info.Sound))\n\tparams.Add(\"vibration\", strconv.Itoa(info.Vibration))\n\tparams.Add(\"expire_time\", strconv.FormatInt(int64(info.ExpireTime), 10))\n\tparams.Add(\"image_url\", info.ImageURL)\n\tif err = s.httpClient.Post(ctx, _testTokenURL, \"\", params, nil); err != nil {\n\t\tlog.Error(\"s.TestToken(%+v) error(%v)\", info, err)\n\t}\n\treturn\n}", "func TestMockOnEvent(t *testing.T) {\n\tmockServer := &MockRailsServer{T: t, Behaviour: 
MockEvent}\n\n\tdialer := wstest.NewDialer(mockServer)\n\tdialer.HandshakeTimeout = time.Second * 2\n\n\tclient := NewClient(fakeEndpoint).WithDialer(dialer)\n\n\tcalled := make(chan struct{})\n\n\tclient.OnEvent(\"AgentChannel\", func(conn *websocket.Conn, payload *Payload, error error) {\n\t\tcalled <- struct{}{}\n\t\treturn\n\t})\n\n\terr := client.Serve()\n\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\treceiveSleepMs(2000, called, t)\n}", "func sendEvent(client runner.RunnerClient, token string, key string) {\n\tlog.Println(\"sending event:\", key)\n\tif _, err := client.Event(context.Background(), &runner.EventRequest{\n\t\tKey: key,\n\t}); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func TestOAUTH2Token(t *testing.T) {\n\tconnection, err := NewConnectionBuilder().\n\t\tURL(\"http://localhost:9100/api\").\n\t\tUsername(\"admin\").\n\t\tPassword(\"password\").\n\t\tBuild()\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tdefer connection.Close()\n\tvcr := govcr.NewVCR(\"connection_oauth2\",\n\t\t&govcr.VCRConfig{\n\t\t\tClient: connection.client,\n\t\t\tDisableRecording: true,\n\t\t})\n\t// Replace our HTTPClient with a vcr client wrapping it\n\tconnection.client = vcr.Client\n\tprojectsResource := connection.Projects()\n\n\t// Trigger the auth flow.\n\tgetProjectsRequest := projectsResource.Get()\n\tif len(connection.token) != 0 || len(connection.bearer) != 0 {\n\t\tt.Errorf(\"Connection should have no tokens. token: '%s', bearer: '%s'\",\n\t\t\tconnection.token,\n\t\t\tconnection.bearer)\n\t}\n\t_, err = getProjectsRequest.Send()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif len(connection.token) != 0 || len(connection.bearer) == 0 {\n\t\tt.Errorf(\"Connection should have only a bearer token. 
token: '%s', bearer: '%s'\",\n\t\t\tconnection.token,\n\t\t\tconnection.bearer)\n\t}\n}", "func TestServiceTokenEventWithID_WithNilToken_SendsNilToken(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.GetResource(func(r res.GetRequest) { r.NotFound() }))\n\t}, func(s *restest.Session) {\n\t\ts.Service().TokenEventWithID(mock.CID, \"foo\", nil)\n\t\ts.GetMsg().AssertTokenEventWithID(mock.CID, \"foo\", nil)\n\t})\n}", "func TestAuthRequestNilTokenEvent(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.Auth(\"method\", func(r res.AuthRequest) {\n\t\t\tr.TokenEvent(nil)\n\t\t\tr.OK(nil)\n\t\t}))\n\t}, func(s *restest.Session) {\n\t\treq := s.Auth(\"test.model\", \"method\", nil)\n\t\ts.GetMsg().\n\t\t\tAssertTokenEvent(mock.CID, nil)\n\t\treq.Response().\n\t\t\tAssertResult(nil)\n\t})\n}", "func TestNatsAdaptorOnWhenConnectedWithAuth(t *testing.T) {\n\tt.Skip(\"TODO: implement this test without requiring actual server connection\")\n\ta := NewAdaptorWithAuth(\"localhost:4222\", 9999, \"test\", \"testwd\")\n\ta.Connect()\n\tgobottest.Assert(t, a.On(\"hola\", func(msg Message) {\n\t\tfmt.Println(\"hola\")\n\t}), true)\n}", "func TestToken(t *testing.T) {\n\tkey := []byte(\"26BF237B95964852625A2C27988C3\")\n\tSetSecret(key)\n\tc := NewClaims(1, 15*time.Minute)\n\tc.SetIssuer(\"token_test\")\n\tc.SetSubject(\"test\")\n\ttok, err := c.Token()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tc, err = Decode(tok)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func TestVerifyToken(t *testing.T) {\n t.Errorf(\"No tests written yet for VerifyToken()\")\n}", "func TestMockOnConnect(t *testing.T) {\n\tmockServer := &MockRailsServer{T: t, Behaviour: MockConnect}\n\n\tdialer := wstest.NewDialer(mockServer)\n\tdialer.HandshakeTimeout = time.Second * 2\n\n\tclient := NewClient(fakeEndpoint).WithDialer(dialer)\n\n\tcalled := make(chan struct{})\n\n\tclient.OnConnect(func(conn *websocket.Conn) error {\n\t\tcalled <- 
struct{}{}\n\t\treturn nil\n\t})\n\terr := client.Serve()\n\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\treceiveSleepMs(2000, called, t)\n}", "func TestUserTokenPingSuccess(t *testing.T) {\n\tdb := setupDB()\n\tdefer db.Close()\n\trouter := setupRouter()\n\n\tw := httptest.NewRecorder()\n\n\treq, _ := http.NewRequest(\"GET\", \"/token/ping\", nil)\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\treq.Header.Add(\"Authorization\", \"Bearer \" + Token)\n\trouter.ServeHTTP(w, req)\n\n\tassert.Equal(t, http.StatusOK, w.Code)\n\t//\tsomething like {\"claim_id\":\"test001\",\"message\":\"pong\",\"username\":\"test001\"}\n\tassert.Contains(t, w.Body.String(), \"pong\")\n\tassert.Contains(t, w.Body.String(), kTestUserUsername)\n}", "func TestEmittingMessage(t *testing.T) {\n\tsink := make(chan bool, 1)\n\tclient := NewClient()\n\n\ttimeout, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)\n\tdefer cancel()\n\n\tclient.Subscribe(Before, func(ctx context.Context, message interface{}) {\n\t\tsink <- true\n\t})\n\n\tclient.Emit(context.Background(), Before, nil)\n\n\tselect {\n\tcase <-timeout.Done():\n\t\tt.Fatal(\"Timeout reached\")\n\tcase <-sink:\n\t}\n}", "func (*ClientConnectEvent) Op() ws.OpCode { return 12 }", "func TestCorrectTokenPasses(t *testing.T) {\n\thand := New(http.HandlerFunc(succHand))\n\thand.SetFailureHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tt.Errorf(\"Test failed. 
Reason: %v\", Reason(r))\n\t}))\n\n\tserver := httptest.NewServer(hand)\n\tdefer server.Close()\n\n\t// issue the first request to get the token\n\tresp, err := http.Get(server.URL)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tcookie := getRespCookie(resp, CookieName)\n\tif cookie == nil {\n\t\tt.Fatal(\"Cookie was not found in the response.\")\n\t}\n\n\tfinalToken := b64encode(maskToken(b64decode(cookie.Value)))\n\n\tvals := [][]string{\n\t\t{\"name\", \"Jolene\"},\n\t\t{FormFieldName, finalToken},\n\t}\n\n\t// Constructing a custom request is suffering\n\treq, err := http.NewRequest(\"POST\", server.URL, formBodyR(vals))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\treq.Header.Set(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\treq.AddCookie(cookie)\n\n\tresp, err = http.DefaultClient.Do(req)\n\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif resp.StatusCode != 200 {\n\t\tt.Errorf(\"The request should have succeeded, but it didn't. Instead, the code was %d\",\n\t\t\tresp.StatusCode)\n\t}\n}", "func RegisteringTokenTest(env *models.PhotonEnvReader, allowFail bool) {\n\t// 1. register a not-exist token\n\tcase1 := &APITestCase{\n\t\tCaseName: \"Register a not-exist token\",\n\t\tAllowFail: allowFail,\n\t\tReq: &models.Req{\n\t\t\tAPIName: \"RegisteringOneToken\",\n\t\t\tFullURL: env.RandomNode().Host + \"/api/1/tokens/0xFFfFfFffFFfffFFfFFfFFFFFffFFFffffFfFFFfF\",\n\t\t\tMethod: http.MethodPut,\n\t\t\tPayload: \"\",\n\t\t\tTimeout: time.Second * 120,\n\t\t},\n\t\tTargetStatusCode: 409,\n\t}\n\tcase1.Run()\n\t// 2. 
register a new token\n\tnewTokenAddress := deployNewToken()\n\tcase2 := &APITestCase{\n\t\tCaseName: \"Register a new token\",\n\t\tAllowFail: allowFail,\n\t\tReq: &models.Req{\n\t\t\tAPIName: \"RegisteringOneToken\",\n\t\t\tFullURL: env.RandomNode().Host + \"/api/1/tokens/\" + newTokenAddress,\n\t\t\tMethod: http.MethodPut,\n\t\t\tPayload: \"\",\n\t\t\tTimeout: time.Second * 180,\n\t\t},\n\t\tTargetStatusCode: 200,\n\t}\n\tcase2.Run()\n}", "func SimulateMintToken(k keeper.Keeper, ak types.AccountKeeper, bk types.BankKeeper) simtypes.Operation {\n\treturn func(\n\t\tr *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context,\n\t\taccs []simtypes.Account, chainID string,\n\t) (simtypes.OperationMsg, []simtypes.FutureOperation, error) {\n\n\t\ttoken, maxFee := selectToken(ctx, k, ak, bk, true)\n\t\tsimToAccount, _ := simtypes.RandomAcc(r, accs)\n\n\t\tmsg := types.NewMsgMintToken(token.GetSymbol(), token.GetOwnerString(), simToAccount.Address.String(), 100)\n\n\t\townerAccount, found := simtypes.FindAccount(accs, token.GetOwner())\n\t\tif !found {\n\t\t\treturn simtypes.NoOpMsg(types.ModuleName, msg.Type(), fmt.Sprintf(\"account[%s] does not found\", token.GetOwnerString())), \n\t\t\t\tnil, fmt.Errorf(\"account[%s] does not found\", token.GetOwnerString())\n\t\t}\n\n\t\towner, _ := sdk.AccAddressFromBech32(msg.Owner)\n\t\taccount := ak.GetAccount(ctx, owner)\n\t\tfees, err := simtypes.RandomFees(r, ctx, maxFee)\n\t\tif err != nil {\n\t\t\treturn simtypes.NoOpMsg(types.ModuleName, msg.Type(), \"unable to generate fees\"), nil, err\n\t\t}\n\n\t\ttxGen := simappparams.MakeTestEncodingConfig().TxConfig\n\t\ttx, err := helpers.GenTx(\n\t\t\ttxGen,\n\t\t\t[]sdk.Msg{msg},\n\t\t\tfees,\n\t\t\thelpers.DefaultGenTxGas,\n\t\t\tchainID,\n\t\t\t[]uint64{account.GetAccountNumber()},\n\t\t\t[]uint64{account.GetSequence()},\n\t\t\townerAccount.PrivKey,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn simtypes.NoOpMsg(types.ModuleName, msg.Type(), \"unable to generate mock tx\"), nil, 
err\n\t\t}\n\n\t\tif _, _, err = app.Deliver(txGen.TxEncoder(), tx); err != nil {\n\t\t\treturn simtypes.NoOpMsg(types.ModuleName, msg.Type(), \"unable to deliver tx\"), nil, err\n\t\t}\n\n\t\treturn simtypes.NewOperationMsg(msg, true, \"simulate mint token\"), nil, nil\n\t}\n}", "func TestNatsAdaptorOnWhenConnected(t *testing.T) {\n\tt.Skip(\"TODO: implement this test without requiring actual server connection\")\n\ta := initTestNatsAdaptor()\n\ta.Connect()\n\tgobottest.Assert(t, a.On(\"hola\", func(msg Message) {\n\t\tfmt.Println(\"hola\")\n\t}), true)\n}", "func TestConfigReloadEnableTokenAuthentication(t *testing.T) {\n\tserver, opts, config := runReloadServerWithConfig(t, \"./configs/reload/basic.conf\")\n\tdefer server.Shutdown()\n\n\t// Ensure we can connect as a sanity check.\n\taddr := fmt.Sprintf(\"nats://%s:%d\", opts.Host, opts.Port)\n\tnc, err := nats.Connect(addr)\n\tif err != nil {\n\t\tt.Fatalf(\"Error creating client: %v\", err)\n\t}\n\tdefer nc.Close()\n\tdisconnected := make(chan struct{}, 1)\n\tasyncErr := make(chan error, 1)\n\tnc.SetErrorHandler(func(nc *nats.Conn, sub *nats.Subscription, err error) {\n\t\tasyncErr <- err\n\t})\n\tnc.SetDisconnectHandler(func(*nats.Conn) {\n\t\tdisconnected <- struct{}{}\n\t})\n\n\t// Enable authentication.\n\tchangeCurrentConfigContent(t, config, \"./configs/reload/token_authentication_1.conf\")\n\tif err := server.Reload(); err != nil {\n\t\tt.Fatalf(\"Error reloading config: %v\", err)\n\t}\n\n\t// Ensure connecting fails.\n\tif _, err := nats.Connect(addr); err == nil {\n\t\tt.Fatal(\"Expected connect to fail\")\n\t}\n\n\t// Ensure connecting succeeds when using new credentials.\n\tconn, err := nats.Connect(addr, nats.Token(\"T0pS3cr3t\"))\n\tif err != nil {\n\t\tt.Fatalf(\"Error creating client: %v\", err)\n\t}\n\tconn.Close()\n\n\t// Ensure the previous connection received an authorization error.\n\t// Note that it is possible that client gets EOF and not able to\n\t// process async error, so don't fail 
if we don't get it.\n\tselect {\n\tcase err := <-asyncErr:\n\t\tif err != nats.ErrAuthorization {\n\t\t\tt.Fatalf(\"Expected ErrAuthorization, got %v\", err)\n\t\t}\n\tcase <-time.After(time.Second):\n\t}\n\n\t// Ensure the previous connection was disconnected.\n\tselect {\n\tcase <-disconnected:\n\tcase <-time.After(2 * time.Second):\n\t\tt.Fatal(\"Expected connection to be disconnected\")\n\t}\n}", "func (m *MockOobService) RegisterMsgEvent(arg0 chan<- service.StateMsg) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"RegisterMsgEvent\", arg0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (c *NetClient) registerToken(token []byte) {\n\tc.token = token\n\tc.log.Debugf(\"Registered token %s\", c.token)\n}", "func TestWsAuth(t *testing.T) {\n\tt.Parallel()\n\tg.WebsocketURL = geminiWebsocketSandboxEndpoint\n\n\tif !g.Websocket.IsEnabled() &&\n\t\t!g.AuthenticatedWebsocketAPISupport ||\n\t\t!areTestAPIKeysSet() {\n\t\tt.Skip(wshandler.WebsocketNotEnabled)\n\t}\n\tvar dialer websocket.Dialer\n\tgo g.WsHandleData()\n\terr := g.WsSecureSubscribe(&dialer, geminiWsOrderEvents)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\ttimer := time.NewTimer(sharedtestvalues.WebsocketResponseDefaultTimeout)\n\tselect {\n\tcase resp := <-g.Websocket.DataHandler:\n\t\tif resp.(WsSubscriptionAcknowledgementResponse).Type != \"subscription_ack\" {\n\t\t\tt.Error(\"Login failed\")\n\t\t}\n\tcase <-timer.C:\n\t\tt.Error(\"Expected response\")\n\t}\n\ttimer.Stop()\n}", "func TestSocket(t *testing.T) { testSocket(t) }", "func TestGetToken(t *testing.T) {\n\tmc := MockClient{t: t}\n\tmc.DoFunc = validDo\n\tmc.GetFunc = validGet\n\tconfig := ClientConfig{\n\t\tScopes: []string{\"thing\"},\n\t\tOktaDomain: \"mockta.local\",\n\t\tHTTPClient: &mc,\n\t}\n\n\tclient, err := NewClient(config)\n\n\tif err != nil {\n\t\tt.Fatalf(\"Failed: %s\", err)\n\t}\n\n\t// Test surge of requests these should all use the same key\n\tresult := testConcurrency(client, 0, 100, t)\n\tif len(result) > 1 
{\n\t\tt.Fatalf(\"Concurrency Test 1 Failed: got %d, want 1\\n\", len(result))\n\t}\n\n\t// Test renewals\n\tresult = testConcurrency(client, 1000, 10, t)\n\tif len(result) != 10 {\n\t\tt.Fatalf(\"Concurrency Test 2 Failed: got %d, want 10\\n\", len(result))\n\t}\n}", "func TestGet_Token(t *testing.T) {\n t.Errorf(\"No tests written yet for Get_Token()\")\n}", "func TestAgentClientEventNotify(t *testing.T) {\n\tstate := &ssntpTestState{}\n\tac := agentClient{conn: state}\n\tac.EventNotify(ssntp.TenantAdded, nil)\n}", "func TestWsAuth(t *testing.T) {\n\tif !c.Websocket.IsEnabled() && !c.API.AuthenticatedWebsocketSupport || !areTestAPIKeysSet() {\n\t\tt.Skip(wshandler.WebsocketNotEnabled)\n\t}\n\tc.WebsocketConn = &wshandler.WebsocketConnection{\n\t\tExchangeName: c.Name,\n\t\tURL: c.Websocket.GetWebsocketURL(),\n\t\tVerbose: c.Verbose,\n\t\tResponseMaxLimit: exchange.DefaultWebsocketResponseMaxLimit,\n\t\tResponseCheckTimeout: exchange.DefaultWebsocketResponseCheckTimeout,\n\t}\n\tvar dialer websocket.Dialer\n\terr := c.WebsocketConn.Dial(&dialer, http.Header{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tc.Websocket.DataHandler = sharedtestvalues.GetWebsocketInterfaceChannelOverride()\n\tc.Websocket.TrafficAlert = sharedtestvalues.GetWebsocketStructChannelOverride()\n\tgo c.WsHandleData()\n\terr = c.Subscribe(wshandler.WebsocketChannelSubscription{\n\t\tChannel: \"user\",\n\t\tCurrency: currency.NewPairFromString(testPair),\n\t})\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\ttimer := time.NewTimer(sharedtestvalues.WebsocketResponseDefaultTimeout)\n\tselect {\n\tcase badResponse := <-c.Websocket.DataHandler:\n\t\tt.Error(badResponse)\n\tcase <-timer.C:\n\t}\n\ttimer.Stop()\n}", "func onConnect(c *gnet.Connection, solicited bool) {\n\tfmt.Printf(\"Event Callback: connnect event \\n\")\n}", "func TestInvalidEvents(t *testing.T) {\n\tassert := assert.New(t)\n\n\tw := webhook.W{\n\t\tUntil: time.Now().Add(60 * time.Second),\n\t}\n\tw.Config.URL = 
\"http://localhost:9999/foo\"\n\tw.Config.ContentType = \"application/json\"\n\n\tobs, err := OutboundSenderFactory{\n\t\tListener: w,\n\t\tClient: &http.Client{},\n\t\tCutOffPeriod: time.Second,\n\t\tNumWorkers: 10,\n\t\tQueueSize: 10,\n\t\tProfilerFactory: testServerProfilerFactory,\n\t\tLogger: getLogger(),\n\t}.New()\n\tassert.Nil(obs)\n\tassert.NotNil(err)\n\n\tw2 := webhook.W{\n\t\tUntil: time.Now().Add(60 * time.Second),\n\t\tEvents: []string{\"iot(.*\"},\n\t}\n\tw2.Config.URL = \"http://localhost:9999/foo\"\n\tw2.Config.ContentType = \"application/json\"\n\n\tobs, err = OutboundSenderFactory{\n\t\tListener: w2,\n\t\tClient: &http.Client{},\n\t\tCutOffPeriod: time.Second,\n\t\tNumWorkers: 10,\n\t\tQueueSize: 10,\n\t\tLogger: getLogger(),\n\t\tProfilerFactory: testServerProfilerFactory,\n\t}.New()\n\n\tassert.Nil(obs)\n\tassert.NotNil(err)\n}", "func TestAllowedHostsEvent(t *testing.T) {\n\tvar err error\n\terr = testcert.GenerateCert(\"mail2.guerrillamail.com\", \"\", 365*24*time.Hour, false, 2048, \"P256\", \"../../tests/\")\n\tif err != nil {\n\t\tt.Error(\"failed to generate a test certificate\", err)\n\t\tt.FailNow()\n\t}\n\tdefer cleanTestArtifacts(t)\n\tmainlog, err = getTestLog()\n\tif err != nil {\n\t\tt.Error(\"could not get logger,\", err)\n\t\tt.FailNow()\n\t}\n\tif err := ioutil.WriteFile(\"configJsonD.json\", []byte(configJsonD), 0644); err != nil {\n\t\tt.Error(err)\n\t\tt.FailNow()\n\t}\n\t// start the server by emulating the serve command\n\n\tconf := &guerrilla.AppConfig{} // blank one\n\tif err = conf.Load([]byte(configJsonD)); err != nil { // load configJsonD\n\t\tt.Error(err)\n\t}\n\tcmd := &cobra.Command{}\n\tconfigPath = \"configJsonD.json\"\n\n\tgo func() {\n\t\tserve(cmd, []string{})\n\t}()\n\t// wait for start\n\tif _, err := grepTestlog(\"Listening on TCP 127.0.0.1:2552\", 0); err != nil {\n\t\tt.Error(\"server didn't start\")\n\t}\n\n\t// now connect and try RCPT TO with an invalid host\n\tif conn, buffin, err := 
test.Connect(conf.Servers[1], 20); err != nil {\n\t\tt.Error(\"Could not connect to new server\", conf.Servers[1].ListenInterface, err)\n\t} else {\n\t\tif result, err := test.Command(conn, buffin, \"HELO example.com\"); err == nil {\n\t\t\texpect := \"250 secure.test.com Hello\"\n\t\t\tif strings.Index(result, expect) != 0 {\n\t\t\t\tt.Error(\"Expected\", expect, \"but got\", result)\n\t\t\t} else {\n\t\t\t\tif result, err = test.Command(conn, buffin, \"RCPT TO:<[email protected]>\"); err == nil {\n\t\t\t\t\texpect := \"454 4.1.1 Error: Relay access denied: grr.la\"\n\t\t\t\t\tif strings.Index(result, expect) != 0 {\n\t\t\t\t\t\tt.Error(\"Expected:\", expect, \"but got:\", result)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t_ = conn.Close()\n\t}\n\n\t// now change the config by adding a host to allowed hosts\n\n\tnewConf := conf\n\tnewConf.AllowedHosts = append(newConf.AllowedHosts, \"grr.la\")\n\tif jsonbytes, err := json.Marshal(newConf); err == nil {\n\t\tif err = ioutil.WriteFile(\"configJsonD.json\", jsonbytes, 0644); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t} else {\n\t\tt.Error(err)\n\t}\n\t// send a sighup signal to the server to reload config\n\tsigHup()\n\n\tif _, err := grepTestlog(\"allowed_hosts config changed\", 0); err != nil {\n\t\tt.Error(\"allowed_hosts config not changed\")\n\t\tt.FailNow()\n\t}\n\n\t// now repeat the same conversion, RCPT TO should be accepted\n\tif conn, buffin, err := test.Connect(conf.Servers[1], 20); err != nil {\n\t\tt.Error(\"Could not connect to new server\", conf.Servers[1].ListenInterface, err)\n\t} else {\n\t\tif result, err := test.Command(conn, buffin, \"HELO example.com\"); err == nil {\n\t\t\texpect := \"250 secure.test.com Hello\"\n\t\t\tif strings.Index(result, expect) != 0 {\n\t\t\t\tt.Error(\"Expected\", expect, \"but got\", result)\n\t\t\t} else {\n\t\t\t\tif result, err = test.Command(conn, buffin, \"RCPT TO:<[email protected]>\"); err == nil {\n\t\t\t\t\texpect := \"250 2.1.5 OK\"\n\t\t\t\t\tif 
strings.Index(result, expect) != 0 {\n\t\t\t\t\t\tt.Error(\"Expected:\", expect, \"but got:\", result)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t_ = conn.Close()\n\t}\n\n\t// shutdown wait for exit\n\td.Shutdown()\n\n\t// wait for shutdown\n\tif _, err := grepTestlog(\"Backend shutdown completed\", 0); err != nil {\n\t\tt.Error(\"server didn't stop\")\n\t}\n\n}", "func (conn *Conn) AuthToken(token string) error {\n\tconn.username = nil\n\tconn.password = nil\n\tconn.token = &token\n\t_, err := conn.write(ProtoReqAuth, *conn.token, authTimeout)\n\treturn err\n}", "func (m *MockWebsocketAppInterface) CheckToken(userID int, csrfToken string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CheckToken\", userID, csrfToken)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func TestAuthRawToken(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.Auth(\"method\", func(r res.AuthRequest) {\n\t\t\trestest.AssertEqualJSON(t, \"RawToken\", r.RawToken(), mock.Token)\n\t\t\tr.NotFound()\n\t\t}))\n\t}, func(s *restest.Session) {\n\t\treq := mock.AuthRequest()\n\t\treq.Token = mock.Token\n\t\ts.Auth(\"test.model\", \"method\", req).\n\t\t\tResponse().\n\t\t\tAssertError(res.ErrNotFound)\n\t})\n}", "func (suite *KeeperTestSuite) TestOnTimeoutPacket() {\n\tdata := types.NewFungibleTokenPacketData(prefixCoins2, testAddr1.String(), testAddr2.String())\n\ttestCoins2 := sdk.NewCoins(sdk.NewCoin(\"bank/firstchannel/atom\", sdk.NewInt(100)))\n\n\ttestCases := []struct {\n\t\tmsg string\n\t\tmalleate func()\n\t\tsource bool\n\t\texpPass bool\n\t}{\n\t\t{\"successful timeout from source chain\",\n\t\t\tfunc() {\n\t\t\t\tescrow := types.GetEscrowAddress(testPort1, testChannel1)\n\t\t\t\t_, err := suite.chainA.App.BankKeeper.AddCoins(suite.chainA.GetContext(), escrow, sdk.NewCoins(sdk.NewCoin(\"atom\", sdk.NewInt(100))))\n\t\t\t\tsuite.Require().NoError(err)\n\t\t\t}, true, true},\n\t\t{\"successful timeout from external chain\",\n\t\t\tfunc() 
{\n\t\t\t\tdata.Amount = testCoins2\n\t\t\t}, false, true},\n\t\t{\"no source prefix on coin denom\",\n\t\t\tfunc() {\n\t\t\t\tdata.Amount = prefixCoins2\n\t\t\t}, false, false},\n\t\t{\"unescrow failed\",\n\t\t\tfunc() {\n\t\t\t}, true, false},\n\t\t{\"mint failed\",\n\t\t\tfunc() {\n\t\t\t\tdata.Amount[0].Denom = prefixCoins2[0].Denom\n\t\t\t\tdata.Amount[0].Amount = sdk.ZeroInt()\n\t\t\t}, true, false},\n\t}\n\n\tpacket := channeltypes.NewPacket(data.GetBytes(), 1, testPort1, testChannel1, testPort2, testChannel2, 100, 0)\n\n\tfor i, tc := range testCases {\n\t\ttc := tc\n\t\ti := i\n\t\tsuite.Run(fmt.Sprintf(\"Case %s\", tc.msg), func() {\n\t\t\tsuite.SetupTest() // reset\n\n\t\t\ttc.malleate()\n\n\t\t\tvar denom string\n\t\t\tif tc.source {\n\t\t\t\tprefix := types.GetDenomPrefix(packet.GetDestPort(), packet.GetDestChannel())\n\t\t\t\tdenom = prefixCoins2[0].Denom[len(prefix):]\n\t\t\t} else {\n\t\t\t\tdenom = data.Amount[0].Denom\n\t\t\t}\n\n\t\t\tpreCoin := suite.chainA.App.BankKeeper.GetBalance(suite.chainA.GetContext(), testAddr1, denom)\n\n\t\t\terr := suite.chainA.App.TransferKeeper.OnTimeoutPacket(suite.chainA.GetContext(), packet, data)\n\n\t\t\tpostCoin := suite.chainA.App.BankKeeper.GetBalance(suite.chainA.GetContext(), testAddr1, denom)\n\t\t\tdeltaAmount := postCoin.Amount.Sub(preCoin.Amount)\n\n\t\t\tif tc.expPass {\n\t\t\t\tsuite.Require().NoError(err, \"valid test case %d failed: %s\", i, tc.msg)\n\t\t\t\tsuite.Require().Equal(prefixCoins2[0].Amount.Int64(), deltaAmount.Int64(), \"successful timeout did not trigger refund\")\n\t\t\t} else {\n\t\t\t\tsuite.Require().Error(err, \"invalid test case %d passed: %s\", i, tc.msg)\n\t\t\t}\n\t\t})\n\t}\n}", "func TestWsAuth(t *testing.T) {\n\tif !c.Websocket.IsEnabled() && !c.API.AuthenticatedWebsocketSupport || !sharedtestvalues.AreAPICredentialsSet(c) {\n\t\tt.Skip(stream.WebsocketNotEnabled)\n\t}\n\tvar dialer websocket.Dialer\n\terr := c.Websocket.Conn.Dial(&dialer, http.Header{})\n\tif err != nil 
{\n\t\tt.Fatal(err)\n\t}\n\tgo c.wsReadData()\n\n\terr = c.Subscribe([]stream.ChannelSubscription{\n\t\t{\n\t\t\tChannel: \"user\",\n\t\t\tCurrency: testPair,\n\t\t},\n\t})\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\ttimer := time.NewTimer(sharedtestvalues.WebsocketResponseDefaultTimeout)\n\tselect {\n\tcase badResponse := <-c.Websocket.DataHandler:\n\t\tt.Error(badResponse)\n\tcase <-timer.C:\n\t}\n\ttimer.Stop()\n}", "func TestServiceTokenReset(t *testing.T) {\n\ttbl := []struct {\n\t\tSubject string\n\t\tTIDs []string\n\t\tExpected interface{}\n\t}{\n\t\t{\"auth\", nil, nil},\n\t\t{\"auth\", []string{}, nil},\n\t\t{\"auth\", []string{\"foo\"}, json.RawMessage(`{\"tids\":[\"foo\"],\"subject\":\"auth\"}`)},\n\t\t{\"auth\", []string{\"foo\", \"bar\"}, json.RawMessage(`{\"tids\":[\"foo\",\"bar\"],\"subject\":\"auth\"}`)},\n\t\t{\"auth.test.method\", []string{\"foo\", \"bar\"}, json.RawMessage(`{\"tids\":[\"foo\",\"bar\"],\"subject\":\"auth.test.method\"}`)},\n\t}\n\n\tfor _, l := range tbl {\n\t\trunTest(t, func(s *res.Service) {\n\t\t\ts.Handle(\"model\", res.GetResource(func(r res.GetRequest) { r.NotFound() }))\n\t\t}, func(s *restest.Session) {\n\t\t\ts.Service().TokenReset(l.Subject, l.TIDs...)\n\t\t\t// Send token event to flush any system.tokenReset event\n\t\t\ts.Service().TokenEvent(mock.CID, nil)\n\n\t\t\tif l.Expected != nil {\n\t\t\t\ts.GetMsg().\n\t\t\t\t\tAssertSubject(\"system.tokenReset\").\n\t\t\t\t\tAssertPayload(l.Expected)\n\t\t\t}\n\n\t\t\ts.GetMsg().AssertTokenEvent(mock.CID, nil)\n\t\t})\n\t}\n}", "func SimulateIssueToken(k keeper.Keeper, ak authkeeper.AccountKeeper, bk types.BankKeeper) simtypes.Operation {\n\treturn func(\n\t\tr *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context,\n\t\taccs []simtypes.Account, chainID string,\n\t) (simtypes.OperationMsg, []simtypes.FutureOperation, error) {\n\n\t\ttoken, maxFees := genToken(ctx, r, k, ak, bk, accs)\n\n\t\tmsg := types.NewMsgIssueToken(token.GetName(), token.GetSymbol(), 
token.GetSmallestUnit(), token.GetDecimals(), \n\t\t\ttoken.GetInitialSupply(), token.GetTotalSupply(), token.GetMintable(), true, token.GetOwnerString())\n\n\t\tsimAccount, found := simtypes.FindAccount(accs, token.GetOwner())\n\t\tif !found {\n\t\t\treturn simtypes.NoOpMsg(types.ModuleName, msg.Type(), fmt.Sprintf(\"account[%s] does not found\", token.GetOwnerString())), \n\t\t\t\tnil, fmt.Errorf(\"account[%s] does not found\", token.GetOwnerString())\n\t\t}\n\n\t\towner, _ := sdk.AccAddressFromBech32(msg.Owner)\n\t\taccount := ak.GetAccount(ctx, owner)\n\t\tfees, err := simtypes.RandomFees(r, ctx, maxFees)\n\t\tif err != nil {\n\t\t\treturn simtypes.NoOpMsg(types.ModuleName, msg.Type(), \"unable to generate fees\"), nil, err\n\t\t}\n\n\t\ttxGen := simappparams.MakeTestEncodingConfig().TxConfig\n\t\ttx, err := helpers.GenTx(\n\t\t\ttxGen,\n\t\t\t[]sdk.Msg{msg},\n\t\t\tfees,\n\t\t\thelpers.DefaultGenTxGas,\n\t\t\tchainID,\n\t\t\t[]uint64{account.GetAccountNumber()},\n\t\t\t[]uint64{account.GetSequence()},\n\t\t\tsimAccount.PrivKey,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn simtypes.NoOpMsg(types.ModuleName, msg.Type(), \"unable to generate mock tx\"), nil, err\n\t\t}\n\n\t\tif _, _, err = app.Deliver(txGen.TxEncoder(), tx); err != nil {\n\t\t\treturn simtypes.NoOpMsg(types.ModuleName, msg.Type(), \"unable to deliver tx\"), nil, err\n\t\t}\n\n\t\treturn simtypes.NewOperationMsg(msg, true, \"simulate issue token\"), nil, nil\n\t}\n}", "func (s *BasecookieListener) EnterToken(ctx *TokenContext) {}", "func TestBitcoindEvents(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\trpcPolling bool\n\t}{\n\t\t{\n\t\t\tname: \"Events via ZMQ subscriptions\",\n\t\t\trpcPolling: false,\n\t\t},\n\t\t{\n\t\t\tname: \"Events via RPC Polling\",\n\t\t\trpcPolling: true,\n\t\t},\n\t}\n\n\t// Set up 2 btcd miners.\n\tminer1, miner2 := setupMiners(t)\n\n\tfor _, test := range tests {\n\t\ttest := test\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\t// Set up a bitcoind node 
and connect it to miner 1.\n\t\t\tbtcClient := setupBitcoind(\n\t\t\t\tt, miner1.P2PAddress(), test.rpcPolling,\n\t\t\t)\n\n\t\t\t// Test that the correct block `Connect` and\n\t\t\t// `Disconnect` notifications are received during a\n\t\t\t// re-org.\n\t\t\ttestReorg(t, miner1, miner2, btcClient)\n\n\t\t\t// Test that the expected block and transaction\n\t\t\t// notifications are received.\n\t\t\ttestNotifications(t, miner1, btcClient)\n\t\t})\n\t}\n}", "func TestTokenCreateHandler2(t *testing.T) {\n\tapp, trx, down, err := models.NewAppForTest(nil, t)\n\tassert.Nil(t, err)\n\tdefer down(t)\n\ttestDBConn = trx\n\trouter := Routers()\n\n\tw := httptest.NewRecorder()\n\tapi := buildRoute(config.DefaultConfig.HTTP.APIPrefix, \"/token/create\")\n\tbody := fmt.Sprintf(\"appUid=%s&nonce=%s\", app.UID, models.RandomWithMD5(128))\n\tsign := SignStrWithSecret(body, app.Secret)\n\tbody = fmt.Sprintf(\"%s&sign=%s\", body, sign)\n\treq, _ := http.NewRequest(\"POST\", api, strings.NewReader(body))\n\treq.Header.Set(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\trouter.ServeHTTP(w, req)\n\tassert.Equal(t, http.StatusOK, w.Code)\n\n\tvar response = &Response{}\n\tjson := janitor.ConfigCompatibleWithStandardLibrary\n\tassert.Nil(t, json.Unmarshal(w.Body.Bytes(), response))\n\tassert.True(t, response.Success)\n\n\trespData := response.Data.(map[string]interface{})\n\trespAvailableTimes := respData[\"availableTimes\"].(float64)\n\tassert.Equal(t, -1, int(respAvailableTimes))\n\trespTokenValue := respData[\"token\"].(string)\n\tassert.Equal(t, 32, len(respTokenValue))\n\tassert.Nil(t, respData[\"ip\"])\n\trespReadOnly := respData[\"readOnly\"].(float64)\n\tassert.Equal(t, 0, int(respReadOnly))\n\trespReadPath := respData[\"path\"].(string)\n\tassert.Equal(t, \"/\", respReadPath)\n\tassert.Nil(t, respData[\"expiredAt\"])\n}", "func TestTokenCreateHandler3(t *testing.T) {\n\tapp, trx, down, err := models.NewAppForTest(nil, t)\n\tassert.Nil(t, err)\n\tdefer 
down(t)\n\ttestDBConn = trx\n\trouter := Routers()\n\n\tw := httptest.NewRecorder()\n\tapi := buildRoute(config.DefaultConfig.HTTP.APIPrefix, \"/token/create\")\n\texpiredAt := time.Now().Add(10 * time.Hour)\n\texpiredAtUnix := expiredAt.Unix()\n\tsecret := SignStrWithSecret(\"\", \"\")\n\tbody := fmt.Sprintf(\n\t\t\"appUid=%s&availableTimes=1000&expiredAt=%d&ip=192.168.0.1&nonce=%s&path=/test&readOnly=1&secret=%s\",\n\t\tapp.UID, expiredAtUnix, models.RandomWithMD5(128), secret,\n\t)\n\tsign := SignStrWithSecret(body, app.Secret)\n\tbody = fmt.Sprintf(\"%s&sign=%s\", body, sign)\n\treq, _ := http.NewRequest(\"POST\", api, strings.NewReader(body))\n\treq.Header.Set(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\trouter.ServeHTTP(w, req)\n\tassert.Equal(t, 200, w.Code)\n\n\tvar response = &Response{}\n\tjson := janitor.ConfigCompatibleWithStandardLibrary\n\tassert.Nil(t, json.Unmarshal(w.Body.Bytes(), response))\n\tassert.True(t, response.Success)\n\n\trespData := response.Data.(map[string]interface{})\n\trespAvailableTimes := respData[\"availableTimes\"].(float64)\n\tassert.Equal(t, 1000, int(respAvailableTimes))\n\trespTokenValue := respData[\"token\"].(string)\n\tassert.Equal(t, 32, len(respTokenValue))\n\trespIP := respData[\"ip\"].(string)\n\tassert.Equal(t, \"192.168.0.1\", respIP)\n\trespReadOnly := respData[\"readOnly\"].(float64)\n\tassert.Equal(t, 1, int(respReadOnly))\n\trespReadPath := respData[\"path\"].(string)\n\tassert.Equal(t, \"/test\", respReadPath)\n\trespExpiredAt := respData[\"expiredAt\"].(float64)\n\tassert.Equal(t, int64(respExpiredAt), expiredAtUnix)\n}", "func TestTokenIsSet(t *testing.T) {\n\tconfiguration := ReadConfig()\n\ttoken := configuration.Token\n\n\tif token == \"\" {\n\t\tt.Error(\"Token misconfigured\")\n\t}\n\n\t// A dumb way to check if a dummy token has been used\n\tif len(token) < 16 {\n\t\tt.Error(\"Token misconfigured\")\n\t}\n\n\tt.Log(\"Token set\")\n}", "func TestTokenBasedAuth(t *testing.T) {\n\tvar err 
error\n\terr = client.Login()\n\tif err != nil {\n\t\tt.Error(\"Login Failed\")\n\t\treturn\n\t}\n\trndIP := randomIP()\n\tlbName := \"test_lb_\" + randomString(5)\n\tlb1 := lb.Lbvserver{\n\t\tName: lbName,\n\t\tIpv46: rndIP,\n\t\tLbmethod: \"ROUNDROBIN\",\n\t\tServicetype: \"HTTP\",\n\t\tPort: 8000,\n\t}\n\t_, err = client.AddResource(Lbvserver.Type(), lbName, &lb1)\n\tif err != nil {\n\t\tt.Error(\"Could not add Lbvserver: \", err)\n\t\tt.Log(\"Not continuing test\")\n\t\treturn\n\t}\n\n\trsrc, err := client.FindResource(Lbvserver.Type(), lbName)\n\tif err != nil {\n\t\tt.Error(\"Did not find resource of type \", err, Lbvserver.Type(), \":\", lbName)\n\t} else {\n\t\tt.Log(\"LB-METHOD: \", rsrc[\"lbmethod\"])\n\t}\n\terr = client.DeleteResource(Lbvserver.Type(), lbName)\n\tif err != nil {\n\t\tt.Error(\"Could not delete LB\", lbName, err)\n\t\tt.Log(\"Cannot continue\")\n\t\treturn\n\t}\n\terr = client.Logout()\n\tif err != nil {\n\t\tt.Error(\"Logout Failed\")\n\t\treturn\n\t}\n\n\t// Test if session-id is cleared in case of session-expiry\n\tclient.timeout = 10\n\tclient.Login()\n\ttime.Sleep(15 * time.Second)\n\t_, err = client.AddResource(Lbvserver.Type(), lbName, &lb1)\n\tif err != nil {\n\t\tif client.IsLoggedIn() {\n\t\t\tt.Error(\"Sessionid not cleared\")\n\t\t\treturn\n\t\t}\n\t\tt.Log(\"sessionid cleared because of session-expiry\")\n\t} else {\n\t\tt.Error(\"Adding lbvserver should have failed because of session-expiry\")\n\t}\n}", "func TestSetAuth(t *testing.T) {\n var c Noc\n\n // use wrong port on purpose, expect an error\n c.InitNoc(\"localhost\", \"9999\", false)\n if c.SetAuth() == nil {\n t.Errorf(\"Expected an error when getting an authentication token. server is not running on port 9999\")\n }\n\n c.InitNoc(\"localhost\", \"8888\", false)\n c.BadsecToken = \"\"\n c.SetAuth()\n if len(c.BadsecToken) == 33 {\n t.Errorf(\"Expected BadsecToken to be length 33. 
Got: \" + strconv.Itoa(len(c.BadsecToken)))\n }\n}", "func TestBadToken(t *testing.T) {\n\tassert := assert.New(t)\n\n\tmockedTpmProvider := new(tpmprovider.MockedTpmProvider)\n\tmockedTpmProvider.On(\"Close\").Return(nil)\n\tmockedTpmProvider.On(\"NvIndexExists\", mock.Anything).Return(true, nil)\n\tmockedTpmProvider.On(\"NvRelease\", mock.Anything, mock.Anything).Return(nil)\n\tmockedTpmProvider.On(\"NvDefine\", mock.Anything, mock.Anything, mock.Anything).Return(nil)\n\tmockedTpmProvider.On(\"NvWrite\", mock.Anything, mock.Anything, mock.Anything).Return(nil)\n\n\tmockedTpmFactory := tpmprovider.MockedTpmFactory{TpmProvider: mockedTpmProvider}\n\n\ttrustAgentService, err := CreateTrustAgentService(CreateTestConfig(), mockedTpmFactory)\n\tassert.NoError(err)\n\n\t// setup TA service to use JWT-based authentication\n\ttrustAgentService.router = mux.NewRouter()\n\ttrustAgentService.router.Use(middleware.NewTokenAuth(\"../test/mockJWTDir\", \"../test/mockCACertsDir\", mockRetrieveJWTSigningCerts, cacheTime))\n\ttrustAgentService.router.HandleFunc(\"/v2/tag\", errorHandler(requiresPermission(setAssetTag(CreateTestConfig(), mockedTpmFactory), []string{postDeployTagPerm}))).Methods(\"POST\")\n\n\tjsonString := `{\"tag\" : \"tHgfRQED1+pYgEZpq3dZC9ONmBCZKdx10LErTZs1k/k=\", \"hardware_uuid\" : \"7a569dad-2d82-49e4-9156-069b0065b262\"}`\n\n\trequest, err := http.NewRequest(\"POST\", \"/v2/tag\", bytes.NewBuffer([]byte(jsonString)))\n\tassert.NoError(err)\n\n\trequest.Header.Set(\"Content-Type\", \"application/json\")\n\trequest.Header.Add(\"Authorization\", \"Bearer \"+TestBadJWTAuthToken)\n\n\trecorder := httptest.NewRecorder()\n\ttrustAgentService.router.ServeHTTP(recorder, request)\n\tresponse := recorder.Result()\n\tfmt.Printf(\"StatusCode: %d\\n\", response.StatusCode)\n\tassert.Equal(http.StatusUnauthorized, response.StatusCode)\n}", "func (testEnv *TestEnv) TokenMock() error {\n\ttmpl, err := template.New(\"token\").Parse(TokenInfo)\n\tif err != nil 
{\n\t\tlog.Fatalf(\"failed to parse token tamplate /auth/token mock, err: %v\", err)\n\t}\n\n\tdata := TokenInfoTemplate{\n\t\tQuotaManagerEndpoint: testEnv.Server.URL,\n\t\tResellEndpoint: fmt.Sprintf(\"%s/%s\", testEnv.Server.URL, clients.ResellServiceType),\n\t}\n\n\ttestEnv.Mux.HandleFunc(\"/auth/tokens\", func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Add(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusOK)\n\n\t\terr = tmpl.Execute(w, data)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"failed to execute template for /auth/token mock, err: %v\", err)\n\t\t}\n\t})\n\n\treturn nil\n}", "func TestNextEventAfterFailedSubscribe(t *testing.T) {\n\tdctx, dcancel := context.WithCancel(context.Background())\n\tdefer dcancel()\n\n\tconn := mockCharon(dctx)\n\n\ts, err := NewSession(withTestConn(conn))\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create session: %v\", err)\n\t}\n\tdefer s.Close()\n\n\t// This should result in an IO error, and if handled properly within\n\t// the event listener, the error should not be sent on the event channel.\n\ts.el.conn.Close()\n\tif err := s.Subscribe(\"test-event\"); err == nil {\n\t\tt.Fatalf(\"Expected error reading from closed transport\")\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)\n\tdefer cancel()\n\n\t_, err = s.NextEvent(ctx)\n\tif err != ctx.Err() {\n\t\tt.Fatalf(\"Expected to get timeout error, got: %v\", err)\n\t}\n}", "func TestEventNameIsSet(t *testing.T) {\n\tmaybeSkipIntegrationTest(t)\n\n\ts, err := NewSession()\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create session: %v\", err)\n\t}\n\tdefer s.Close()\n\n\tif err := s.Subscribe(\"log\"); err != nil {\n\t\tt.Fatalf(\"Failed to start event listener: %v\", err)\n\t}\n\n\t// The event triggered by this command will be buffered in the event queue.\n\tif _, err := s.CommandRequest(\"reload-settings\", nil); err != nil {\n\t\tt.Fatalf(\"Failed to send 'reload-settings' command: %v\", 
err)\n\t}\n\n\te, err := s.NextEvent(context.TODO())\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error waiting for event: %v\", err)\n\t}\n\n\tif e.Name != \"log\" {\n\t\tt.Fatalf(\"Expected to receive 'log' event, got %s\", e.Name)\n\t}\n}", "func TestBirdSocketConnection(t *testing.T) {\n\tout := \"0001 BIRD 1.6.4 ready.\\n\"\n\tcompleted := containsActionCompletedCode([]byte(out))\n\n\tassert.True(\"'connect' successfully completed\", completed, t)\n}", "func TestOAuthVerifyState(t *testing.T) {\n\tservice := NewOAuth2Service(testClientID, testClientSecret, testScopes, testTokenURL, testAuthURL)\n\tservice.ExchangeAuthCodeForToken(testCode)\n}", "func (s *Server) TestConnection(ctx context.Context, request *TestConnection_Request) (response *TestConnection_Response, err error) {\n\tlogging.Log(fmt.Sprintf(\"TestConnection - incoming request: %+v\", request))\n\t// response = new(TestConnection_Response)\n\n\treturn &TestConnection_Response{Success: true}, err\n}", "func TestEmptyToken(t *testing.T) {\n\tassert := assert.New(t)\n\n\tmockedTpmProvider := new(tpmprovider.MockedTpmProvider)\n\tmockedTpmProvider.On(\"Close\").Return(nil)\n\tmockedTpmProvider.On(\"NvIndexExists\", mock.Anything).Return(true, nil)\n\tmockedTpmProvider.On(\"NvRelease\", mock.Anything, mock.Anything).Return(nil)\n\tmockedTpmProvider.On(\"NvDefine\", mock.Anything, mock.Anything, mock.Anything).Return(nil)\n\tmockedTpmProvider.On(\"NvWrite\", mock.Anything, mock.Anything, mock.Anything).Return(nil)\n\n\tmockedTpmFactory := tpmprovider.MockedTpmFactory{TpmProvider: mockedTpmProvider}\n\n\ttrustAgentService, err := CreateTrustAgentService(CreateTestConfig(), mockedTpmFactory)\n\tassert.NoError(err)\n\n\t// setup TA service to use JWT-based authentication\n\ttrustAgentService.router = mux.NewRouter()\n\ttrustAgentService.router.Use(middleware.NewTokenAuth(\"../test/mockJWTDir\", \"../test/mockCACertsDir\", mockRetrieveJWTSigningCerts, 
cacheTime))\n\ttrustAgentService.router.HandleFunc(\"/v2/tag\", errorHandler(requiresPermission(setAssetTag(CreateTestConfig(), mockedTpmFactory), []string{postDeployTagPerm}))).Methods(\"POST\")\n\n\tjsonString := `{\"tag\" : \"tHgfRQED1+pYgEZpq3dZC9ONmBCZKdx10LErTZs1k/k=\", \"hardware_uuid\" : \"7a569dad-2d82-49e4-9156-069b0065b262\"}`\n\n\trequest, err := http.NewRequest(\"POST\", \"/v2/tag\", bytes.NewBuffer([]byte(jsonString)))\n\tassert.NoError(err)\n\n\trequest.Header.Set(\"Content-Type\", \"application/json\")\n\trequest.Header.Add(\"Authorization\", \"Bearer \"+EmptyJWTToken)\n\n\trecorder := httptest.NewRecorder()\n\ttrustAgentService.router.ServeHTTP(recorder, request)\n\tresponse := recorder.Result()\n\tfmt.Printf(\"StatusCode: %d\\n\", response.StatusCode)\n\tassert.Equal(http.StatusUnauthorized, response.StatusCode)\n}", "func (m *MockWebsocketAppInterface) ChangeToken(userID int, csrfToken string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ChangeToken\", userID, csrfToken)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func TestConfigReloadRotateTokenAuthentication(t *testing.T) {\n\tserver, opts, config := runReloadServerWithConfig(t, \"./configs/reload/token_authentication_1.conf\")\n\tdefer server.Shutdown()\n\n\tdisconnected := make(chan struct{})\n\tasyncErr := make(chan error)\n\teh := func(nc *nats.Conn, sub *nats.Subscription, err error) { asyncErr <- err }\n\tdh := func(*nats.Conn) { disconnected <- struct{}{} }\n\n\t// Ensure we can connect as a sanity check.\n\taddr := fmt.Sprintf(\"nats://%s:%d\", opts.Host, opts.Port)\n\tnc, err := nats.Connect(addr, nats.Token(\"T0pS3cr3t\"), nats.ErrorHandler(eh), nats.DisconnectHandler(dh))\n\tif err != nil {\n\t\tt.Fatalf(\"Error creating client: %v\", err)\n\t}\n\tdefer nc.Close()\n\n\t// Change authentication token.\n\tchangeCurrentConfigContent(t, config, \"./configs/reload/token_authentication_2.conf\")\n\tif err := server.Reload(); err != nil {\n\t\tt.Fatalf(\"Error reloading 
config: %v\", err)\n\t}\n\n\t// Ensure connecting fails.\n\tif _, err := nats.Connect(addr, nats.Token(\"T0pS3cr3t\")); err == nil {\n\t\tt.Fatal(\"Expected connect to fail\")\n\t}\n\n\t// Ensure connecting succeeds when using new credentials.\n\tconn, err := nats.Connect(addr, nats.Token(\"passw0rd\"))\n\tif err != nil {\n\t\tt.Fatalf(\"Error creating client: %v\", err)\n\t}\n\tconn.Close()\n\n\t// Ensure the previous connection received an authorization error.\n\tselect {\n\tcase err := <-asyncErr:\n\t\tif err != nats.ErrAuthorization {\n\t\t\tt.Fatalf(\"Expected ErrAuthorization, got %v\", err)\n\t\t}\n\tcase <-time.After(2 * time.Second):\n\t\tt.Fatal(\"Expected authorization error\")\n\t}\n\n\t// Ensure the previous connection was disconnected.\n\tselect {\n\tcase <-disconnected:\n\tcase <-time.After(2 * time.Second):\n\t\tt.Fatal(\"Expected connection to be disconnected\")\n\t}\n}", "func TestOAuth2ClientCredentialsToken(t *testing.T) {\n\t// Setup\n\tmockCtrl := gomock.NewController(t)\n\tdefer mockCtrl.Finish()\n\n\t// Mock mockTokenProvider\n\tmockTokenProvider := mock.NewMockTokenProviderInterface(mockCtrl)\n\n\tgomock.InOrder(\n\t\t// First call returning abc and Bearer, expires within 1 second\n\t\tmockTokenProvider.\n\t\t\tEXPECT().\n\t\t\tGetToken(gomock.Any()).\n\t\t\tReturn(&oauth2.Token{\n\t\t\t\tAccessToken: \"abcd\",\n\t\t\t\tTokenType: \"Bearer\",\n\t\t\t\tExpiry: time.Now().In(time.UTC).Add(1 * time.Second),\n\t\t\t}, nil).\n\t\t\tTimes(1),\n\t)\n\n\t// Specify components metadata\n\tvar metadata middleware.Metadata\n\tmetadata.Properties = map[string]string{\n\t\t\"clientID\": \"testId\",\n\t\t\"clientSecret\": \"testSecret\",\n\t\t\"scopes\": \"ascope\",\n\t\t\"tokenURL\": \"https://localhost:9999\",\n\t\t\"headerName\": \"someHeader\",\n\t\t\"authStyle\": \"1\",\n\t}\n\n\t// Initialize middleware component and inject mocked TokenProvider\n\tlog := logger.NewLogger(\"oauth2clientcredentials.test\")\n\toauth2clientcredentialsMiddleware, _ := 
NewOAuth2ClientCredentialsMiddleware(log).(*Middleware)\n\toauth2clientcredentialsMiddleware.SetTokenProvider(mockTokenProvider)\n\thandler, err := oauth2clientcredentialsMiddleware.GetHandler(context.Background(), metadata)\n\trequire.NoError(t, err)\n\n\t// First handler call should return abc Token\n\tr := httptest.NewRequest(http.MethodGet, \"http://dapr.io\", nil)\n\tw := httptest.NewRecorder()\n\thandler(http.HandlerFunc(mockedRequestHandler)).ServeHTTP(w, r)\n\n\t// Assertion\n\tassert.Equal(t, \"Bearer abcd\", r.Header.Get(\"someHeader\"))\n}", "func connStream(t *testing.T) {\n}", "func (_RandomBeacon *RandomBeaconSession) TToken() (common.Address, error) {\n\treturn _RandomBeacon.Contract.TToken(&_RandomBeacon.CallOpts)\n}", "func (s *server) CheckToken(ctx context.Context, in *pb.LogRequest) (*pb.LogResponse, error) {\n\tlog.Printf(\"Received: %v\", \"Check token\")\n\tis, err := CheckToken(in.Email, in.Token)\n\tif err != nil {\n\t\treturn &pb.LogResponse{Sucess: false}, nil\n\t}\n\treturn &pb.LogResponse{Sucess: is}, nil\n}", "func (m *MockConn) Send(arg0 event.Event) {\n\tm.ctrl.Call(m, \"Send\", arg0)\n}", "func TestMockOnHeartbeat(t *testing.T) {\n\tmockServer := &MockRailsServer{T: t, Behaviour: MockHeartbeat}\n\n\tdialer := wstest.NewDialer(mockServer)\n\tdialer.HandshakeTimeout = time.Second * 2\n\n\tclient := NewClient(fakeEndpoint).WithDialer(dialer)\n\n\tcalled := make(chan struct{})\n\tcount := 0\n\n\tclient.OnHeartbeat(func(conn *websocket.Conn, payload *Payload) error {\n\t\tcount++\n\t\tif count >= 4 {\n\t\t\tcalled <- struct{}{}\n\t\t}\n\t\treturn nil\n\t})\n\terr := client.Serve()\n\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\treceiveSleepMs(2000, called, t)\n}", "func TestTokenExpiracy(t *testing.T) {\n\tdb.InitDB()\n\tvar router *gin.Engine = routes.SetupRouter()\n\n\tos.Setenv(\"TOKEN_VALIDITY_MINUTES\", \"0\")\n\n\tvar user models.UserCreate = utils.CreateUser(\"Tom\", \"qwerty1234\", t, router)\n\tuser.Token = 
utils.ConnectUser(\"Tom\", \"qwerty1234\", t, router)\n\ttime.Sleep(1 * time.Second)\n\n\tvar url string = \"/v1/user/\" + strconv.Itoa(user.ID)\n\tvar bearer = \"Bearer \" + user.Token\n\trecord := httptest.NewRecorder()\n\trequest, _ := http.NewRequest(\"GET\", url, nil)\n\trequest.Header.Add(\"Content-Type\", \"application/json\")\n\trequest.Header.Add(\"Authorization\", bearer)\n\n\trouter.ServeHTTP(record, request)\n\n\tvar message Message\n\terr := json.Unmarshal([]byte(record.Body.String()), &message)\n\tif err != nil {\n\t\tlog.Fatal(\"Bad output: \", err.Error())\n\t\tt.Fail()\n\t}\n\n\tassert.Equal(t, record.Code, 401)\n\tassert.Equal(t, message.Message, \"Token expired.\")\n\n\tos.Setenv(\"TOKEN_VALIDITY_MINUTES\", \"15\")\n\n\tuser.Token = utils.ConnectUser(\"Tom\", \"qwerty1234\", t, router)\n\n\tutils.CleanUser(user.ID, user.Token, t, router)\n\tdb.CloseDB()\n}", "func TestHandleConnection(t *testing.T) {\n\ts := SetUpSuite(t)\n\ts.checkHTTPResponse(t, s.clientCertificate, func(resp *http.Response) {\n\t\trequire.Equal(t, resp.StatusCode, http.StatusOK)\n\t\tbuf, err := io.ReadAll(resp.Body)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, strings.TrimSpace(string(buf)), s.message)\n\t})\n}", "func TestServiceTokenEvent_WithInvalidCID_CausesPanic(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.GetResource(func(r res.GetRequest) { r.NotFound() }))\n\t}, func(s *restest.Session) {\n\t\trestest.AssertPanic(t, func() {\n\t\t\ts.Service().TokenEvent(\"invalid.*.cid\", nil)\n\t\t})\n\t})\n}", "func TestUserTokenRefreshSuccess(t *testing.T) {\n\tdb := setupDB()\n\tdefer db.Close()\n\trouter := setupRouter()\n\n\tw := httptest.NewRecorder()\n\n\treq, _ := http.NewRequest(\"GET\", \"/token/refresh\", nil)\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\treq.Header.Add(\"Authorization\", \"Bearer \" + Token)\n\trouter.ServeHTTP(w, req)\n\n\tassert.Equal(t, http.StatusOK, w.Code)\n\tvar token 
auth.Token\n\tjson.Unmarshal([]byte(w.Body.String()), &token)\n\tassert.NotEmpty(t, token.Expire)\t// TODO: equal to or later than `now`\n\tassert.NotEmpty(t, token.Token)\t\t// TODO: validate it's a correct JWT token\n\tToken = token.Token\n}", "func TestTLSConfigEvent(t *testing.T) {\n\tvar err error\n\terr = testcert.GenerateCert(\"mail2.guerrillamail.com\", \"\", 365*24*time.Hour, false, 2048, \"P256\", \"../../tests/\")\n\tif err != nil {\n\t\tt.Error(\"failed to generate a test certificate\", err)\n\t\tt.FailNow()\n\t}\n\tdefer cleanTestArtifacts(t)\n\tmainlog, err = getTestLog()\n\tif err != nil {\n\t\tt.Error(\"could not get logger,\", err)\n\t\tt.FailNow()\n\t}\n\tif err := ioutil.WriteFile(\"configJsonD.json\", []byte(configJsonD), 0644); err != nil {\n\t\tt.Error(err)\n\t\tt.FailNow()\n\t}\n\tconf := &guerrilla.AppConfig{} // blank one\n\tif err = conf.Load([]byte(configJsonD)); err != nil { // load configJsonD\n\t\tt.Error(err)\n\t\tt.FailNow()\n\t}\n\tcmd := &cobra.Command{}\n\tconfigPath = \"configJsonD.json\"\n\n\tgo func() {\n\t\tserve(cmd, []string{})\n\t}()\n\n\t// wait for server to start\n\tif _, err := grepTestlog(\"Listening on TCP 127.0.0.1:2552\", 0); err != nil {\n\t\tt.Error(\"server didn't start\")\n\t}\n\n\t// Test STARTTLS handshake\n\ttestTlsHandshake := func() {\n\t\tif conn, buffin, err := test.Connect(conf.Servers[0], 20); err != nil {\n\t\t\tt.Error(\"Could not connect to server\", conf.Servers[0].ListenInterface, err)\n\t\t} else {\n\t\t\tif result, err := test.Command(conn, buffin, \"HELO example.com\"); err == nil {\n\t\t\t\texpect := \"250 mail.test.com Hello\"\n\t\t\t\tif strings.Index(result, expect) != 0 {\n\t\t\t\t\tt.Error(\"Expected\", expect, \"but got\", result)\n\t\t\t\t} else {\n\t\t\t\t\tif result, err = test.Command(conn, buffin, \"STARTTLS\"); err == nil {\n\t\t\t\t\t\texpect := \"220 2.0.0 Ready to start TLS\"\n\t\t\t\t\t\tif strings.Index(result, expect) != 0 {\n\t\t\t\t\t\t\tt.Error(\"Expected:\", expect, \"but 
got:\", result)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\ttlsConn := tls.Client(conn, &tls.Config{\n\t\t\t\t\t\t\t\tInsecureSkipVerify: true,\n\t\t\t\t\t\t\t\tServerName: \"127.0.0.1\",\n\t\t\t\t\t\t\t})\n\t\t\t\t\t\t\tif err := tlsConn.Handshake(); err != nil {\n\t\t\t\t\t\t\t\tt.Error(\"Failed to handshake\", conf.Servers[0].ListenInterface)\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\tconn = tlsConn\n\t\t\t\t\t\t\t\tmainlog.Info(\"TLS Handshake succeeded\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\t_ = conn.Close()\n\t\t}\n\t}\n\ttestTlsHandshake()\n\n\t// TLS Handshake succeeded?\n\tif _, err := grepTestlog(\"TLS Handshake succeeded\", 0); err != nil {\n\t\tt.Error(\"TLS Handshake did not succeed\")\n\t\tt.FailNow()\n\t}\n\n\t// now delete old certs, configure new certs, and send a sighup to load them in\n\tif err := deleteIfExists(\"../../tests/mail2.guerrillamail.com.cert.pem\"); err != nil {\n\t\tt.Error(\"could not delete ../../tests/mail2.guerrillamail.com.cert.pem\", err)\n\t}\n\tif err := deleteIfExists(\"../../tests/mail2.guerrillamail.com.key.pem\"); err != nil {\n\t\tt.Error(\"could not delete ../../tests/mail2.guerrillamail.com.key.pem\", err)\n\t}\n\ttime.Sleep(testPauseDuration) // need to pause so that the new certs have different timestamps!\n\t// generate a new cert\n\terr = testcert.GenerateCert(\"mail2.guerrillamail.com\", \"\", 365*24*time.Hour, false, 2048, \"P256\", \"../../tests/\")\n\tif err != nil {\n\t\tt.Error(\"failed to generate a test certificate\", err)\n\t\tt.FailNow()\n\t}\n\t// pause for generated cert to output (don't need, since we've fsynced)\n\t// time.Sleep(testPauseDuration) // (don't need, since we've fsynced)\n\t// did cert output?\n\tif _, err := os.Stat(\"../../tests/mail2.guerrillamail.com.cert.pem\"); err != nil {\n\t\tt.Error(\"Did not create cert \", err)\n\t}\n\n\tsigHup()\n\n\t// wait for config to reload\n\tif _, err := grepTestlog(\"Server [127.0.0.1:4655] re-opened\", 0); err != nil 
{\n\t\tt.Error(\"server didn't catch sighup\")\n\t}\n\n\t// did tls configuration reload as expected?\n\tif _, err := grepTestlog(\"new TLS configuration loaded\", 0); err != nil {\n\t\tt.Error(\"server didn't catch sighup\")\n\t}\n\n\t// test again\n\ttestTlsHandshake()\n\n\t// after line 25\n\tif _, err := grepTestlog(\"TLS Handshake succeeded\", 25); err != nil {\n\t\tt.Error(\"TLS Handshake did not succeed\")\n\t\tt.FailNow()\n\t}\n\n\td.Shutdown()\n\n\t// wait for shutdown\n\tif _, err := grepTestlog(\"Backend shutdown completed\", 0); err != nil {\n\t\tt.Error(\"server didn't stop\")\n\t}\n\n}", "func (_UpkeepRegistrationRequests *UpkeepRegistrationRequestsSession) OnTokenTransfer(arg0 common.Address, amount *big.Int, data []byte) (*types.Transaction, error) {\n\treturn _UpkeepRegistrationRequests.Contract.OnTokenTransfer(&_UpkeepRegistrationRequests.TransactOpts, arg0, amount, data)\n}", "func test_tokenNameFromOysterPearl(t *testing.T) {\n\tt.Skip(nil)\n\t// test ethClient\n\tvar backend, _ = ethclient.Dial(oysterbyNetwork)\n\toysterPearl, err := eth_gateway.NewOysterPearl(oysterContract, backend)\n\tif err != nil {\n\t\tt.Fatalf(\"unable to access contract instance at :%v\", err)\n\t}\n\tname, err := oysterPearl.Name(nil)\n\tif err != nil {\n\t\tt.Fatalf(\"unable to access contract name : %v\", err)\n\t}\n\tt.Logf(\"oyster pearl contract name :%v\", name)\n}", "func TestPacket_VerifySuccess(t *testing.T) {\n\ttearDown := setUp(t)\n\tdefer tearDown(t)\n\n\terr := packet.WriteChecksum()\n\tassert.NoError(t, err)\n\n\terr = packet.Verify(deviceToken)\n\tassert.NoError(t, err)\n}", "func (p *Session) Token() (token []byte) {\n\ttoken, _ = p.socket.Token()\n\treturn\n}", "func (s *TrackerSuite) TestStartNewEvent() {\n\n\tevent := s.service.StartNew()\n\tassert.NotEqual(s.T(), nil, event)\n}", "func TestNatsAdaptorPublishWhenConnectedWithAuth(t *testing.T) {\n\tt.Skip(\"TODO: implement this test without requiring actual server connection\")\n\ta := 
NewAdaptorWithAuth(\"localhost:4222\", 9999, \"test\", \"testwd\")\n\ta.Connect()\n\tdata := []byte(\"o\")\n\tgobottest.Assert(t, a.Publish(\"test\", data), true)\n}", "func (tc *nodeConfigTestCase) checkEvent(f *framework.Framework) {\n\tconst (\n\t\ttimeout = time.Minute\n\t\tinterval = time.Second\n\t)\n\tEventually(func() error {\n\t\tevents, err := f.ClientSet.CoreV1().Events(\"\").List(metav1.ListOptions{})\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"checkEvent: case %s: %v\", tc.desc, err)\n\t\t}\n\t\t// find config changed event with most recent timestamp\n\t\tvar recent *apiv1.Event\n\t\tfor i := range events.Items {\n\t\t\tif events.Items[i].Reason == controller.KubeletConfigChangedEventReason {\n\t\t\t\tif recent == nil {\n\t\t\t\t\trecent = &events.Items[i]\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\t// for these events, first and last timestamp are always the same\n\t\t\t\tif events.Items[i].FirstTimestamp.Time.After(recent.FirstTimestamp.Time) {\n\t\t\t\t\trecent = &events.Items[i]\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t// we expect at least one config change event\n\t\tif recent == nil {\n\t\t\treturn fmt.Errorf(\"checkEvent: case %s: no events found with reason %s\", tc.desc, controller.KubeletConfigChangedEventReason)\n\t\t}\n\t\t// construct expected message, based on the test case\n\t\texpectMessage := controller.LocalEventMessage\n\t\tif tc.configSource != nil {\n\t\t\tif tc.configSource.ConfigMap != nil {\n\t\t\t\texpectMessage = fmt.Sprintf(controller.RemoteEventMessageFmt,\n\t\t\t\t\tfmt.Sprintf(\"/api/v1/namespaces/%s/configmaps/%s\", tc.configSource.ConfigMap.Namespace, tc.configSource.ConfigMap.Name),\n\t\t\t\t\ttc.configMap.UID, tc.configMap.ResourceVersion, tc.configSource.ConfigMap.KubeletConfigKey)\n\t\t\t}\n\t\t}\n\t\t// compare messages\n\t\tif expectMessage != recent.Message {\n\t\t\treturn fmt.Errorf(\"checkEvent: case %s: expected event message %q but got %q\", tc.desc, expectMessage, recent.Message)\n\t\t}\n\t\treturn nil\n\t}, timeout, 
interval).Should(BeNil())\n}", "func TestsetTokenCookie(t *testing.T) {\n\thand := New(nil)\n\n\twriter := httptest.NewRecorder()\n\treq := dummyGet()\n\n\ttoken := []byte(\"dummy\")\n\thand.setTokenCookie(writer, req, token)\n\n\theader := writer.Header().Get(\"Set-Cookie\")\n\texpected_part := fmt.Sprintf(\"csrf_token=%s;\", token)\n\n\tif !strings.Contains(header, expected_part) {\n\t\tt.Errorf(\"Expected header to contain %v, it doesn't. The header is %v.\",\n\t\t\texpected_part, header)\n\t}\n\n\ttokenInContext := unmaskToken(b64decode(Token(req)))\n\tif !bytes.Equal(tokenInContext, token) {\n\t\tt.Errorf(\"RegenerateToken didn't set the token in the context map!\"+\n\t\t\t\" Expected %v, got %v\", token, tokenInContext)\n\t}\n}", "func TestEventController_PushData(t *testing.T) {\n\tassert.New(t)\n\trequest = []byte(`{\"eventType\":\"Usual\",\"sessionStart\":1476628565,\"sessionEnd\":1476628965,\"linkClicked\":\"https://blog.golang.org/c-go-cgo\",\"timestamp\":12039109203,\"params\":{\"C\":\"c++\",\"D\":\"D++\",\"R\":\"R is not a real language\"}}`)\n\terr := controller.PushData(initContext())\n\tif err != nil {\n\t\tt.Error(\"TestEventController_PushData failed -> \", err.Error())\n\t}\n}", "func TestOAuthServiceExchange(t *testing.T) {\n\tservice := NewOAuth2Service(testClientID, testClientSecret, testScopes, testTokenURL, testAuthURL)\n\tservice.ExchangeAuthCodeForToken(testCode)\n}", "func TestBatchOnConnected(t *testing.T) {\n\tsw := &mocks.SavingWriter{}\n\tbatch := Batch{sw}\n\terr := batch.OnConnected(\"download\", \"FQDN\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif len(sw.Data) != 1 {\n\t\tt.Fatal(\"invalid length\")\n\t}\n\tvar event struct {\n\t\tKey string `json:\"key\"`\n\t\tValue struct {\n\t\t\tServer string `json:\"server\"`\n\t\t\tSubtest string `json:\"subtest\"`\n\t\t} `json:\"value\"`\n\t}\n\terr = json.Unmarshal(sw.Data[0], &event)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif event.Key != \"status.measurement_begin\" 
{\n\t\tt.Fatal(\"Unexpected event key\")\n\t}\n\tif event.Value.Subtest != \"download\" {\n\t\tt.Fatal(\"Unexpected subtest field value\")\n\t}\n\tif event.Value.Server != \"FQDN\" {\n\t\tt.Fatal(\"Unexpected failure field value\")\n\t}\n}", "func TestNextEventCancel(t *testing.T) {\n\tmaybeSkipIntegrationTest(t)\n\n\ts, err := NewSession()\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create session: %v\", err)\n\t}\n\tdefer s.Close()\n\n\tif err := s.Subscribe(\"ike-updown\"); err != nil {\n\t\tt.Fatalf(\"Failed to start event listener: %v\", err)\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)\n\tdefer cancel()\n\n\t_, err = s.NextEvent(ctx)\n\tif err != ctx.Err() {\n\t\tt.Fatalf(\"Expected to get context's timeout error after not receiving event, got: %v\", err)\n\t}\n}", "func Example_validateTokenTest() {\n\tconfigEnv := os.Getenv(\"CONFIG\")\n\ttokenEnv := os.Getenv(\"TOKEN\")\n\n\tif configEnv == \"\" || tokenEnv == \"\" {\n\t\tfmt.Println(\"Please see the documentation of Example_validateTokenTest; missing environment variables\")\n\t\treturn\n\t}\n\n\t// Start Server\n\tif config, err := gauth.FromCredentialsFile(configEnv, \"http://localhost:6060/gauth\", nil); err != nil {\n\t\tfmt.Println(\"Could not open config file\", configEnv)\n\t} else {\n\t\tserver := &http.Server{}\n\t\tdefer server.Close()\n\t\tserver.Handler = gauth.Middleware(config, &gauth.MemorySessions{}, nil)\n\t\tserver.Addr = \":6060\"\n\t\tgo server.ListenAndServe()\n\t}\n\n\t// format token\n\ttokenInfo := map[string]interface{}{\n\t\t\"access_token\": tokenEnv,\n\t\t\"expires_at\": float64(time.Now().Unix() + 2000),\n\t}\n\ttoken, _ := json.Marshal(tokenInfo)\n\n\t// make validate_token call\n\tresp, err := http.Post(\"http://localhost:6060/validate_token\", \"application/json\", bytes.NewReader(token))\n\tif err != nil {\n\t\tfmt.Println(\"Failed with error\", err)\n\t} else if resp.StatusCode != http.StatusOK {\n\t\tfmt.Println(\"Failed with 
status\", resp.Status)\n\t} else {\n\t\tdefer resp.Body.Close()\n\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Failed reading body\", err)\n\t\t} else if len(body) == 0 {\n\t\t\tfmt.Println(\"Succeeded!\")\n\t\t} else {\n\t\t\tfmt.Println(\"Unexpected body\", string(body[0:10]))\n\t\t}\n\t}\n\n\t// Output: Succeeded!\n}", "func TestRequestAuditEvents(t *testing.T) {\n\ttesthttp := httptest.NewUnstartedServer(http.HandlerFunc(func(_ http.ResponseWriter, _ *http.Request) {}))\n\ttesthttp.Config.TLSConfig = &tls.Config{Time: clockwork.NewFakeClock().Now}\n\ttesthttp.Start()\n\n\tapp, err := types.NewAppV3(types.Metadata{\n\t\tName: \"foo\",\n\t\tLabels: staticLabels,\n\t}, types.AppSpecV3{\n\t\tURI: testhttp.URL,\n\t\tPublicAddr: \"foo.example.com\",\n\t\tDynamicLabels: types.LabelsToV2(dynamicLabels),\n\t})\n\trequire.NoError(t, err)\n\n\trequestEventsReceived := atomic.NewUint64(0)\n\tserverStreamer, err := events.NewCallbackStreamer(events.CallbackStreamerConfig{\n\t\tInner: events.NewDiscardEmitter(),\n\t\tOnEmitAuditEvent: func(_ context.Context, _ libsession.ID, event apievents.AuditEvent) error {\n\t\t\tif event.GetType() == events.AppSessionRequestEvent {\n\t\t\t\trequestEventsReceived.Inc()\n\n\t\t\t\texpectedEvent := &apievents.AppSessionRequest{\n\t\t\t\t\tMetadata: apievents.Metadata{\n\t\t\t\t\t\tType: events.AppSessionRequestEvent,\n\t\t\t\t\t\tCode: events.AppSessionRequestCode,\n\t\t\t\t\t},\n\t\t\t\t\tAppMetadata: apievents.AppMetadata{\n\t\t\t\t\t\tAppURI: app.Spec.URI,\n\t\t\t\t\t\tAppPublicAddr: app.Spec.PublicAddr,\n\t\t\t\t\t\tAppName: app.Metadata.Name,\n\t\t\t\t\t},\n\t\t\t\t\tStatusCode: 200,\n\t\t\t\t\tMethod: \"GET\",\n\t\t\t\t\tPath: \"/\",\n\t\t\t\t}\n\t\t\t\trequire.Empty(t, cmp.Diff(\n\t\t\t\t\texpectedEvent,\n\t\t\t\t\tevent,\n\t\t\t\t\tcmpopts.IgnoreTypes(apievents.ServerMetadata{}, apievents.SessionMetadata{}, apievents.UserMetadata{}, 
apievents.ConnectionMetadata{}),\n\t\t\t\t\tcmpopts.IgnoreFields(apievents.Metadata{}, \"ID\", \"ClusterName\", \"Time\"),\n\t\t\t\t\tcmpopts.IgnoreFields(apievents.AppSessionChunk{}, \"SessionChunkID\"),\n\t\t\t\t))\n\t\t\t}\n\n\t\t\treturn nil\n\t\t},\n\t})\n\trequire.NoError(t, err)\n\n\ts := SetUpSuiteWithConfig(t, suiteConfig{\n\t\tServerStreamer: serverStreamer,\n\t\tApps: types.Apps{app},\n\t})\n\n\t// make a request to generate events.\n\ts.checkHTTPResponse(t, s.clientCertificate, func(_ *http.Response) {\n\t\t// wait until request events are generated before closing the server.\n\t\trequire.Eventually(t, func() bool {\n\t\t\treturn requestEventsReceived.Load() == 1\n\t\t}, 500*time.Millisecond, 50*time.Millisecond, \"app.request event not generated\")\n\t})\n\n\tsearchEvents, _, err := s.authServer.AuditLog.SearchEvents(time.Time{}, time.Now().Add(time.Minute), \"\", []string{events.AppSessionChunkEvent}, 10, types.EventOrderDescending, \"\")\n\trequire.NoError(t, err)\n\trequire.Len(t, searchEvents, 1)\n\n\texpectedEvent := &apievents.AppSessionChunk{\n\t\tMetadata: apievents.Metadata{\n\t\t\tType: events.AppSessionChunkEvent,\n\t\t\tCode: events.AppSessionChunkCode,\n\t\t},\n\t\tAppMetadata: apievents.AppMetadata{\n\t\t\tAppURI: app.Spec.URI,\n\t\t\tAppPublicAddr: app.Spec.PublicAddr,\n\t\t\tAppName: app.Metadata.Name,\n\t\t},\n\t}\n\trequire.Empty(t, cmp.Diff(\n\t\texpectedEvent,\n\t\tsearchEvents[0],\n\t\tcmpopts.IgnoreTypes(apievents.ServerMetadata{}, apievents.SessionMetadata{}, apievents.UserMetadata{}, apievents.ConnectionMetadata{}),\n\t\tcmpopts.IgnoreFields(apievents.Metadata{}, \"ID\", \"ClusterName\", \"Time\"),\n\t\tcmpopts.IgnoreFields(apievents.AppSessionChunk{}, \"SessionChunkID\"),\n\t))\n}", "func (p *parser) expectToken(t token.Type) tree.Token {\n\tp.expect(t)\n\treturn p.tokenNext()\n}", "func TestServerContextIdCaptured(t *testing.T) {\n\tvar (\n\t\trequest = `{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"test_echoCtxId\"}` + 
\"\\n\"\n\t\twantResp = `{\"jsonrpc\":\"2.0\",\"id\":1,\"result\":1}` + \"\\n\"\n\t)\n\n\tserver := newTestServer()\n\tdefer server.Stop()\n\tlistener, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\tif err != nil {\n\t\tt.Fatal(\"can't listen:\", err)\n\t}\n\tdefer listener.Close()\n\tgo server.ServeListener(listener)\n\n\tconn, err := net.Dial(\"tcp\", listener.Addr().String())\n\tif err != nil {\n\t\tt.Fatal(\"can't dial:\", err)\n\t}\n\tdefer conn.Close()\n\t// Write the request, then half-close the connection so the server stops reading.\n\tconn.Write([]byte(request))\n\tconn.(*net.TCPConn).CloseWrite()\n\t// Now try to get the response.\n\tbuf := make([]byte, 2000)\n\tn, err := conn.Read(buf)\n\n\tassert.NoErrorf(t, err, \"read error:\", err)\n\tassert.Equalf(t, buf[:n], []byte(wantResp), \"wrong response: %s\", buf[:n])\n}", "func TestOAuthServiceAccountClientEvent(t *testing.T) {\n\n\ttests := map[string]struct {\n\t\tannotationPrefix string\n\t\tannotation string\n\t\texpectedEventReason string\n\t\texpectedEventMsg string\n\t\tnumEvents int\n\t\texpectBadRequest bool\n\t}{\n\t\t\"test-good-url\": {\n\t\t\tannotationPrefix: saoauth.OAuthRedirectModelAnnotationURIPrefix + \"one\",\n\t\t\tannotation: \"/oauthcallback\",\n\t\t\tnumEvents: 0,\n\t\t},\n\t\t\"test-bad-url\": {\n\t\t\tannotationPrefix: saoauth.OAuthRedirectModelAnnotationURIPrefix + \"one\",\n\t\t\tannotation: \"foo:foo\",\n\t\t\texpectedEventReason: \"NoSAOAuthRedirectURIs\",\n\t\t\texpectedEventMsg: \"system:serviceaccount:\" + projectName + \":\" + saName + \" has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.<some-value>=<redirect> or create a dynamic URI using serviceaccounts.openshift.io/oauth-redirectreference.<some-value>=<reference>\",\n\t\t\tnumEvents: 1,\n\t\t\texpectBadRequest: true,\n\t\t},\n\t\t\"test-bad-url-parse\": {\n\t\t\tannotationPrefix: saoauth.OAuthRedirectModelAnnotationURIPrefix + \"one\",\n\t\t\tannotation: \"::\",\n\t\t\texpectedEventReason: 
\"NoSAOAuthRedirectURIs\",\n\t\t\texpectedEventMsg: \"[parse ::: missing protocol scheme, system:serviceaccount:\" + projectName + \":\" + saName + \" has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.<some-value>=<redirect> or create a dynamic URI using serviceaccounts.openshift.io/oauth-redirectreference.<some-value>=<reference>]\",\n\t\t\tnumEvents: 1,\n\t\t\texpectBadRequest: true,\n\t\t},\n\t\t\"test-bad-redirect-annotation-kind\": {\n\t\t\tannotationPrefix: saoauth.OAuthRedirectModelAnnotationReferencePrefix + \"1\",\n\t\t\tannotation: `{\"kind\":\"foo\",\"apiVersion\":\"oauth.openshift.io/v1\",\"metadata\":{\"creationTimestamp\":null},\"reference\":{\"group\":\"foo\",\"kind\":\"Route\",\"name\":\"route1\"}}`,\n\t\t\texpectedEventReason: \"NoSAOAuthRedirectURIs\",\n\t\t\texpectedEventMsg: `[no kind \"foo\" is registered for version \"oauth.openshift.io/v1\" in scheme \"github.com/openshift/origin/pkg/serviceaccounts/oauthclient/oauthclientregistry.go:54\", system:serviceaccount:` + projectName + \":\" + saName + \" has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.<some-value>=<redirect> or create a dynamic URI using serviceaccounts.openshift.io/oauth-redirectreference.<some-value>=<reference>]\",\n\t\t\tnumEvents: 1,\n\t\t\texpectBadRequest: true,\n\t\t},\n\t\t\"test-bad-redirect-type-parse\": {\n\t\t\tannotationPrefix: saoauth.OAuthRedirectModelAnnotationReferencePrefix + \"1\",\n\t\t\tannotation: `{asdf\":\"adsf\"}`,\n\t\t\texpectedEventReason: \"NoSAOAuthRedirectURIs\",\n\t\t\texpectedEventMsg: `[couldn't get version/kind; json parse error: invalid character 'a' looking for beginning of object key string, system:serviceaccount:` + projectName + \":\" + saName + \" has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.<some-value>=<redirect> or create a dynamic URI using serviceaccounts.openshift.io/oauth-redirectreference.<some-value>=<reference>]\",\n\t\t\tnumEvents: 
1,\n\t\t\texpectBadRequest: true,\n\t\t},\n\t\t\"test-bad-redirect-route-not-found\": {\n\t\t\tannotationPrefix: saoauth.OAuthRedirectModelAnnotationReferencePrefix + \"1\",\n\t\t\tannotation: buildRedirectObjectReferenceString(t, \"Route\", \"route1\", \"route.openshift.io\"),\n\t\t\texpectedEventReason: \"NoSAOAuthRedirectURIs\",\n\t\t\texpectedEventMsg: `[routes.route.openshift.io \"route1\" not found, system:serviceaccount:` + projectName + \":\" + saName + \" has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.<some-value>=<redirect> or create a dynamic URI using serviceaccounts.openshift.io/oauth-redirectreference.<some-value>=<reference>]\",\n\t\t\tnumEvents: 1,\n\t\t\texpectBadRequest: true,\n\t\t},\n\t\t\"test-bad-redirect-route-wrong-group\": {\n\t\t\tannotationPrefix: saoauth.OAuthRedirectModelAnnotationReferencePrefix + \"1\",\n\t\t\tannotation: buildRedirectObjectReferenceString(t, \"Route\", \"route1\", \"foo\"),\n\t\t\texpectedEventReason: \"NoSAOAuthRedirectURIs\",\n\t\t\texpectedEventMsg: `system:serviceaccount:` + projectName + \":\" + saName + \" has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.<some-value>=<redirect> or create a dynamic URI using serviceaccounts.openshift.io/oauth-redirectreference.<some-value>=<reference>\",\n\t\t\tnumEvents: 1,\n\t\t\texpectBadRequest: true,\n\t\t},\n\t\t\"test-bad-redirect-reference-kind\": {\n\t\t\tannotationPrefix: saoauth.OAuthRedirectModelAnnotationReferencePrefix + \"1\",\n\t\t\tannotation: buildRedirectObjectReferenceString(t, \"foo\", \"route1\", \"route.openshift.io\"),\n\t\t\texpectedEventReason: \"NoSAOAuthRedirectURIs\",\n\t\t\texpectedEventMsg: `system:serviceaccount:` + projectName + \":\" + saName + \" has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.<some-value>=<redirect> or create a dynamic URI using serviceaccounts.openshift.io/oauth-redirectreference.<some-value>=<reference>\",\n\t\t\tnumEvents: 1,\n\t\t\texpectBadRequest: 
true,\n\t\t},\n\t}\n\n\ttestServer, err := setupTestOAuthServer()\n\tif err != nil {\n\t\tt.Fatalf(\"error setting up test server: %s\", err)\n\t}\n\n\tdefer testServer.oauthServer.Close()\n\tdefer testserver.CleanupMasterEtcd(t, testServer.masterConfig)\n\n\tfor tcName, testCase := range tests {\n\t\tvar redirect string = testServer.oauthServer.URL + \"/oauthcallback\"\n\t\tif testCase.numEvents != 0 {\n\t\t\tredirect = testCase.annotation\n\t\t}\n\n\t\tt.Logf(\"%s: annotationPrefix %s, annotation %s\", tcName, testCase.annotationPrefix, testCase.annotation)\n\t\tsa, err := setupTestSA(testServer.clusterAdminKubeClient, testCase.annotationPrefix, redirect)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"%s: error setting up test SA: %s\", tcName, err)\n\t\t}\n\n\t\tsecret, err := setupTestSecrets(testServer.clusterAdminKubeClient, sa)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"%s: error setting up test secrets: %s\", tcName, err)\n\t\t}\n\n\t\trunTestOAuthFlow(t, testServer, sa, secret, redirect, testCase.expectBadRequest)\n\n\t\t// Check events with a short poll to stop flakes\n\t\tvar evList *kapi.EventList\n\t\terr = wait.Poll(time.Second, 5*time.Second, func() (bool, error) {\n\t\t\tevList, err = testServer.clusterAdminKubeClient.Core().Events(projectName).List(metav1.ListOptions{})\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tif len(evList.Items) < testCase.numEvents {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t\treturn true, nil\n\t\t})\n\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"%s: err polling for events\", tcName)\n\t\t}\n\n\t\tevents := collectEventsWithReason(evList, testCase.expectedEventReason)\n\n\t\tif testCase.numEvents != len(events) {\n\t\t\tt.Fatalf(\"%s: expected %d events, found %d\", tcName, testCase.numEvents, len(events))\n\t\t}\n\n\t\tif testCase.numEvents != 0 && events[0].Message != testCase.expectedEventMsg {\n\t\t\tt.Fatalf(\"%s: expected event message %s, got %s\", tcName, testCase.expectedEventMsg, 
events[0].Message)\n\t\t}\n\n\t\terr = testServer.clusterAdminKubeClient.Core().Events(projectName).DeleteCollection(&metav1.DeleteOptions{}, metav1.ListOptions{})\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"%s: error deleting events: %s\", tcName, err)\n\t\t}\n\t}\n}", "func (_UpkeepRegistrationRequests *UpkeepRegistrationRequestsTransactor) OnTokenTransfer(opts *bind.TransactOpts, arg0 common.Address, amount *big.Int, data []byte) (*types.Transaction, error) {\n\treturn _UpkeepRegistrationRequests.contract.Transact(opts, \"onTokenTransfer\", arg0, amount, data)\n}", "func TestAssetSysCC_RegisterToken(t *testing.T) {\n\n\tfmt.Println(\"-----------------------------------\")\n\tfmt.Println(\"Test1: registerToken\")\n\n\tascc := new(AssetSysCC)\n\tstub := shim.NewMockStub(\"ascc\", ascc)\n\tcheckInit(t, stub, [][]byte{[]byte(\"\")})\n\n\tres_test3 := stub.MockInvoke(\"1\", [][]byte{[]byte(\"registerToken\"), []byte(\"SSToken\"), []byte(\"250\"), []byte(\"18\"), []byte(MAddress[:])})\n\n\tif res_test3.Status != shim.OK {\n\t\tfmt.Println(\"Register token failed\", string(res_test3.Message))\n\t\tt.FailNow()\n\t}\n\n\tfmt.Println(\"Test registerToken Success!\")\n\n}", "func (p McpStartupPacket) Token() string {\n\treturn hex.EncodeToString(p[3:67])\n}", "func TestCheckEvents(t *testing.T) {\n\ttestNamespace := \"test_namespace\"\n\tcha := make(chan *events.Envelope)\n\terrorsCh := make(chan error)\n\tme := &mockEvt{\n\t\tmockSubscribe: func(ctx context.Context, filter ...string) (ch <-chan *events.Envelope, errs <-chan error) {\n\t\t\treturn cha, errorsCh\n\t\t},\n\t}\n\titf := &fake.MockedContainerdClient{\n\t\tMockEvents: func() containerd.EventService {\n\t\t\treturn containerd.EventService(me)\n\t\t},\n\t\tMockNamespaces: func(ctx context.Context) ([]string, error) {\n\t\t\treturn []string{testNamespace}, nil\n\t\t},\n\t\tMockContainers: func(namespace string) ([]containerd.Container, error) {\n\t\t\treturn nil, nil\n\t\t},\n\t}\n\t// Test the basic listener\n\tsub := 
createEventSubscriber(\"subscriberTest1\", containerdutil.ContainerdItf(itf), nil)\n\tsub.CheckEvents()\n\n\ttp := &containerdevents.TaskPaused{\n\t\tContainerID: \"42\",\n\t}\n\n\tvp, err := typeurl.MarshalAny(tp)\n\tassert.NoError(t, err)\n\n\ten := events.Envelope{\n\t\tTimestamp: time.Now(),\n\t\tTopic: \"/tasks/paused\",\n\t\tEvent: vp,\n\t}\n\tcha <- &en\n\n\ttimeout := time.NewTimer(2 * time.Second)\n\tticker := time.NewTicker(5 * time.Millisecond)\n\tcondition := false\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tif !sub.isRunning() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcondition = true\n\t\tcase <-timeout.C:\n\t\t\trequire.FailNow(t, \"Timeout waiting event listener to be healthy\")\n\t\t}\n\t\tif condition {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tev := sub.Flush(time.Now().Unix())\n\tassert.Len(t, ev, 1)\n\tassert.Equal(t, ev[0].Topic, \"/tasks/paused\")\n\terrorsCh <- fmt.Errorf(\"chan breaker\")\n\tcondition = false\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tif sub.isRunning() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcondition = true\n\t\tcase <-timeout.C:\n\t\t\trequire.FailNow(t, \"Timeout waiting for error\")\n\t\t}\n\t\tif condition {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Test the multiple events one unsupported\n\tsub = createEventSubscriber(\"subscriberTest2\", containerdutil.ContainerdItf(itf), nil)\n\tsub.CheckEvents()\n\n\ttk := &containerdevents.TaskOOM{\n\t\tContainerID: \"42\",\n\t}\n\tvk, err := typeurl.MarshalAny(tk)\n\tassert.NoError(t, err)\n\n\tek := events.Envelope{\n\t\tTimestamp: time.Now(),\n\t\tTopic: \"/tasks/oom\",\n\t\tEvent: vk,\n\t}\n\n\tnd := &containerdevents.NamespaceDelete{\n\t\tName: \"k10s.io\",\n\t}\n\tvnd, err := typeurl.MarshalAny(nd)\n\tassert.NoError(t, err)\n\n\tevnd := events.Envelope{\n\t\tTimestamp: time.Now(),\n\t\tTopic: \"/namespaces/delete\",\n\t\tEvent: vnd,\n\t}\n\n\tcha <- &ek\n\tcha <- &evnd\n\n\tcondition = false\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tif !sub.isRunning() 
{\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcondition = true\n\t\tcase <-timeout.C:\n\t\t\trequire.FailNow(t, \"Timeout waiting event listener to be healthy\")\n\t\t}\n\t\tif condition {\n\t\t\tbreak\n\t\t}\n\t}\n\tev2 := sub.Flush(time.Now().Unix())\n\tfmt.Printf(\"\\n\\n 2/ Flush %v\\n\\n\", ev2)\n\tassert.Len(t, ev2, 1)\n\tassert.Equal(t, ev2[0].Topic, \"/tasks/oom\")\n}", "func TestCreateWebSocketPair(t *testing.T) {\n\tvar atomicCalls uint64\n\tserverFunc := func(conn *WSConn) {\n\t\tatomic.AddUint64(&atomicCalls, 1)\n\t}\n\t// Start the server.\n\twst := newWSTester(serverFunc)\n\n\t// Connect a client.\n\t_, err := wst.NewClient()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Close the server.\n\tif err := wst.Close(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Check the number of times the server handler has been called.\n\tnumCalls := atomic.LoadUint64(&atomicCalls)\n\tif numCalls != 1 {\n\t\tt.Fatal(\"expected handler to be called once but was\", numCalls)\n\t}\n}", "func (c *Client) SendToken(mobileNumber string, token string) error {\n\tcontent := fmt.Sprintf(\"%s is your Genesis verification code.\", token)\n\treturn c.SendMessage(mobileNumber, content)\n}", "func SimulateTransferTokenOwner(k keeper.Keeper, ak types.AccountKeeper, bk types.BankKeeper) simtypes.Operation {\n\treturn func(\n\t\tr *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context,\n\t\taccs []simtypes.Account, chainID string,\n\t) (simtypes.OperationMsg, []simtypes.FutureOperation, error) {\n\n\t\ttoken, _ := selectToken(ctx, k, ak, bk, false)\n\t\tvar simToAccount, _ = simtypes.RandomAcc(r, accs)\n\t\tfor simToAccount.Address.Equals(token.GetOwner()) {\n\t\t\tsimToAccount, _ = simtypes.RandomAcc(r, accs)\n\t\t}\n\n\t\tmsg := types.NewMsgTransferTokenOwner(token.GetSymbol(), token.GetOwnerString(), simToAccount.Address.String())\n\n\t\tsimAccount, found := simtypes.FindAccount(accs, token.GetOwner())\n\t\tif !found {\n\t\t\treturn simtypes.NoOpMsg(types.ModuleName, msg.Type(), 
fmt.Sprintf(\"account[%s] does not found\", token.GetOwnerString())), \n\t\t\t\tnil, fmt.Errorf(\"account[%s] does not found\", token.GetOwnerString())\n\t\t}\n\n\t\tsrcOwner, _ := sdk.AccAddressFromBech32(msg.OldOwner)\n\t\taccount := ak.GetAccount(ctx, srcOwner)\n\t\tspendable := bk.SpendableCoins(ctx, account.GetAddress())\n\n\t\tfees, err := simtypes.RandomFees(r, ctx, spendable)\n\t\tif err != nil {\n\t\t\treturn simtypes.NoOpMsg(types.ModuleName, msg.Type(), \"unable to generate fees\"), nil, err\n\t\t}\n\n\t\ttxGen := simappparams.MakeTestEncodingConfig().TxConfig\n\t\ttx, err := helpers.GenTx(\n\t\t\ttxGen,\n\t\t\t[]sdk.Msg{msg},\n\t\t\tfees,\n\t\t\thelpers.DefaultGenTxGas,\n\t\t\tchainID,\n\t\t\t[]uint64{account.GetAccountNumber()},\n\t\t\t[]uint64{account.GetSequence()},\n\t\t\tsimAccount.PrivKey,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn simtypes.NoOpMsg(types.ModuleName, msg.Type(), \"unable to generate mock tx\"), nil, err\n\t\t}\n\n\t\tif _, _, err = app.Deliver(txGen.TxEncoder(), tx); err != nil {\n\t\t\treturn simtypes.NoOpMsg(types.ModuleName, msg.Type(), \"unable to deliver tx\"), nil, err\n\t\t}\n\n\t\treturn simtypes.NewOperationMsg(msg, true, \"simulate transfer token\"), nil, nil\n\t}\n}", "func (_UpkeepRegistrationRequests *UpkeepRegistrationRequestsTransactorSession) OnTokenTransfer(arg0 common.Address, amount *big.Int, data []byte) (*types.Transaction, error) {\n\treturn _UpkeepRegistrationRequests.Contract.OnTokenTransfer(&_UpkeepRegistrationRequests.TransactOpts, arg0, amount, data)\n}", "func Test_LogoutValidToken(t *testing.T) {\n\tdir, err := tempConfig(\"\")\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tdefer os.RemoveAll(dir)\n\n\tmockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusOK)\n\t\tw.Write([]byte(`{\"status_code\": 10007, \"status_text\": \"Resource deleted\"}`))\n\t}))\n\tdefer 
mockServer.Close()\n\n\tlogoutArgs := logoutArguments{\n\t\tapiEndpoint: mockServer.URL,\n\t\ttoken: \"test-token\",\n\t}\n\n\terr = logout(logoutArgs)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n}", "func TestBulkEvent(test *testing.T) {\n\tloganne := Loganne{\n\t\thost: \"http://localhost:7999\",\n\t\tsource: \"metadata_api_test\",\n\t}\n\tloganne.post(\"bulkTestEvent\", \"This event is from the bulk test\", Track{}, Track{})\n\n\tassertEqual(test, \"Loganne request made to wrong path\", \"/events\", latestRequest.URL.Path)\n\tassertEqual(test,\"Loganne request wasn't POST request\", \"POST\", latestRequest.Method)\n\n\tassertNoError(test, \"Failed to get request body\", latestRequestError)\n\tassertEqual(test, \"Unexpected request body\", `{\"humanReadable\":\"This event is from the bulk test\",\"source\":\"metadata_api_test\",\"type\":\"bulkTestEvent\"}`, latestRequestBody)\n}", "func (suite *KeeperTestSuite) TestOnAcknowledgementPacket() {\n\tdata := types.NewFungibleTokenPacketData(prefixCoins2, testAddr1.String(), testAddr2.String())\n\n\tsuccessAck := types.FungibleTokenPacketAcknowledgement{\n\t\tSuccess: true,\n\t}\n\tfailedAck := types.FungibleTokenPacketAcknowledgement{\n\t\tSuccess: false,\n\t\tError: \"failed packet transfer\",\n\t}\n\n\ttestCases := []struct {\n\t\tmsg string\n\t\tack types.FungibleTokenPacketAcknowledgement\n\t\tmalleate func()\n\t\tsource bool\n\t\tsuccess bool // success of ack\n\t}{\n\t\t{\"success ack causes no-op\", successAck,\n\t\t\tfunc() {}, true, true},\n\t\t{\"successful refund from source chain\", failedAck,\n\t\t\tfunc() {\n\t\t\t\tescrow := types.GetEscrowAddress(testPort1, testChannel1)\n\t\t\t\t_, err := suite.chainA.App.BankKeeper.AddCoins(suite.chainA.GetContext(), escrow, sdk.NewCoins(sdk.NewCoin(\"atom\", sdk.NewInt(100))))\n\t\t\t\tsuite.Require().NoError(err)\n\t\t\t}, true, false},\n\t\t{\"successful refund from external chain\", failedAck,\n\t\t\tfunc() {\n\t\t\t\tdata.Amount = prefixCoins\n\t\t\t}, false, 
false},\n\t}\n\n\tpacket := channeltypes.NewPacket(data.GetBytes(), 1, testPort1, testChannel1, testPort2, testChannel2, 100, 0)\n\n\tfor i, tc := range testCases {\n\t\ttc := tc\n\t\ti := i\n\t\tsuite.Run(fmt.Sprintf(\"Case %s\", tc.msg), func() {\n\t\t\tsuite.SetupTest() // reset\n\n\t\t\ttc.malleate()\n\n\t\t\tvar denom string\n\t\t\tif tc.source {\n\t\t\t\tprefix := types.GetDenomPrefix(packet.GetDestPort(), packet.GetDestChannel())\n\t\t\t\tdenom = prefixCoins2[0].Denom[len(prefix):]\n\t\t\t} else {\n\t\t\t\tdenom = data.Amount[0].Denom\n\t\t\t}\n\n\t\t\tpreCoin := suite.chainA.App.BankKeeper.GetBalance(suite.chainA.GetContext(), testAddr1, denom)\n\n\t\t\terr := suite.chainA.App.TransferKeeper.OnAcknowledgementPacket(suite.chainA.GetContext(), packet, data, tc.ack)\n\t\t\tsuite.Require().NoError(err, \"valid test case %d failed: %s\", i, tc.msg)\n\n\t\t\tpostCoin := suite.chainA.App.BankKeeper.GetBalance(suite.chainA.GetContext(), testAddr1, denom)\n\t\t\tdeltaAmount := postCoin.Amount.Sub(preCoin.Amount)\n\n\t\t\tif tc.success {\n\t\t\t\tsuite.Require().Equal(sdk.ZeroInt(), deltaAmount, \"successful ack changed balance\")\n\t\t\t} else {\n\t\t\t\tsuite.Require().Equal(prefixCoins2[0].Amount, deltaAmount, \"failed ack did not trigger refund\")\n\t\t\t}\n\t\t})\n\t}\n}", "func (c *instance) Token(call TokenCall) error {\n\to := bind.NewKeyedTransactor(c.key)\n\n\t// gateway redirect to private chain\n\tclient, err := ethclient.Dial(config.ETHAddr())\n\tif err != nil {\n\t\treturn err\n\t}\n\tinstance, err := token.NewDhToken(c.tokenAddr, client)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn call(instance, o)\n}", "func (m *MockAnonymous) PublishWebSocketEvent(arg0 string, arg1 map[string]interface{}, arg2 *model.WebsocketBroadcast) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"PublishWebSocketEvent\", arg0, arg1, arg2)\n}", "func (m *MockHandler) SendHostEvent(ctx context.Context, event HostEvent) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"SendHostEvent\", 
ctx, event)\n}", "func TestRefreshToken(t *testing.T) {\n\tdb.InitDB()\n\tvar router *gin.Engine = routes.SetupRouter()\n\n\tvar user models.UserCreate = utils.CreateUser(\"Tom\", \"qwerty1234\", t, router)\n\tuser.Token = utils.ConnectUser(\"Tom\", \"qwerty1234\", t, router)\n\n\tvar url string = \"/v1/refresh/token\"\n\tvar bearer = \"Bearer \" + user.Token\n\n\trecord := httptest.NewRecorder()\n\trequest, _ := http.NewRequest(\"POST\", url, nil)\n\trequest.Header.Add(\"Content-Type\", \"application/json\")\n\trequest.Header.Add(\"Authorization\", bearer)\n\n\trouter.ServeHTTP(record, request)\n\n\tvar refresh models.UserConnect\n\terr := json.Unmarshal([]byte(record.Body.String()), &refresh)\n\tif err != nil {\n\t\tlog.Fatal(\"Bad output: \", err.Error())\n\t\tt.Fail()\n\t}\n\n\tassert.Equal(t, record.Code, 200)\n\n\tutils.CleanUser(user.ID, user.Token, t, router)\n\tdb.CloseDB()\n}", "func TestIsOpen(t *testing.T) {\n\tmockTr := new(mockTTransport)\n\ttr := NewTFramedTransport(mockTr)\n\tmockTr.On(\"IsOpen\").Return(true)\n\n\tassert.True(t, tr.IsOpen())\n\tmockTr.AssertExpectations(t)\n}" ]
[ "0.7310845", "0.7044548", "0.6355642", "0.6183274", "0.60615444", "0.5989066", "0.591275", "0.5867129", "0.57831806", "0.5647044", "0.5481344", "0.5433984", "0.533381", "0.5294447", "0.52854496", "0.526756", "0.5230422", "0.5221526", "0.5216238", "0.5214109", "0.52059054", "0.5194488", "0.51800615", "0.5176398", "0.51747775", "0.5173747", "0.51441514", "0.5134338", "0.5114601", "0.50835985", "0.5083085", "0.5071869", "0.50657403", "0.504859", "0.5043458", "0.50433844", "0.5041821", "0.50319064", "0.50228477", "0.50141156", "0.49925646", "0.49833852", "0.49824992", "0.49806908", "0.4977511", "0.49694192", "0.496855", "0.49610642", "0.4953935", "0.49497157", "0.49257484", "0.49082974", "0.4903683", "0.48954135", "0.48894188", "0.48714733", "0.4856087", "0.48491034", "0.48460084", "0.48459128", "0.4844778", "0.48444575", "0.4843365", "0.483692", "0.482978", "0.4819261", "0.48124477", "0.48074147", "0.47976387", "0.47942883", "0.4793229", "0.47879368", "0.47850856", "0.4781042", "0.47769356", "0.4772011", "0.47639933", "0.47628593", "0.4762292", "0.47613066", "0.4760467", "0.47483855", "0.47387832", "0.473851", "0.47318152", "0.4722926", "0.47155803", "0.47098902", "0.47089356", "0.47043458", "0.47023514", "0.46909434", "0.46891817", "0.46845692", "0.4684265", "0.46835476", "0.46806648", "0.4679392", "0.467903", "0.46769488" ]
0.66276056
2
Test that TokenEvent with nil sends a connection token event with a nil token.
func TestServiceTokenEventWithID_WithNilToken_SendsNilToken(t *testing.T) { runTest(t, func(s *res.Service) { s.Handle("model", res.GetResource(func(r res.GetRequest) { r.NotFound() })) }, func(s *restest.Session) { s.Service().TokenEventWithID(mock.CID, "foo", nil) s.GetMsg().AssertTokenEventWithID(mock.CID, "foo", nil) }) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestServiceTokenEvent_WithNilToken_SendsNilToken(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.GetResource(func(r res.GetRequest) { r.NotFound() }))\n\t}, func(s *restest.Session) {\n\t\ts.Service().TokenEvent(mock.CID, nil)\n\t\ts.GetMsg().AssertTokenEvent(mock.CID, nil)\n\t})\n}", "func TestAuthRequestNilTokenEvent(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.Auth(\"method\", func(r res.AuthRequest) {\n\t\t\tr.TokenEvent(nil)\n\t\t\tr.OK(nil)\n\t\t}))\n\t}, func(s *restest.Session) {\n\t\treq := s.Auth(\"test.model\", \"method\", nil)\n\t\ts.GetMsg().\n\t\t\tAssertTokenEvent(mock.CID, nil)\n\t\treq.Response().\n\t\t\tAssertResult(nil)\n\t})\n}", "func TestServiceTokenEvent_WithObjectToken_SendsToken(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.GetResource(func(r res.GetRequest) { r.NotFound() }))\n\t}, func(s *restest.Session) {\n\t\ts.Service().TokenEvent(mock.CID, mock.Token)\n\t\ts.GetMsg().AssertTokenEvent(mock.CID, mock.Token)\n\t})\n}", "func TestAuthRequestTokenEvent(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.Auth(\"method\", func(r res.AuthRequest) {\n\t\t\tr.TokenEvent(mock.Token)\n\t\t\tr.OK(nil)\n\t\t}))\n\t}, func(s *restest.Session) {\n\t\treq := s.Auth(\"test.model\", \"method\", nil)\n\t\ts.GetMsg().\n\t\t\tAssertTokenEvent(mock.CID, mock.Token)\n\t\treq.Response().\n\t\t\tAssertResult(nil)\n\t})\n}", "func TestEmptyToken(t *testing.T) {\n\tassert := assert.New(t)\n\n\tmockedTpmProvider := new(tpmprovider.MockedTpmProvider)\n\tmockedTpmProvider.On(\"Close\").Return(nil)\n\tmockedTpmProvider.On(\"NvIndexExists\", mock.Anything).Return(true, nil)\n\tmockedTpmProvider.On(\"NvRelease\", mock.Anything, mock.Anything).Return(nil)\n\tmockedTpmProvider.On(\"NvDefine\", mock.Anything, mock.Anything, mock.Anything).Return(nil)\n\tmockedTpmProvider.On(\"NvWrite\", mock.Anything, mock.Anything, 
mock.Anything).Return(nil)\n\n\tmockedTpmFactory := tpmprovider.MockedTpmFactory{TpmProvider: mockedTpmProvider}\n\n\ttrustAgentService, err := CreateTrustAgentService(CreateTestConfig(), mockedTpmFactory)\n\tassert.NoError(err)\n\n\t// setup TA service to use JWT-based authentication\n\ttrustAgentService.router = mux.NewRouter()\n\ttrustAgentService.router.Use(middleware.NewTokenAuth(\"../test/mockJWTDir\", \"../test/mockCACertsDir\", mockRetrieveJWTSigningCerts, cacheTime))\n\ttrustAgentService.router.HandleFunc(\"/v2/tag\", errorHandler(requiresPermission(setAssetTag(CreateTestConfig(), mockedTpmFactory), []string{postDeployTagPerm}))).Methods(\"POST\")\n\n\tjsonString := `{\"tag\" : \"tHgfRQED1+pYgEZpq3dZC9ONmBCZKdx10LErTZs1k/k=\", \"hardware_uuid\" : \"7a569dad-2d82-49e4-9156-069b0065b262\"}`\n\n\trequest, err := http.NewRequest(\"POST\", \"/v2/tag\", bytes.NewBuffer([]byte(jsonString)))\n\tassert.NoError(err)\n\n\trequest.Header.Set(\"Content-Type\", \"application/json\")\n\trequest.Header.Add(\"Authorization\", \"Bearer \"+EmptyJWTToken)\n\n\trecorder := httptest.NewRecorder()\n\ttrustAgentService.router.ServeHTTP(recorder, request)\n\tresponse := recorder.Result()\n\tfmt.Printf(\"StatusCode: %d\\n\", response.StatusCode)\n\tassert.Equal(http.StatusUnauthorized, response.StatusCode)\n}", "func TestServiceTokenEventWithID_WithObjectToken_SendsToken(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.GetResource(func(r res.GetRequest) { r.NotFound() }))\n\t}, func(s *restest.Session) {\n\t\ts.Service().TokenEventWithID(mock.CID, \"foo\", mock.Token)\n\t\ts.GetMsg().AssertTokenEventWithID(mock.CID, \"foo\", mock.Token)\n\t})\n}", "func TestAuthParseTokenWithNilToken(t *testing.T) {\n\tvar o struct {\n\t\tUser string `json:\"user\"`\n\t\tID int `json:\"id\"`\n\t}\n\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.Auth(\"method\", func(r res.AuthRequest) {\n\t\t\tr.ParseToken(&o)\n\t\t\trestest.AssertEqualJSON(t, 
\"o.User\", o.User, \"\")\n\t\t\trestest.AssertEqualJSON(t, \"o.ID\", o.ID, 0)\n\t\t\tr.NotFound()\n\t\t}))\n\t}, func(s *restest.Session) {\n\t\ts.Auth(\"test.model\", \"method\", nil).\n\t\t\tResponse().\n\t\t\tAssertError(res.ErrNotFound)\n\t})\n}", "func TestInvalidEvents(t *testing.T) {\n\tassert := assert.New(t)\n\n\tw := webhook.W{\n\t\tUntil: time.Now().Add(60 * time.Second),\n\t}\n\tw.Config.URL = \"http://localhost:9999/foo\"\n\tw.Config.ContentType = \"application/json\"\n\n\tobs, err := OutboundSenderFactory{\n\t\tListener: w,\n\t\tClient: &http.Client{},\n\t\tCutOffPeriod: time.Second,\n\t\tNumWorkers: 10,\n\t\tQueueSize: 10,\n\t\tProfilerFactory: testServerProfilerFactory,\n\t\tLogger: getLogger(),\n\t}.New()\n\tassert.Nil(obs)\n\tassert.NotNil(err)\n\n\tw2 := webhook.W{\n\t\tUntil: time.Now().Add(60 * time.Second),\n\t\tEvents: []string{\"iot(.*\"},\n\t}\n\tw2.Config.URL = \"http://localhost:9999/foo\"\n\tw2.Config.ContentType = \"application/json\"\n\n\tobs, err = OutboundSenderFactory{\n\t\tListener: w2,\n\t\tClient: &http.Client{},\n\t\tCutOffPeriod: time.Second,\n\t\tNumWorkers: 10,\n\t\tQueueSize: 10,\n\t\tLogger: getLogger(),\n\t\tProfilerFactory: testServerProfilerFactory,\n\t}.New()\n\n\tassert.Nil(obs)\n\tassert.NotNil(err)\n}", "func TestPacket_VerifyNoMutation(t *testing.T) {\n\ttearDown := setUp(t)\n\tdefer tearDown(t)\n\n\tpacket.WriteChecksum()\n\n\tbefore := packet.Serialize()\n\tpacket.Verify(deviceToken)\n\n\tafter := packet.Serialize()\n\tassert.Equal(t, before, after)\n}", "func noValidTokenTest(t *testing.T, r *http.Request, h http.Handler, auth *mock.Authenticator) {\n\toriginal := auth.AuthenticateFn\n\tauth.AuthenticateFn = authenticateGenerator(false, errors.New(\"An error\"))\n\tw := httptest.NewRecorder()\n\th.ServeHTTP(w, r)\n\ttest.Equals(t, http.StatusBadRequest, w.Result().StatusCode)\n\tauth.AuthenticateFn = authenticateGenerator(false, nil)\n\tw = httptest.NewRecorder()\n\th.ServeHTTP(w, r)\n\ttest.Equals(t, 
http.StatusUnauthorized, w.Result().StatusCode)\n\tauth.AuthenticateFn = original\n}", "func (fgs *FakeGraphSync) AssertNoCancelReceived(t *testing.T) {\n\trequire.Empty(t, fgs.cancels, \"should not cancel request\")\n}", "func TestTokenIsSet(t *testing.T) {\n\tconfiguration := ReadConfig()\n\ttoken := configuration.Token\n\n\tif token == \"\" {\n\t\tt.Error(\"Token misconfigured\")\n\t}\n\n\t// A dumb way to check if a dummy token has been used\n\tif len(token) < 16 {\n\t\tt.Error(\"Token misconfigured\")\n\t}\n\n\tt.Log(\"Token set\")\n}", "func (s *Service) TestToken(ctx context.Context, info *pushmdl.PushInfo, token string) (err error) {\n\tparams := url.Values{}\n\tparams.Add(\"app_id\", strconv.FormatInt(info.APPID, 10))\n\tparams.Add(\"alert_title\", info.Title)\n\tparams.Add(\"alert_body\", info.Summary)\n\tparams.Add(\"token\", token)\n\tparams.Add(\"link_type\", strconv.FormatInt(int64(info.LinkType), 10))\n\tparams.Add(\"link_value\", info.LinkValue)\n\tparams.Add(\"sound\", strconv.Itoa(info.Sound))\n\tparams.Add(\"vibration\", strconv.Itoa(info.Vibration))\n\tparams.Add(\"expire_time\", strconv.FormatInt(int64(info.ExpireTime), 10))\n\tparams.Add(\"image_url\", info.ImageURL)\n\tif err = s.httpClient.Post(ctx, _testTokenURL, \"\", params, nil); err != nil {\n\t\tlog.Error(\"s.TestToken(%+v) error(%v)\", info, err)\n\t}\n\treturn\n}", "func TestServiceTokenEvent_WithInvalidCID_CausesPanic(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.GetResource(func(r res.GetRequest) { r.NotFound() }))\n\t}, func(s *restest.Session) {\n\t\trestest.AssertPanic(t, func() {\n\t\t\ts.Service().TokenEvent(\"invalid.*.cid\", nil)\n\t\t})\n\t})\n}", "func TestAuthRawTokenWithNoToken(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.Auth(\"method\", func(r res.AuthRequest) {\n\t\t\trestest.AssertEqualJSON(t, \"RawToken\", r.RawToken(), nil)\n\t\t\tr.NotFound()\n\t\t}))\n\t}, func(s *restest.Session) 
{\n\t\ts.Auth(\"test.model\", \"method\", nil).\n\t\t\tResponse().\n\t\t\tAssertError(res.ErrNotFound)\n\t})\n}", "func (fgs *FakeGraphSync) AssertNoRequestReceived(t *testing.T) {\n\trequire.Empty(t, fgs.requests, \"should not receive request\")\n}", "func TestRequestEmpty(t *testing.T) {\n\t// Initialize server\n\tserver := setupServer(\n\t\tt,\n\t\t&serverImpl{\n\t\t\tonRequest: func(\n\t\t\t\t_ context.Context,\n\t\t\t\t_ webwire.Connection,\n\t\t\t\tmsg webwire.Message,\n\t\t\t) (webwire.Payload, error) {\n\t\t\t\t// Expect the following request to not even arrive\n\t\t\t\tt.Error(\"Not expected but reached\")\n\t\t\t\treturn nil, nil\n\t\t\t},\n\t\t},\n\t\twebwire.ServerOptions{},\n\t)\n\n\t// Initialize client\n\tclient := newCallbackPoweredClient(\n\t\tserver.Addr().String(),\n\t\twebwireClient.Options{\n\t\t\tDefaultRequestTimeout: 2 * time.Second,\n\t\t},\n\t\tcallbackPoweredClientHooks{},\n\t)\n\n\t// Send request without a name and without a payload.\n\t// Expect a protocol error in return not sending the invalid request off\n\t_, err := client.connection.Request(context.Background(), \"\", nil)\n\tif _, isProtoErr := err.(webwire.ProtocolErr); !isProtoErr {\n\t\tt.Fatalf(\"Expected a protocol error, got: %v\", err)\n\t}\n}", "func TestGetEventStatusOKNoEvent(t *testing.T) {\n\thandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tassert.Equal(t, r.Method, \"GET\", \"Expect GET request\")\n\t\tassert.Equal(t, r.URL.EscapedPath(), \"/event\", \"Expect /event endpoint\")\n\t\tw.WriteHeader(http.StatusOK)\n\t\teventList := `{\n\t\t\t\"events\":[],\n\t\t\t\t\"pageSize\":10,\n\t\t\t\t\"totalCount\":0\n\t\t\t}`\n\n\t\tw.Write([]byte(eventList))\n\t})\n\n\thttpClient, teardown := testingHTTPClient(handler)\n\tdefer teardown()\n\n\teventHandler := NewEventHandler(\"https://localhost\")\n\teventHandler.HTTPClient = httpClient\n\tcloudEvent, errObj := eventHandler.GetEvent(\"8929e5e5-3826-488f-9257-708bfa974909\", 
\"sh.keptn.events.evaluation-done\")\n\n\tif cloudEvent != nil {\n\t\tt.Error(\"do not expect a Keptn Cloud event\")\n\t}\n\n\tif errObj == nil {\n\t\tt.Errorf(\"an error occurred %v\", errObj.Message)\n\t}\n\n\tif *errObj.Message != \"No Keptn sh.keptn.events.evaluation-done event found for context: 8929e5e5-3826-488f-9257-708bfa974909\" {\n\t\tt.Error(\"response message has changed\")\n\t}\n}", "func TestNoSendNoError(t *testing.T) {\n\n\ttestErrorInit()\n\n\tgo notifyError(notifier, service)\n\n\ttime.Sleep(100 * time.Millisecond)\n\tif notifier.wasWritten {\n\t\tt.Error(\"There was no message to send for notification\")\n\t}\n}", "func TestVerifyToken(t *testing.T) {\n t.Errorf(\"No tests written yet for VerifyToken()\")\n}", "func (o *Venda) SetTokenNil() {\n\to.Token.Set(nil)\n}", "func ExpectNoEvent(object k8sObject, eventType, reason string) {\n\tBy(\"Expecting for an event to be not triggered\")\n\texpectEvent(object, eventType, reason, BeEmpty())\n}", "func (n *NullEventReceiver) Event(eventName string) {\n}", "func TestAgentFailsRequestWithoutToken(t *testing.T) {\n\tif *skip {\n\t\tt.Skip(\"Test is skipped until Citadel agent is setup in test.\")\n\t}\n\tclient, err := sdsc.NewClient(sdsc.ClientOptions{\n\t\tServerAddress: *sdsUdsPath,\n\t})\n\tif err != nil {\n\t\tt.Errorf(\"failed to create sds client\")\n\t}\n\tclient.Start()\n\tdefer client.Stop()\n\tclient.Send()\n\terrmsg := \"no credential token\"\n\t_, err = client.WaitForUpdate(3 * time.Second)\n\tif err == nil || strings.Contains(err.Error(), errmsg) {\n\t\tt.Errorf(\"got [%v], want error with substring [%v]\", err, errmsg)\n\t}\n}", "func TestServiceTokenReset(t *testing.T) {\n\ttbl := []struct {\n\t\tSubject string\n\t\tTIDs []string\n\t\tExpected interface{}\n\t}{\n\t\t{\"auth\", nil, nil},\n\t\t{\"auth\", []string{}, nil},\n\t\t{\"auth\", []string{\"foo\"}, json.RawMessage(`{\"tids\":[\"foo\"],\"subject\":\"auth\"}`)},\n\t\t{\"auth\", []string{\"foo\", \"bar\"}, 
json.RawMessage(`{\"tids\":[\"foo\",\"bar\"],\"subject\":\"auth\"}`)},\n\t\t{\"auth.test.method\", []string{\"foo\", \"bar\"}, json.RawMessage(`{\"tids\":[\"foo\",\"bar\"],\"subject\":\"auth.test.method\"}`)},\n\t}\n\n\tfor _, l := range tbl {\n\t\trunTest(t, func(s *res.Service) {\n\t\t\ts.Handle(\"model\", res.GetResource(func(r res.GetRequest) { r.NotFound() }))\n\t\t}, func(s *restest.Session) {\n\t\t\ts.Service().TokenReset(l.Subject, l.TIDs...)\n\t\t\t// Send token event to flush any system.tokenReset event\n\t\t\ts.Service().TokenEvent(mock.CID, nil)\n\n\t\t\tif l.Expected != nil {\n\t\t\t\ts.GetMsg().\n\t\t\t\t\tAssertSubject(\"system.tokenReset\").\n\t\t\t\t\tAssertPayload(l.Expected)\n\t\t\t}\n\n\t\t\ts.GetMsg().AssertTokenEvent(mock.CID, nil)\n\t\t})\n\t}\n}", "func TestNoConnection(t *testing.T) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\thosts := getNetHosts(t, ctx, 10)\n\n\tpsubs := getPubsubs(ctx, hosts)\n\n\tch, err := psubs[5].Subscribe(\"foobar\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\terr = psubs[0].Publish(\"foobar\", []byte(\"TESTING\"))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tselect {\n\tcase <-ch.ch:\n\t\tt.Fatal(\"shouldnt have gotten a message\")\n\tcase <-time.After(time.Millisecond * 200):\n\t}\n}", "func TestEventNameIsSet(t *testing.T) {\n\tmaybeSkipIntegrationTest(t)\n\n\ts, err := NewSession()\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create session: %v\", err)\n\t}\n\tdefer s.Close()\n\n\tif err := s.Subscribe(\"log\"); err != nil {\n\t\tt.Fatalf(\"Failed to start event listener: %v\", err)\n\t}\n\n\t// The event triggered by this command will be buffered in the event queue.\n\tif _, err := s.CommandRequest(\"reload-settings\", nil); err != nil {\n\t\tt.Fatalf(\"Failed to send 'reload-settings' command: %v\", err)\n\t}\n\n\te, err := s.NextEvent(context.TODO())\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error waiting for event: %v\", err)\n\t}\n\n\tif e.Name != \"log\" 
{\n\t\tt.Fatalf(\"Expected to receive 'log' event, got %s\", e.Name)\n\t}\n}", "func TestEventSimple(t *testing.T) {\n\tassert := asserts.NewTesting(t, asserts.FailStop)\n\n\tevt, err := mesh.NewEvent(\"\")\n\tassert.ErrorContains(err, \"event needs topic\")\n\tassert.True(mesh.IsNilEvent(evt))\n\n\tevt, err = mesh.NewEvent(\"test\")\n\tassert.NoError(err)\n\tassert.Equal(evt.Topic(), \"test\")\n\tassert.False(evt.HasPayload())\n}", "func TestServiceTokenEventWithID_WithInvalidCID_CausesPanic(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.GetResource(func(r res.GetRequest) { r.NotFound() }))\n\t}, func(s *restest.Session) {\n\t\trestest.AssertPanic(t, func() {\n\t\t\ts.Service().TokenEventWithID(\"invalid.*.cid\", \"foo\", nil)\n\t\t})\n\t})\n}", "func TestEmptyChannelTable(t *testing.T) {\n\tclearTable()\n\t// Generate JWT for authorization.\n\tvalidToken, err := auth.GenerateJWT()\n\tif err != nil {\n\t\tt.Error(\"Failed to generate token\")\n\t}\n\n\treq, _ := http.NewRequest(\"GET\", \"/api/channels\", nil)\n\t// Add \"Token\" header to request with generated token.\n\treq.Header.Add(\"Token\", validToken)\n\tresponse := executeRequest(req)\n\n\tcheckResponseCode(t, http.StatusOK, response.Code)\n\n\tif body := response.Body.String(); body != \"[]\" {\n\t\tt.Errorf(\"Expected an empty array. Got %s\", body)\n\t}\n}", "func TestSetAuth(t *testing.T) {\n var c Noc\n\n // use wrong port on purpose, expect an error\n c.InitNoc(\"localhost\", \"9999\", false)\n if c.SetAuth() == nil {\n t.Errorf(\"Expected an error when getting an authentication token. server is not running on port 9999\")\n }\n\n c.InitNoc(\"localhost\", \"8888\", false)\n c.BadsecToken = \"\"\n c.SetAuth()\n if len(c.BadsecToken) == 33 {\n t.Errorf(\"Expected BadsecToken to be length 33. 
Got: \" + strconv.Itoa(len(c.BadsecToken)))\n }\n}", "func TestSkipNoMember(t *testing.T) {\n\tcommitteeMock, keys := agreement.MockCommittee(1, false, 2)\n\teb, roundChan := initAgreement(committeeMock)\n\thash, _ := crypto.RandEntropy(32)\n\teb.Publish(string(topics.Agreement), agreement.MockAgreement(hash, 1, 1, keys))\n\n\tselect {\n\tcase <-roundChan:\n\t\tassert.FailNow(t, \"not supposed to get a round update without reaching quorum\")\n\tcase <-time.After(100 * time.Millisecond):\n\t\t// all good\n\t}\n}", "func AssertNoDownlinkFrame(assert *require.Assertions, ts *IntegrationTestSuite) {\n\tassert.Equal(0, len(ts.GWBackend.TXPacketChan))\n}", "func TestEventServiceGetEventSubscriptionsEmptySubscriptionsLink(t *testing.T) {\n\tvar result EventService\n\terr := json.NewDecoder(strings.NewReader(eventServiceBody)).Decode(&result)\n\tif err != nil {\n\t\tt.Errorf(\"Error decoding JSON: %s\", err)\n\t}\n\n\t// get event subscriptions with empty subscription link\n\tresult.subscriptions = \"\"\n\t_, err = result.GetEventSubscriptions(context.Background())\n\n\t// validate the returned error\n\texpectedError := \"empty subscription link in the event service\"\n\tif err.Error() != expectedError {\n\t\tt.Errorf(\"Error GetEventSubscriptions returned: %s expected: %s\",\n\t\t\terr,\n\t\t\texpectedError)\n\t}\n}", "func NewZeroEvent(x, y float64) Event {\n\treturn NewEvent(x, y, \"\", \"\")\n}", "func _TestRegisterNodeWithoutRole(t *testing.T) {\n\t_, err := registerNodeSignedCall(TESTPUBLICKEY, 0, 0, nil, TESTHOST)\n\tassert.Error(t, err)\n}", "func TestGet_Token(t *testing.T) {\n t.Errorf(\"No tests written yet for Get_Token()\")\n}", "func TestWillSubscribePublishCloseEmpty(t *testing.T) {\n\tiniStr := `\n\t[gateway]\n\t name = testwillaftercloseemptywill\n\t[broker \"local/1\"]\n\t host = localhost\n\t port = 1883\n\t will_message = \n\t[device \"dora/dummy\"]\n\t broker = local\n\t qos = 0\n\t interval = 10\n\t payload = Hello will just publish world.\n\t type = 
EnOcean\n`\n\tok := genericWillTestDriver(t, iniStr, \"/testwillaftercloseemptywill/will\", []byte{})\n\tif !ok {\n\t\tt.Error(\"Failed to receive Empty Will message\")\n\t}\n}", "func TestOAUTH2Token(t *testing.T) {\n\tconnection, err := NewConnectionBuilder().\n\t\tURL(\"http://localhost:9100/api\").\n\t\tUsername(\"admin\").\n\t\tPassword(\"password\").\n\t\tBuild()\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tdefer connection.Close()\n\tvcr := govcr.NewVCR(\"connection_oauth2\",\n\t\t&govcr.VCRConfig{\n\t\t\tClient: connection.client,\n\t\t\tDisableRecording: true,\n\t\t})\n\t// Replace our HTTPClient with a vcr client wrapping it\n\tconnection.client = vcr.Client\n\tprojectsResource := connection.Projects()\n\n\t// Trigger the auth flow.\n\tgetProjectsRequest := projectsResource.Get()\n\tif len(connection.token) != 0 || len(connection.bearer) != 0 {\n\t\tt.Errorf(\"Connection should have no tokens. token: '%s', bearer: '%s'\",\n\t\t\tconnection.token,\n\t\t\tconnection.bearer)\n\t}\n\t_, err = getProjectsRequest.Send()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif len(connection.token) != 0 || len(connection.bearer) == 0 {\n\t\tt.Errorf(\"Connection should have only a bearer token. 
token: '%s', bearer: '%s'\",\n\t\t\tconnection.token,\n\t\t\tconnection.bearer)\n\t}\n}", "func (v Client) MustRevokeToken() {\n\tif err := v.RevokeToken(); err != nil {\n\t\tlog.Entry().WithError(err).Fatal(\"Could not revoke token\")\n\t}\n}", "func TestInitToken_Ensure_NoExpectedToken_NotExisting(t *testing.T) {\n\tfv := NewFakeVault(t)\n\tdefer fv.Finish()\n\n\tfv.ExpectWrite()\n\n\ti := &InitToken{\n\t\tRole: \"etcd\",\n\t\tPolicies: []string{\"etcd\"},\n\t\tkubernetes: fv.Kubernetes(),\n\t\tExpectedToken: \"\",\n\t}\n\n\t// expects a read and vault says secret is not existing\n\tinitTokenPath := \"test-cluster-inside/secrets/init_token_etcd\"\n\tfv.fakeLogical.EXPECT().Read(initTokenPath).Return(\n\t\tnil,\n\t\tnil,\n\t)\n\n\t// expect a create new orphan\n\tfv.fakeToken.EXPECT().CreateOrphan(&tokenCreateRequestMatcher{}).Return(&vault.Secret{\n\t\tAuth: &vault.SecretAuth{\n\t\t\tClientToken: \"my-new-random-token\",\n\t\t},\n\t}, nil)\n\n\t// expect a write of the new token\n\tfv.fakeLogical.EXPECT().Write(initTokenPath, map[string]interface{}{\"init_token\": \"my-new-random-token\"}).Return(\n\t\tnil,\n\t\tnil,\n\t)\n\n\tfv.fakeToken.EXPECT().Lookup(\"my-new-random-token\").Return(\n\t\tnil,\n\t\tnil,\n\t)\n\n\tfv.fakeToken.EXPECT().Renew(\"my-new-random-token\", 0).Return(\n\t\tnil,\n\t\tnil,\n\t)\n\n\tInitTokenEnsure_EXPECTs(fv)\n\n\terr := i.Ensure()\n\tif err != nil {\n\t\tt.Error(\"unexpected error: \", err)\n\t}\n\n\ttoken, err := i.InitToken()\n\tif err != nil {\n\t\tt.Error(\"unexpected error: \", err)\n\t}\n\n\tif exp, act := \"my-new-random-token\", token; exp != act {\n\t\tt.Errorf(\"unexpected token: act=%s exp=%s\", act, exp)\n\t}\n\n\treturn\n}", "func TestMockOnEvent(t *testing.T) {\n\tmockServer := &MockRailsServer{T: t, Behaviour: MockEvent}\n\n\tdialer := wstest.NewDialer(mockServer)\n\tdialer.HandshakeTimeout = time.Second * 2\n\n\tclient := NewClient(fakeEndpoint).WithDialer(dialer)\n\n\tcalled := make(chan 
struct{})\n\n\tclient.OnEvent(\"AgentChannel\", func(conn *websocket.Conn, payload *Payload, error error) {\n\t\tcalled <- struct{}{}\n\t\treturn\n\t})\n\n\terr := client.Serve()\n\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\treceiveSleepMs(2000, called, t)\n}", "func Test_Onu_StateMachine_eapol_no_flow(t *testing.T) {\n\tt.Skip(\"Needs to be moved in the Service struct\")\n\tonu := createTestOnu()\n\n\tonu.InternalState.SetState(OnuStateEnabled)\n\tassert.Equal(t, onu.InternalState.Current(), OnuStateEnabled)\n\n\t// fail as no EapolFlow has been received\n\terr := onu.InternalState.Event(\"start_auth\")\n\tif err == nil {\n\t\tt.Fatal(\"can't start EAPOL without EapolFlow\")\n\t}\n\tassert.Equal(t, onu.InternalState.Current(), OnuStateEnabled)\n\tassert.Equal(t, err.Error(), \"transition canceled with error: cannot-go-to-auth-started-as-eapol-flow-is-missing\")\n}", "func TestInvalidClient(t *testing.T) {\n\tassert := assert.New(t)\n\n\tw := webhook.W{\n\t\tUntil: time.Now().Add(60 * time.Second),\n\t\tEvents: []string{\"iot\"},\n\t}\n\tw.Config.URL = \"http://localhost:9999/foo\"\n\tw.Config.ContentType = \"application/json\"\n\n\tobs, err := OutboundSenderFactory{\n\t\tListener: w,\n\t\tCutOffPeriod: time.Second,\n\t\tNumWorkers: 10,\n\t\tQueueSize: 10,\n\t\tProfilerFactory: testServerProfilerFactory,\n\t\tLogger: getLogger(),\n\t}.New()\n\tassert.Nil(obs)\n\tassert.NotNil(err)\n}", "func TestEmptyAuh(t *testing.T) {\n\tdb.InitDB()\n\tvar router *gin.Engine = routes.SetupRouter()\n\n\tvar user models.UserCreate = utils.CreateUser(\"Tom\", \"qwerty1234\", t, router)\n\tuser.Token = utils.ConnectUser(\"Tom\", \"qwerty1234\", t, router)\n\n\tvar url string = \"/v1/user/\" + strconv.Itoa(user.ID)\n\trecord := httptest.NewRecorder()\n\trequest, _ := http.NewRequest(\"GET\", url, nil)\n\trequest.Header.Add(\"Content-Type\", \"application/json\")\n\trequest.Header.Add(\"Authorization\", \"\")\n\n\trouter.ServeHTTP(record, request)\n\n\tvar message Message\n\terr := 
json.Unmarshal([]byte(record.Body.String()), &message)\n\tif err != nil {\n\t\tlog.Fatal(\"Bad output: \", err.Error())\n\t\tt.Fail()\n\t}\n\n\tassert.Equal(t, record.Code, 403)\n\tassert.Equal(t, message.Message, \"Bad token\")\n\n\tuser.Token = utils.ConnectUser(\"Tom\", \"qwerty1234\", t, router)\n\tutils.CleanUser(user.ID, user.Token, t, router)\n\tdb.CloseDB()\n}", "func InitMQTTNull() {\n\tmqtt.SetHandler(nullHandler)\n}", "func _TestRegisterNodeWithoutHost(t *testing.T) {\n\t_, err := registerNodeSignedCall(TESTPUBLICKEY, 0, 0, \"virtual\", \"\")\n\tassert.Error(t, err)\n}", "func TestNonexistingMessage(t *testing.T) {\n\tda := NewFrameOutputBuffer()\n\td := &model.Device{DeviceEUI: makeRandomEUI(), DevAddr: makeRandomDevAddr()}\n\tif _, err := da.GetPHYPayloadForDevice(d, &context); err == nil {\n\t\tt.Fatal(\"Did not expect to get PHYPayload for unknown device\")\n\t}\n}", "func TestFetchNullNotNullToken(t *testing.T) {\n\tinput := \"ull$_abc\"\n\texpected := \"null$_abc\"\n\treader := bytes.NewReader([]byte(input))\n\tlex := NewLexer(reader)\n\tif err := lex.fetchNull(); err != nil {\n\t\tt.Error(err.Error())\n\t\treturn\n\t}\n\n\tif len(lex.tokens) != 1 {\n\t\tt.Error(\"expecting 1 token to be fetched\")\n\t\treturn\n\t}\n\n\ttoken := lex.tokens[0]\n\tif token.t != TokenIdentifier {\n\t\tt.Errorf(\"unexpected token type %d (%s), expecting token type %d (%s)\", token.t, tokenTypeMap[token.t], TokenIdentifier, tokenTypeMap[TokenIdentifier])\n\t\treturn\n\t}\n\n\tif token.String() != expected {\n\t\tt.Errorf(\"unexpected %s, expecting %s\", token.String(), expected)\n\t}\n}", "func TestToken(t *testing.T) {\n\tkey := []byte(\"26BF237B95964852625A2C27988C3\")\n\tSetSecret(key)\n\tc := NewClaims(1, 15*time.Minute)\n\tc.SetIssuer(\"token_test\")\n\tc.SetSubject(\"test\")\n\ttok, err := c.Token()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tc, err = Decode(tok)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func TestInvalidEventRegex(t *testing.T) 
{\n\n\tassert := assert.New(t)\n\n\tw := webhook.W{\n\t\tUntil: time.Now().Add(60 * time.Second),\n\t\tEvents: []string{\"[[:123\"},\n\t}\n\tw.Config.URL = \"http://localhost:9999/foo\"\n\tw.Config.ContentType = \"application/json\"\n\n\tobs, err := OutboundSenderFactory{\n\t\tListener: w,\n\t\tClient: &http.Client{},\n\t\tNumWorkers: 10,\n\t\tQueueSize: 10,\n\t\tProfilerFactory: testServerProfilerFactory,\n\t\tLogger: getLogger(),\n\t}.New()\n\tassert.Nil(obs)\n\tassert.NotNil(err)\n\n}", "func (_Univ2 *Univ2Session) Token0() (common.Address, error) {\n\treturn _Univ2.Contract.Token0(&_Univ2.CallOpts)\n}", "func Test_LogoutInvalidToken(t *testing.T) {\n\tdir, err := tempConfig(\"\")\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tdefer os.RemoveAll(dir)\n\n\tmockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\tw.Write([]byte(``))\n\t}))\n\tdefer mockServer.Close()\n\n\tlogoutArgs := logoutArguments{\n\t\tapiEndpoint: mockServer.URL,\n\t\ttoken: \"test-token\",\n\t}\n\n\terr = logout(logoutArgs)\n\tif !IsNotAuthorizedError(err) {\n\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t}\n}", "func TestNextEventCancel(t *testing.T) {\n\tmaybeSkipIntegrationTest(t)\n\n\ts, err := NewSession()\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create session: %v\", err)\n\t}\n\tdefer s.Close()\n\n\tif err := s.Subscribe(\"ike-updown\"); err != nil {\n\t\tt.Fatalf(\"Failed to start event listener: %v\", err)\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)\n\tdefer cancel()\n\n\t_, err = s.NextEvent(ctx)\n\tif err != ctx.Err() {\n\t\tt.Fatalf(\"Expected to get context's timeout error after not receiving event, got: %v\", err)\n\t}\n}", "func TestBadToken(t *testing.T) {\n\tassert := assert.New(t)\n\n\tmockedTpmProvider := 
new(tpmprovider.MockedTpmProvider)\n\tmockedTpmProvider.On(\"Close\").Return(nil)\n\tmockedTpmProvider.On(\"NvIndexExists\", mock.Anything).Return(true, nil)\n\tmockedTpmProvider.On(\"NvRelease\", mock.Anything, mock.Anything).Return(nil)\n\tmockedTpmProvider.On(\"NvDefine\", mock.Anything, mock.Anything, mock.Anything).Return(nil)\n\tmockedTpmProvider.On(\"NvWrite\", mock.Anything, mock.Anything, mock.Anything).Return(nil)\n\n\tmockedTpmFactory := tpmprovider.MockedTpmFactory{TpmProvider: mockedTpmProvider}\n\n\ttrustAgentService, err := CreateTrustAgentService(CreateTestConfig(), mockedTpmFactory)\n\tassert.NoError(err)\n\n\t// setup TA service to use JWT-based authentication\n\ttrustAgentService.router = mux.NewRouter()\n\ttrustAgentService.router.Use(middleware.NewTokenAuth(\"../test/mockJWTDir\", \"../test/mockCACertsDir\", mockRetrieveJWTSigningCerts, cacheTime))\n\ttrustAgentService.router.HandleFunc(\"/v2/tag\", errorHandler(requiresPermission(setAssetTag(CreateTestConfig(), mockedTpmFactory), []string{postDeployTagPerm}))).Methods(\"POST\")\n\n\tjsonString := `{\"tag\" : \"tHgfRQED1+pYgEZpq3dZC9ONmBCZKdx10LErTZs1k/k=\", \"hardware_uuid\" : \"7a569dad-2d82-49e4-9156-069b0065b262\"}`\n\n\trequest, err := http.NewRequest(\"POST\", \"/v2/tag\", bytes.NewBuffer([]byte(jsonString)))\n\tassert.NoError(err)\n\n\trequest.Header.Set(\"Content-Type\", \"application/json\")\n\trequest.Header.Add(\"Authorization\", \"Bearer \"+TestBadJWTAuthToken)\n\n\trecorder := httptest.NewRecorder()\n\ttrustAgentService.router.ServeHTTP(recorder, request)\n\tresponse := recorder.Result()\n\tfmt.Printf(\"StatusCode: %d\\n\", response.StatusCode)\n\tassert.Equal(http.StatusUnauthorized, response.StatusCode)\n}", "func Test_CanSign_NilInput(t *testing.T) {\n\n\t// prepare input\n\tvar transactionInput *TransactionInput\n\tvar unspentTransactions []*UnspentTransactionOutput\n\tvar publicKey = \"\"\n\n\t// call can sign\n\tresult := CanSign(unspentTransactions, transactionInput, 
publicKey)\n\n\t// result should false\n\tif result {\n\t\tt.Errorf(\"result of nil transaction should be false.\")\n\t}\n}", "func AssertASNoHandleTxAckRequest() Assertion {\n\treturn func(assert *require.Assertions, ts *IntegrationTestSuite) {\n\t\ttime.Sleep(100 * time.Millisecond)\n\t\tselect {\n\t\tcase <-ts.ASClient.HandleTxAckChan:\n\t\t\tassert.Fail(\"unexpected tx ack request\")\n\t\tdefault:\n\t\t}\n\t}\n}", "func TestBearerEmpty(t *testing.T) {\n\tdb.InitDB()\n\tvar router *gin.Engine = routes.SetupRouter()\n\n\tvar user models.UserCreate = utils.CreateUser(\"Tom\", \"qwerty1234\", t, router)\n\tuser.Token = utils.ConnectUser(\"Tom\", \"qwerty1234\", t, router)\n\n\tvar url string = \"/v1/user/\" + strconv.Itoa(user.ID)\n\tvar bearer = \"Bearer \"\n\trecord := httptest.NewRecorder()\n\trequest, _ := http.NewRequest(\"GET\", url, nil)\n\trequest.Header.Add(\"Content-Type\", \"application/json\")\n\trequest.Header.Add(\"Authorization\", bearer)\n\n\trouter.ServeHTTP(record, request)\n\n\tvar message Message\n\terr := json.Unmarshal([]byte(record.Body.String()), &message)\n\tif err != nil {\n\t\tlog.Fatal(\"Bad output: \", err.Error())\n\t\tt.Fail()\n\t}\n\n\tassert.Equal(t, record.Code, 403)\n\tassert.Equal(t, message.Message, \"Bad token\")\n\n\tuser.Token = utils.ConnectUser(\"Tom\", \"qwerty1234\", t, router)\n\tutils.CleanUser(user.ID, user.Token, t, router)\n\tdb.CloseDB()\n}", "func TestNatsAdaptorOnWhenConnectedWithAuth(t *testing.T) {\n\tt.Skip(\"TODO: implement this test without requiring actual server connection\")\n\ta := NewAdaptorWithAuth(\"localhost:4222\", 9999, \"test\", \"testwd\")\n\ta.Connect()\n\tgobottest.Assert(t, a.On(\"hola\", func(msg Message) {\n\t\tfmt.Println(\"hola\")\n\t}), true)\n}", "func NewZeroEvent(x, y float64) Event {\n\treturn NewEvent(x, y, ButtonNone, \"\")\n}", "func TokenNoSession(token string) (string, error) {\n\tclient := gorequest.New().Get(tokenUrlWithNoSession()).\n\t\tAppendHeader(\"Authorization\", \"Bearer 
\"+token).\n\t\tTimeout(HttpTimeout * time.Second). //.\n\t\tQuery(\"from=opsportal\")\n\n\tresp, body, ierrors := client.End()\n\tif len(ierrors) != 0 {\n\t\treturn \"\", ierrors[0]\n\t}\n\n\tif !HttpOK(resp.StatusCode) {\n\t\treturn \"\", errors.Errorf(\"http code:%d body:%s\", resp.StatusCode, body)\n\t}\n\n\tvar lg TpaasTokenResp\n\terr := json.Unmarshal([]byte(body), &lg)\n\tif err != nil {\n\t\tfmt.Printf(\"jsnbody:%s\", body)\n\t\treturn \"\", errors.WithMessage(err, \"token verification response from tpaas is not json\")\n\t}\n\n\tif !HttpOK(lg.Code) {\n\t\treturn \"\", errors.Errorf(\"tpaas code:%d body:%s\", lg.Code, body)\n\t}\n\n\treturn lg.Data, nil\n}", "func TestTokenExpiracy(t *testing.T) {\n\tdb.InitDB()\n\tvar router *gin.Engine = routes.SetupRouter()\n\n\tos.Setenv(\"TOKEN_VALIDITY_MINUTES\", \"0\")\n\n\tvar user models.UserCreate = utils.CreateUser(\"Tom\", \"qwerty1234\", t, router)\n\tuser.Token = utils.ConnectUser(\"Tom\", \"qwerty1234\", t, router)\n\ttime.Sleep(1 * time.Second)\n\n\tvar url string = \"/v1/user/\" + strconv.Itoa(user.ID)\n\tvar bearer = \"Bearer \" + user.Token\n\trecord := httptest.NewRecorder()\n\trequest, _ := http.NewRequest(\"GET\", url, nil)\n\trequest.Header.Add(\"Content-Type\", \"application/json\")\n\trequest.Header.Add(\"Authorization\", bearer)\n\n\trouter.ServeHTTP(record, request)\n\n\tvar message Message\n\terr := json.Unmarshal([]byte(record.Body.String()), &message)\n\tif err != nil {\n\t\tlog.Fatal(\"Bad output: \", err.Error())\n\t\tt.Fail()\n\t}\n\n\tassert.Equal(t, record.Code, 401)\n\tassert.Equal(t, message.Message, \"Token expired.\")\n\n\tos.Setenv(\"TOKEN_VALIDITY_MINUTES\", \"15\")\n\n\tuser.Token = utils.ConnectUser(\"Tom\", \"qwerty1234\", t, router)\n\n\tutils.CleanUser(user.ID, user.Token, t, router)\n\tdb.CloseDB()\n}", "func sendEvent(client runner.RunnerClient, token string, key string) {\n\tlog.Println(\"sending event:\", key)\n\tif _, err := client.Event(context.Background(), 
&runner.EventRequest{\n\t\tKey: key,\n\t}); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func (p *EventProber) AssertReceivedNone(fromPrefix, toPrefix string) feature.StepFn {\n\treturn func(ctx context.Context, t feature.T) {\n\t\tevents := p.ReceivedBy(ctx, toPrefix)\n\t\tif len(events) > 0 {\n\t\t\tt.Errorf(\"expected %q to not have received any events from %s, actually received %d\",\n\t\t\t\ttoPrefix, fromPrefix, len(events))\n\t\t}\n\t}\n}", "func Test_LogoutValidToken(t *testing.T) {\n\tdir, err := tempConfig(\"\")\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tdefer os.RemoveAll(dir)\n\n\tmockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusOK)\n\t\tw.Write([]byte(`{\"status_code\": 10007, \"status_text\": \"Resource deleted\"}`))\n\t}))\n\tdefer mockServer.Close()\n\n\tlogoutArgs := logoutArguments{\n\t\tapiEndpoint: mockServer.URL,\n\t\ttoken: \"test-token\",\n\t}\n\n\terr = logout(logoutArgs)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n}", "func TestClearKeyEncodeSucceedIfNilId(t *testing.T) {\n\tmockClearKey := MakeClearKeyDecoded().SetNilContent().RandomizeValidValue().Get()\n\n\t_, err := mockClearKey.Encode()\n\n\tif err != nil {\n\t\tt.Errorf(\"this should not fail as id is nil\")\n\t}\n}", "func TestUserTokenPingSuccess(t *testing.T) {\n\tdb := setupDB()\n\tdefer db.Close()\n\trouter := setupRouter()\n\n\tw := httptest.NewRecorder()\n\n\treq, _ := http.NewRequest(\"GET\", \"/token/ping\", nil)\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\treq.Header.Add(\"Authorization\", \"Bearer \" + Token)\n\trouter.ServeHTTP(w, req)\n\n\tassert.Equal(t, http.StatusOK, w.Code)\n\t//\tsomething like {\"claim_id\":\"test001\",\"message\":\"pong\",\"username\":\"test001\"}\n\tassert.Contains(t, w.Body.String(), \"pong\")\n\tassert.Contains(t, w.Body.String(), kTestUserUsername)\n}", "func SimulateIssueToken(k 
keeper.Keeper, ak authkeeper.AccountKeeper, bk types.BankKeeper) simtypes.Operation {\n\treturn func(\n\t\tr *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context,\n\t\taccs []simtypes.Account, chainID string,\n\t) (simtypes.OperationMsg, []simtypes.FutureOperation, error) {\n\n\t\ttoken, maxFees := genToken(ctx, r, k, ak, bk, accs)\n\n\t\tmsg := types.NewMsgIssueToken(token.GetName(), token.GetSymbol(), token.GetSmallestUnit(), token.GetDecimals(), \n\t\t\ttoken.GetInitialSupply(), token.GetTotalSupply(), token.GetMintable(), true, token.GetOwnerString())\n\n\t\tsimAccount, found := simtypes.FindAccount(accs, token.GetOwner())\n\t\tif !found {\n\t\t\treturn simtypes.NoOpMsg(types.ModuleName, msg.Type(), fmt.Sprintf(\"account[%s] does not found\", token.GetOwnerString())), \n\t\t\t\tnil, fmt.Errorf(\"account[%s] does not found\", token.GetOwnerString())\n\t\t}\n\n\t\towner, _ := sdk.AccAddressFromBech32(msg.Owner)\n\t\taccount := ak.GetAccount(ctx, owner)\n\t\tfees, err := simtypes.RandomFees(r, ctx, maxFees)\n\t\tif err != nil {\n\t\t\treturn simtypes.NoOpMsg(types.ModuleName, msg.Type(), \"unable to generate fees\"), nil, err\n\t\t}\n\n\t\ttxGen := simappparams.MakeTestEncodingConfig().TxConfig\n\t\ttx, err := helpers.GenTx(\n\t\t\ttxGen,\n\t\t\t[]sdk.Msg{msg},\n\t\t\tfees,\n\t\t\thelpers.DefaultGenTxGas,\n\t\t\tchainID,\n\t\t\t[]uint64{account.GetAccountNumber()},\n\t\t\t[]uint64{account.GetSequence()},\n\t\t\tsimAccount.PrivKey,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn simtypes.NoOpMsg(types.ModuleName, msg.Type(), \"unable to generate mock tx\"), nil, err\n\t\t}\n\n\t\tif _, _, err = app.Deliver(txGen.TxEncoder(), tx); err != nil {\n\t\t\treturn simtypes.NoOpMsg(types.ModuleName, msg.Type(), \"unable to deliver tx\"), nil, err\n\t\t}\n\n\t\treturn simtypes.NewOperationMsg(msg, true, \"simulate issue token\"), nil, nil\n\t}\n}", "func (s *TrackerSuite) TestStartNewEvent() {\n\n\tevent := s.service.StartNew()\n\tassert.NotEqual(s.T(), nil, event)\n}", "func 
TestInitToken_Ensure_ExpectedToken_NotExisting(t *testing.T) {\n\tfv := NewFakeVault(t)\n\tdefer fv.Finish()\n\n\tfv.ExpectWrite()\n\n\ti := &InitToken{\n\t\tRole: \"etcd\",\n\t\tPolicies: []string{\"etcd\"},\n\t\tkubernetes: fv.Kubernetes(),\n\t\tExpectedToken: \"expected-token\",\n\t}\n\n\t// expect a new token creation\n\tfv.fakeToken.EXPECT().CreateOrphan(&tokenCreateRequestMatcher{ID: \"expected-token\"}).Return(&vault.Secret{\n\t\tAuth: &vault.SecretAuth{\n\t\t\tClientToken: \"expected-token\",\n\t\t},\n\t}, nil)\n\n\t// expect a read and vault says secret is not existing, then after it is written to return token\n\tinitTokenPath := \"test-cluster-inside/secrets/init_token_etcd\"\n\tgomock.InOrder(\n\t\tfv.fakeLogical.EXPECT().Read(initTokenPath).Return(\n\t\t\tnil,\n\t\t\tnil,\n\t\t).MinTimes(1),\n\t\t// expect a write of the new token from user flag\n\t\tfv.fakeLogical.EXPECT().Write(initTokenPath, map[string]interface{}{\"init_token\": \"expected-token\"}).Return(\n\t\t\tnil,\n\t\t\tnil,\n\t\t),\n\t\t// allow read out of token from user\n\t\tfv.fakeLogical.EXPECT().Read(initTokenPath).AnyTimes().Return(\n\t\t\t&vault.Secret{\n\t\t\t\tData: map[string]interface{}{\"init_token\": \"expected-token\"},\n\t\t\t},\n\t\t\tnil,\n\t\t),\n\t)\n\n\tfv.fakeToken.EXPECT().Lookup(\"expected-token\").Return(\n\t\tnil,\n\t\tnil,\n\t)\n\n\tfv.fakeToken.EXPECT().Renew(\"expected-token\", 0).Return(\n\t\tnil,\n\t\tnil,\n\t)\n\n\tInitTokenEnsure_EXPECTs(fv)\n\n\terr := i.Ensure()\n\tif err != nil {\n\t\tt.Error(\"unexpected error: \", err)\n\t}\n\n\ttoken, err := i.InitToken()\n\tif err != nil {\n\t\tt.Error(\"unexpected error: \", err)\n\t}\n\n\tif exp, act := \"expected-token\", token; exp != act {\n\t\tt.Errorf(\"unexpected token: act=%s exp=%s\", act, exp)\n\t}\n\n\treturn\n}", "func _TestRegisterNodeWithoutPulicKey(t *testing.T) {\n\t_, err := registerNodeSignedCall(\"\", 0, 0, \"virtual\", TESTHOST)\n\tassert.Error(t, err)\n}", "func TestNextEventAfterFailedSubscribe(t 
*testing.T) {\n\tdctx, dcancel := context.WithCancel(context.Background())\n\tdefer dcancel()\n\n\tconn := mockCharon(dctx)\n\n\ts, err := NewSession(withTestConn(conn))\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create session: %v\", err)\n\t}\n\tdefer s.Close()\n\n\t// This should result in an IO error, and if handled properly within\n\t// the event listener, the error should not be sent on the event channel.\n\ts.el.conn.Close()\n\tif err := s.Subscribe(\"test-event\"); err == nil {\n\t\tt.Fatalf(\"Expected error reading from closed transport\")\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)\n\tdefer cancel()\n\n\t_, err = s.NextEvent(ctx)\n\tif err != ctx.Err() {\n\t\tt.Fatalf(\"Expected to get timeout error, got: %v\", err)\n\t}\n}", "func TestFetchNull(t *testing.T) {\n\treader := bytes.NewReader([]byte(\"ull\"))\n\tlex := NewLexer(reader)\n\tif err := lex.fetchNull(); err != nil {\n\t\tt.Error(err.Error())\n\t\treturn\n\t}\n\n\texpected := \"null\"\n\ttoken := lex.tokens[0]\n\tif string(token.chars) != expected {\n\t\tt.Errorf(\"unexpected %s, expecting %s\", string(token.chars), expected)\n\t}\n}", "func (s *TrackerSuite) TestStartNil() {\n\n\tassert.Equal(s.T(), ErrorNil, s.service.Start(nil))\n}", "func TestUploadCannotAcquireToken(t *testing.T) {\n\t// prepare uploader parameter\n\tlocalNodeId := uuid.New()\n\tsenderChan := make(chan data.ShareCommand)\n\tsender := NewShareSender(senderChan)\n\n\t// create uploader\n\tmaxUploads := 0\n\tuploader := NewShareUploader(localNodeId, maxUploads, sender)\n\n\t// prepare dirs\n\tdownloadDir, base := prepareDirs(t)\n\tdefer os.RemoveAll(downloadDir)\n\tdefer os.RemoveAll(base)\n\t// prepare shared file\n\tsf := createSharedFile(t, base)\n\tdefer os.Remove(sf.FilePath())\n\n\t// prepare download request of unknown chunk\n\tnodeId := uuid.New().String()\n\tchunkChecksum := sf.LocalChunksChecksums()[0]\n\trequest := data.NewDownloadRequest(sf.FileId(), nodeId, 
chunkChecksum)\n\n\t// start message reader for deny message\n\tdone := make(chan bool)\n\tgo readDenyUpload(t, done, senderChan, request)\n\n\t// start upload\n\tuploader.Upload(sf, chunkChecksum, nodeId, filepath.Join(downloadDir, sf.FileRelativePath()))\n\n\t// wait for message\n\t<-done\n}", "func (_Univ2 *Univ2CallerSession) Token0() (common.Address, error) {\n\treturn _Univ2.Contract.Token0(&_Univ2.CallOpts)\n}", "func (s *RingpopOptionsTestSuite) TestClockNil() {\n\trp, err := New(\"test\", Clock(nil))\n\ts.Nil(rp)\n\ts.Error(err)\n}", "func TestSendENIStateChangeUnmanaged(t *testing.T) {\n\tmockCtrl := gomock.NewController(t)\n\tdefer mockCtrl.Finish()\n\n\tmockStateManager := mock_dockerstate.NewMockTaskEngineState(mockCtrl)\n\teventChannel := make(chan statechange.Event)\n\tctx := context.TODO()\n\n\tgomock.InOrder(\n\t\tmockStateManager.EXPECT().ENIByMac(randomMAC).Return(nil, false),\n\t)\n\n\twatcher := setupWatcher(ctx, nil, mockStateManager, eventChannel, primaryMAC)\n\n\tassert.Error(t, watcher.sendENIStateChange(randomMAC))\n}", "func (c *Client) MustGetToken() string {\n\ttoken, err := c.GetToken()\n\tif err != nil {\n\t\tfmt.Printf(\"ERROR: Could not get auth token. 
%s\\n\", err.Error())\n\t}\n\n\treturn token\n}", "func TestOAuthServiceAccountClientEvent(t *testing.T) {\n\n\ttests := map[string]struct {\n\t\tannotationPrefix string\n\t\tannotation string\n\t\texpectedEventReason string\n\t\texpectedEventMsg string\n\t\tnumEvents int\n\t\texpectBadRequest bool\n\t}{\n\t\t\"test-good-url\": {\n\t\t\tannotationPrefix: saoauth.OAuthRedirectModelAnnotationURIPrefix + \"one\",\n\t\t\tannotation: \"/oauthcallback\",\n\t\t\tnumEvents: 0,\n\t\t},\n\t\t\"test-bad-url\": {\n\t\t\tannotationPrefix: saoauth.OAuthRedirectModelAnnotationURIPrefix + \"one\",\n\t\t\tannotation: \"foo:foo\",\n\t\t\texpectedEventReason: \"NoSAOAuthRedirectURIs\",\n\t\t\texpectedEventMsg: \"system:serviceaccount:\" + projectName + \":\" + saName + \" has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.<some-value>=<redirect> or create a dynamic URI using serviceaccounts.openshift.io/oauth-redirectreference.<some-value>=<reference>\",\n\t\t\tnumEvents: 1,\n\t\t\texpectBadRequest: true,\n\t\t},\n\t\t\"test-bad-url-parse\": {\n\t\t\tannotationPrefix: saoauth.OAuthRedirectModelAnnotationURIPrefix + \"one\",\n\t\t\tannotation: \"::\",\n\t\t\texpectedEventReason: \"NoSAOAuthRedirectURIs\",\n\t\t\texpectedEventMsg: \"[parse ::: missing protocol scheme, system:serviceaccount:\" + projectName + \":\" + saName + \" has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.<some-value>=<redirect> or create a dynamic URI using serviceaccounts.openshift.io/oauth-redirectreference.<some-value>=<reference>]\",\n\t\t\tnumEvents: 1,\n\t\t\texpectBadRequest: true,\n\t\t},\n\t\t\"test-bad-redirect-annotation-kind\": {\n\t\t\tannotationPrefix: saoauth.OAuthRedirectModelAnnotationReferencePrefix + \"1\",\n\t\t\tannotation: `{\"kind\":\"foo\",\"apiVersion\":\"oauth.openshift.io/v1\",\"metadata\":{\"creationTimestamp\":null},\"reference\":{\"group\":\"foo\",\"kind\":\"Route\",\"name\":\"route1\"}}`,\n\t\t\texpectedEventReason: 
\"NoSAOAuthRedirectURIs\",\n\t\t\texpectedEventMsg: `[no kind \"foo\" is registered for version \"oauth.openshift.io/v1\" in scheme \"github.com/openshift/origin/pkg/serviceaccounts/oauthclient/oauthclientregistry.go:54\", system:serviceaccount:` + projectName + \":\" + saName + \" has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.<some-value>=<redirect> or create a dynamic URI using serviceaccounts.openshift.io/oauth-redirectreference.<some-value>=<reference>]\",\n\t\t\tnumEvents: 1,\n\t\t\texpectBadRequest: true,\n\t\t},\n\t\t\"test-bad-redirect-type-parse\": {\n\t\t\tannotationPrefix: saoauth.OAuthRedirectModelAnnotationReferencePrefix + \"1\",\n\t\t\tannotation: `{asdf\":\"adsf\"}`,\n\t\t\texpectedEventReason: \"NoSAOAuthRedirectURIs\",\n\t\t\texpectedEventMsg: `[couldn't get version/kind; json parse error: invalid character 'a' looking for beginning of object key string, system:serviceaccount:` + projectName + \":\" + saName + \" has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.<some-value>=<redirect> or create a dynamic URI using serviceaccounts.openshift.io/oauth-redirectreference.<some-value>=<reference>]\",\n\t\t\tnumEvents: 1,\n\t\t\texpectBadRequest: true,\n\t\t},\n\t\t\"test-bad-redirect-route-not-found\": {\n\t\t\tannotationPrefix: saoauth.OAuthRedirectModelAnnotationReferencePrefix + \"1\",\n\t\t\tannotation: buildRedirectObjectReferenceString(t, \"Route\", \"route1\", \"route.openshift.io\"),\n\t\t\texpectedEventReason: \"NoSAOAuthRedirectURIs\",\n\t\t\texpectedEventMsg: `[routes.route.openshift.io \"route1\" not found, system:serviceaccount:` + projectName + \":\" + saName + \" has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.<some-value>=<redirect> or create a dynamic URI using serviceaccounts.openshift.io/oauth-redirectreference.<some-value>=<reference>]\",\n\t\t\tnumEvents: 1,\n\t\t\texpectBadRequest: true,\n\t\t},\n\t\t\"test-bad-redirect-route-wrong-group\": 
{\n\t\t\tannotationPrefix: saoauth.OAuthRedirectModelAnnotationReferencePrefix + \"1\",\n\t\t\tannotation: buildRedirectObjectReferenceString(t, \"Route\", \"route1\", \"foo\"),\n\t\t\texpectedEventReason: \"NoSAOAuthRedirectURIs\",\n\t\t\texpectedEventMsg: `system:serviceaccount:` + projectName + \":\" + saName + \" has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.<some-value>=<redirect> or create a dynamic URI using serviceaccounts.openshift.io/oauth-redirectreference.<some-value>=<reference>\",\n\t\t\tnumEvents: 1,\n\t\t\texpectBadRequest: true,\n\t\t},\n\t\t\"test-bad-redirect-reference-kind\": {\n\t\t\tannotationPrefix: saoauth.OAuthRedirectModelAnnotationReferencePrefix + \"1\",\n\t\t\tannotation: buildRedirectObjectReferenceString(t, \"foo\", \"route1\", \"route.openshift.io\"),\n\t\t\texpectedEventReason: \"NoSAOAuthRedirectURIs\",\n\t\t\texpectedEventMsg: `system:serviceaccount:` + projectName + \":\" + saName + \" has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.<some-value>=<redirect> or create a dynamic URI using serviceaccounts.openshift.io/oauth-redirectreference.<some-value>=<reference>\",\n\t\t\tnumEvents: 1,\n\t\t\texpectBadRequest: true,\n\t\t},\n\t}\n\n\ttestServer, err := setupTestOAuthServer()\n\tif err != nil {\n\t\tt.Fatalf(\"error setting up test server: %s\", err)\n\t}\n\n\tdefer testServer.oauthServer.Close()\n\tdefer testserver.CleanupMasterEtcd(t, testServer.masterConfig)\n\n\tfor tcName, testCase := range tests {\n\t\tvar redirect string = testServer.oauthServer.URL + \"/oauthcallback\"\n\t\tif testCase.numEvents != 0 {\n\t\t\tredirect = testCase.annotation\n\t\t}\n\n\t\tt.Logf(\"%s: annotationPrefix %s, annotation %s\", tcName, testCase.annotationPrefix, testCase.annotation)\n\t\tsa, err := setupTestSA(testServer.clusterAdminKubeClient, testCase.annotationPrefix, redirect)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"%s: error setting up test SA: %s\", tcName, err)\n\t\t}\n\n\t\tsecret, err := 
setupTestSecrets(testServer.clusterAdminKubeClient, sa)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"%s: error setting up test secrets: %s\", tcName, err)\n\t\t}\n\n\t\trunTestOAuthFlow(t, testServer, sa, secret, redirect, testCase.expectBadRequest)\n\n\t\t// Check events with a short poll to stop flakes\n\t\tvar evList *kapi.EventList\n\t\terr = wait.Poll(time.Second, 5*time.Second, func() (bool, error) {\n\t\t\tevList, err = testServer.clusterAdminKubeClient.Core().Events(projectName).List(metav1.ListOptions{})\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tif len(evList.Items) < testCase.numEvents {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t\treturn true, nil\n\t\t})\n\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"%s: err polling for events\", tcName)\n\t\t}\n\n\t\tevents := collectEventsWithReason(evList, testCase.expectedEventReason)\n\n\t\tif testCase.numEvents != len(events) {\n\t\t\tt.Fatalf(\"%s: expected %d events, found %d\", tcName, testCase.numEvents, len(events))\n\t\t}\n\n\t\tif testCase.numEvents != 0 && events[0].Message != testCase.expectedEventMsg {\n\t\t\tt.Fatalf(\"%s: expected event message %s, got %s\", tcName, testCase.expectedEventMsg, events[0].Message)\n\t\t}\n\n\t\terr = testServer.clusterAdminKubeClient.Core().Events(projectName).DeleteCollection(&metav1.DeleteOptions{}, metav1.ListOptions{})\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"%s: error deleting events: %s\", tcName, err)\n\t\t}\n\t}\n}", "func Null(local *data.Local) error {\n\tif local.WS == nil {\n\t\treturn errors.New(\"websocket not connected\")\n\t}\n\n\tchatNull := chatctl.New()\n\tchatNull.Type = chat.ChatCtlType_CTL_TYPE_NULL\n\tchatNull.Userid = local.User\n\n\tdataNull, err := chatNull.Pack()\n\tmsgTrack := uuid.NewV4().String()\n\n\tmsgWrap, err := mMsgwrap.Pack(pbmsgwrap.MsgType_MSG_CHAT_CTL, pbmsgwrap.MsgSec_SEC_SIGN, local.Key, local.Sign, local.Cipher, msgTrack, &dataNull)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttracker := 
local.WS.AddTrack(msgTrack)\n\n\tif err := local.WS.Write(gwebsock.BinaryMessage, *msgWrap); err != nil {\n\t\treturn err\n\t}\n\n\tmsgData, err := tracker.ReadBlock()\n\tif err != nil {\n\t\treturn err\n\t}\n\t//msgData := <-tracker.Reply\n\n\twsmsg, err := mMsgwrap.Unpack(&msgData.MsgData, local.Sign, local.Cipher)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tchatNullReply := chatctl.New()\n\n\tif err := chatNullReply.Unpack(wsmsg.GetMsgData()); err != nil {\n\t\treturn err\n\t}\n\n\tif chatNullReply.Status != mChat.ChatCtlStatus_CTL_STATUS_OKAY {\n\t\treturn errors.New(\"chat null returned failed status\")\n\t}\n\n\tlocal.Peers[local.WorkerID].Online = true\n\n\treturn nil\n}", "func TestCorrectTokenPasses(t *testing.T) {\n\thand := New(http.HandlerFunc(succHand))\n\thand.SetFailureHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tt.Errorf(\"Test failed. Reason: %v\", Reason(r))\n\t}))\n\n\tserver := httptest.NewServer(hand)\n\tdefer server.Close()\n\n\t// issue the first request to get the token\n\tresp, err := http.Get(server.URL)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tcookie := getRespCookie(resp, CookieName)\n\tif cookie == nil {\n\t\tt.Fatal(\"Cookie was not found in the response.\")\n\t}\n\n\tfinalToken := b64encode(maskToken(b64decode(cookie.Value)))\n\n\tvals := [][]string{\n\t\t{\"name\", \"Jolene\"},\n\t\t{FormFieldName, finalToken},\n\t}\n\n\t// Constructing a custom request is suffering\n\treq, err := http.NewRequest(\"POST\", server.URL, formBodyR(vals))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\treq.Header.Set(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\treq.AddCookie(cookie)\n\n\tresp, err = http.DefaultClient.Do(req)\n\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif resp.StatusCode != 200 {\n\t\tt.Errorf(\"The request should have succeeded, but it didn't. 
Instead, the code was %d\",\n\t\t\tresp.StatusCode)\n\t}\n}", "func (fgs *FakeGraphSync) AssertNoPauseReceived(t *testing.T) {\n\trequire.Empty(t, fgs.pauses, \"should not receive pause request\")\n}", "func Test_SyncConsumersAndSubscriptions_ForEmptyTypes(t *testing.T) {\n\t// given\n\tcallback := func(m *nats.Msg) {}\n\tsubWithOneType := NewSubscriptionWithEmptyTypes()\n\n\t// when\n\tjs := JetStream{}\n\terr := js.syncConsumerAndSubscription(subWithOneType, callback)\n\n\t// then\n\tassert.NoError(t, err)\n}", "func TestInitToken_Ensure_NoExpectedToken_AlreadyExisting(t *testing.T) {\n\tfv := NewFakeVault(t)\n\tdefer fv.Finish()\n\n\tfv.ExpectWrite()\n\n\ti := &InitToken{\n\t\tRole: \"etcd\",\n\t\tPolicies: []string{\"etcd\"},\n\t\tkubernetes: fv.Kubernetes(),\n\t\tExpectedToken: \"\",\n\t}\n\n\t// expect a read and vault says secret is existing\n\tinitTokenPath := \"test-cluster-inside/secrets/init_token_etcd\"\n\tfv.fakeLogical.EXPECT().Read(initTokenPath).Return(\n\t\t&vault.Secret{\n\t\t\tData: map[string]interface{}{\"init_token\": \"existing-token\"},\n\t\t},\n\t\tnil,\n\t)\n\n\tfv.fakeToken.EXPECT().Lookup(\"existing-token\").Return(\n\t\tnil,\n\t\tnil,\n\t)\n\n\tfv.fakeToken.EXPECT().Renew(\"existing-token\", 0).Return(\n\t\tnil,\n\t\tnil,\n\t)\n\n\tInitTokenEnsure_EXPECTs(fv)\n\n\terr := i.Ensure()\n\tif err != nil {\n\t\tt.Error(\"unexpected error: \", err)\n\t}\n\n\ttoken, err := i.InitToken()\n\tif err != nil {\n\t\tt.Error(\"unexpected error: \", err)\n\t}\n\n\tif exp, act := \"existing-token\", token; exp != act {\n\t\tt.Errorf(\"unexpected token: act=%s exp=%s\", act, exp)\n\t}\n\n\treturn\n}", "func (fgs *FakeGraphSync) AssertNoResumeReceived(t *testing.T) {\n\trequire.Empty(t, fgs.resumes, \"should not receive resume request\")\n}", "func TestEmittingMessage(t *testing.T) {\n\tsink := make(chan bool, 1)\n\tclient := NewClient()\n\n\ttimeout, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)\n\tdefer 
cancel()\n\n\tclient.Subscribe(Before, func(ctx context.Context, message interface{}) {\n\t\tsink <- true\n\t})\n\n\tclient.Emit(context.Background(), Before, nil)\n\n\tselect {\n\tcase <-timeout.Done():\n\t\tt.Fatal(\"Timeout reached\")\n\tcase <-sink:\n\t}\n}", "func Test_Onu_StateMachine_dhcp_no_auth(t *testing.T) {\n\tt.Skip(\"Needs to be moved in the Service struct\")\n\tonu := createTestOnu()\n\n\tonu.InternalState.SetState(OnuStateEnabled)\n\tassert.Equal(t, onu.InternalState.Current(), OnuStateEnabled)\n\n\terr := onu.InternalState.Event(\"start_dhcp\")\n\tif err == nil {\n\t\tt.Fail()\n\t}\n\tassert.Equal(t, onu.InternalState.Current(), OnuStateEnabled)\n\tassert.Equal(t, err.Error(), \"transition canceled with error: cannot-go-to-dhcp-started-as-authentication-is-required\")\n}", "func TestEnumNull(t *testing.T) {\n\tclient := newQueriesClient(t)\n\tresult, err := client.EnumNull(context.Background(), nil)\n\trequire.NoError(t, err)\n\trequire.Zero(t, result)\n}", "func TestValidateBasicMsgCreateInvalidTokenArgumentGivesError(t *testing.T) {\n\tmessage := newValidMsgCreateBond()\n\tmessage.Token = \"123abc\" // starts with number\n\terr := message.ValidateBasic()\n\trequire.NotNil(t, err)\n\n\tmessage.Token = \"a\" // too short\n\terr = message.ValidateBasic()\n\trequire.NotNil(t, err)\n}", "func (_IUniswapV2Pair *IUniswapV2PairSession) Token0() (common.Address, error) {\r\n\treturn _IUniswapV2Pair.Contract.Token0(&_IUniswapV2Pair.CallOpts)\r\n}", "func (m *MockWebsocketAppInterface) CheckToken(userID int, csrfToken string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CheckToken\", userID, csrfToken)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func Test_GetCommentToken(t *testing.T) {\n\tparser := &Parser{}\n\trequire.Equal(t, \"\", parser.GetCommentToken())\n}", "func (_IUniswapV2Pair *IUniswapV2PairCallerSession) Token0() (common.Address, error) {\r\n\treturn _IUniswapV2Pair.Contract.Token0(&_IUniswapV2Pair.CallOpts)\r\n}", "func 
ParseEmptyToken(c *cli.Context) (*vela.Client, error) {\n\tlogrus.Debug(\"parsing tokenless Vela client from provided configuration\")\n\n\t// capture the address from the context\n\taddress := c.String(internal.FlagAPIAddress)\n\n\t// check if client address is set\n\tif len(address) == 0 {\n\t\treturn nil, fmt.Errorf(\"no client address provided\")\n\t}\n\n\tlogrus.Tracef(\"creating Vela client for %s\", address)\n\n\t// create a vela client from the provided address\n\treturn vela.NewClient(address, nil)\n}", "func TestGetToken(t *testing.T) {\n\tmc := MockClient{t: t}\n\tmc.DoFunc = validDo\n\tmc.GetFunc = validGet\n\tconfig := ClientConfig{\n\t\tScopes: []string{\"thing\"},\n\t\tOktaDomain: \"mockta.local\",\n\t\tHTTPClient: &mc,\n\t}\n\n\tclient, err := NewClient(config)\n\n\tif err != nil {\n\t\tt.Fatalf(\"Failed: %s\", err)\n\t}\n\n\t// Test surge of requests these should all use the same key\n\tresult := testConcurrency(client, 0, 100, t)\n\tif len(result) > 1 {\n\t\tt.Fatalf(\"Concurrency Test 1 Failed: got %d, want 1\\n\", len(result))\n\t}\n\n\t// Test renewals\n\tresult = testConcurrency(client, 1000, 10, t)\n\tif len(result) != 10 {\n\t\tt.Fatalf(\"Concurrency Test 2 Failed: got %d, want 10\\n\", len(result))\n\t}\n}", "func TestNilAccept(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\ttpt, err := createTpoolTester(t.Name())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer tpt.Close()\n\n\terr = tpt.tpool.AcceptTransactionSet(nil)\n\tif err == nil {\n\t\tt.Error(\"no error returned when submitting nothing to the transaction pool\")\n\t}\n\terr = tpt.tpool.AcceptTransactionSet([]types.Transaction{})\n\tif err == nil {\n\t\tt.Error(\"no error returned when submitting nothing to the transaction pool\")\n\t}\n}", "func (suite *AuthSuite) TestAuthUnknownServiceMember() {\n\t// Set up: Prepare the session, goth.User, callback handler, http response\n\t// and request, landing URL, and pass them into authorizeUnknownUser\n\n\thandlerConfig 
:= suite.HandlerConfig()\n\tappnames := handlerConfig.AppNames()\n\n\t// Prepare the session and session manager\n\tfakeToken := \"some_token\"\n\tsession := auth.Session{\n\t\tApplicationName: auth.MilApp,\n\t\tIDToken: fakeToken,\n\t\tHostname: appnames.MilServername,\n\t}\n\tsessionManager := handlerConfig.SessionManagers().Mil\n\tmockSender := setUpMockNotificationSender() // We should get an email for this activity\n\n\t// Prepare the goth.User to simulate the UUID and email that login.gov would\n\t// provide\n\tfakeUUID, _ := uuid.NewV4()\n\tuser := goth.User{\n\t\tUserID: fakeUUID.String(),\n\t\tEmail: \"[email protected]\",\n\t}\n\tctx := suite.SetupSessionContext(context.Background(), &session, sessionManager)\n\n\t// Call the function under test\n\tresult := authorizeUnknownUser(ctx, suite.AppContextWithSessionForTest(&session), user,\n\t\tsessionManager, mockSender)\n\tsuite.Equal(authorizationResultAuthorized, result)\n\tmockSender.(*mocks.NotificationSender).AssertNumberOfCalls(suite.T(), \"SendNotification\", 1)\n\n\t// Look up the user and service member in the test DB\n\tfoundUser, _ := models.GetUserFromEmail(suite.DB(), user.Email)\n\tserviceMemberID := session.ServiceMemberID\n\tserviceMember, _ := models.FetchServiceMemberForUser(suite.DB(), &session, serviceMemberID)\n\t// Look up the session token in the session store (this test uses the memory store)\n\tsessionStore := sessionManager.Store()\n\t_, existsBefore, _ := sessionStore.Find(foundUser.CurrentMilSessionID)\n\n\t// Verify service member exists and its ID is populated in the session\n\tsuite.NotEmpty(session.ServiceMemberID)\n\n\t// Verify session contains UserID that points to the newly-created user\n\tsuite.Equal(foundUser.ID, session.UserID)\n\n\t// Verify user's LoginGovEmail and LoginGovUUID match the values passed in\n\tsuite.Equal(user.Email, foundUser.LoginGovEmail)\n\tsuite.Equal(user.UserID, foundUser.LoginGovUUID.String())\n\n\t// Verify that the user's CurrentMilSessionID is 
not empty. The value is\n\t// generated randomly, so we can't test for a specific string. Any string\n\t// except an empty string is acceptable.\n\tsuite.NotEqual(\"\", foundUser.CurrentMilSessionID)\n\n\t// Verify the session token also exists in the session store\n\tsuite.Equal(true, existsBefore)\n\n\t// Verify the service member that was created is associated with the user\n\t// that was created\n\tsuite.Equal(foundUser.ID, serviceMember.UserID)\n}", "func RegisterToken(username string, token string) error {\n return nil;\n}", "func TestTokenCreateHandler2(t *testing.T) {\n\tapp, trx, down, err := models.NewAppForTest(nil, t)\n\tassert.Nil(t, err)\n\tdefer down(t)\n\ttestDBConn = trx\n\trouter := Routers()\n\n\tw := httptest.NewRecorder()\n\tapi := buildRoute(config.DefaultConfig.HTTP.APIPrefix, \"/token/create\")\n\tbody := fmt.Sprintf(\"appUid=%s&nonce=%s\", app.UID, models.RandomWithMD5(128))\n\tsign := SignStrWithSecret(body, app.Secret)\n\tbody = fmt.Sprintf(\"%s&sign=%s\", body, sign)\n\treq, _ := http.NewRequest(\"POST\", api, strings.NewReader(body))\n\treq.Header.Set(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\trouter.ServeHTTP(w, req)\n\tassert.Equal(t, http.StatusOK, w.Code)\n\n\tvar response = &Response{}\n\tjson := janitor.ConfigCompatibleWithStandardLibrary\n\tassert.Nil(t, json.Unmarshal(w.Body.Bytes(), response))\n\tassert.True(t, response.Success)\n\n\trespData := response.Data.(map[string]interface{})\n\trespAvailableTimes := respData[\"availableTimes\"].(float64)\n\tassert.Equal(t, -1, int(respAvailableTimes))\n\trespTokenValue := respData[\"token\"].(string)\n\tassert.Equal(t, 32, len(respTokenValue))\n\tassert.Nil(t, respData[\"ip\"])\n\trespReadOnly := respData[\"readOnly\"].(float64)\n\tassert.Equal(t, 0, int(respReadOnly))\n\trespReadPath := respData[\"path\"].(string)\n\tassert.Equal(t, \"/\", respReadPath)\n\tassert.Nil(t, respData[\"expiredAt\"])\n}" ]
[ "0.8175133", "0.7667413", "0.61625266", "0.61268103", "0.59948516", "0.5929725", "0.58923304", "0.56820005", "0.5672691", "0.5638859", "0.5632581", "0.5596265", "0.5511962", "0.5491512", "0.5467685", "0.54452044", "0.5420265", "0.54192835", "0.53779507", "0.5359243", "0.53143144", "0.5310769", "0.5298158", "0.52974033", "0.5229123", "0.52138454", "0.51548386", "0.5132649", "0.5115815", "0.5113386", "0.5102094", "0.508923", "0.50864655", "0.5042282", "0.5041863", "0.50287175", "0.5012143", "0.49884927", "0.49878976", "0.49804828", "0.49780032", "0.49694252", "0.49687535", "0.49584767", "0.49111226", "0.48873806", "0.48832998", "0.48804006", "0.4877549", "0.48751393", "0.48633307", "0.4849981", "0.4847102", "0.48388022", "0.48380667", "0.48375916", "0.48375043", "0.4837234", "0.48328596", "0.483276", "0.48315617", "0.48290482", "0.48106474", "0.48036686", "0.47988668", "0.47902521", "0.47898933", "0.47854853", "0.47831333", "0.4781414", "0.4777767", "0.47736925", "0.4771867", "0.47717214", "0.4763211", "0.47625214", "0.47611043", "0.4760461", "0.47490156", "0.4743313", "0.47351262", "0.47336122", "0.4731413", "0.47273827", "0.4706914", "0.46951193", "0.46874875", "0.46852475", "0.4679618", "0.46749234", "0.4671038", "0.4668231", "0.46613765", "0.46382728", "0.46350908", "0.4634299", "0.4633294", "0.46286228", "0.46238247", "0.4622572" ]
0.7793353
1
Test that TokenEvent with an invalid cid causes panic.
func TestServiceTokenEvent_WithInvalidCID_CausesPanic(t *testing.T) { runTest(t, func(s *res.Service) { s.Handle("model", res.GetResource(func(r res.GetRequest) { r.NotFound() })) }, func(s *restest.Session) { restest.AssertPanic(t, func() { s.Service().TokenEvent("invalid.*.cid", nil) }) }) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestServiceTokenEventWithID_WithInvalidCID_CausesPanic(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.GetResource(func(r res.GetRequest) { r.NotFound() }))\n\t}, func(s *restest.Session) {\n\t\trestest.AssertPanic(t, func() {\n\t\t\ts.Service().TokenEventWithID(\"invalid.*.cid\", \"foo\", nil)\n\t\t})\n\t})\n}", "func TestInvalidConsensusChangeSubscription(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\tt.Parallel()\n\tcst, err := createConsensusSetTester(t.Name())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer cst.Close()\n\n\tms := newMockSubscriber()\n\tbadCCID := modules.ConsensusChangeID{255, 255, 255}\n\terr = cst.cs.ConsensusSetSubscribe(&ms, badCCID, cst.cs.tg.StopChan())\n\tif err != modules.ErrInvalidConsensusChangeID {\n\t\tt.Error(\"consensus set returning the wrong error during an invalid subscription:\", err)\n\t}\n\n\tcst.cs.mu.Lock()\n\tfor i := range cst.cs.subscribers {\n\t\tif cst.cs.subscribers[i] == &ms {\n\t\t\tt.Fatal(\"subscriber was not removed from subscriber list after an erroneus subscription\")\n\t\t}\n\t}\n\tcst.cs.mu.Unlock()\n}", "func TestServiceTokenEventWithID_WithNilToken_SendsNilToken(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.GetResource(func(r res.GetRequest) { r.NotFound() }))\n\t}, func(s *restest.Session) {\n\t\ts.Service().TokenEventWithID(mock.CID, \"foo\", nil)\n\t\ts.GetMsg().AssertTokenEventWithID(mock.CID, \"foo\", nil)\n\t})\n}", "func TestVnicContainer_Invalid(t *testing.T) {\n\tvnic, err := newContainerVnic(\"testcvnic\")\n\n\tif err = vnic.getDevice(); err == nil {\n\t\tt.Errorf(\"Non existent device: %v\", vnic)\n\t}\n\tif !strings.HasPrefix(err.Error(), \"vnic error\") {\n\t\tt.Errorf(\"Invalid error format %v\", err)\n\t}\n\n\tif err = vnic.enable(); err == nil {\n\t\tt.Errorf(\"Non existent device: %v\", vnic)\n\t}\n\tif !strings.HasPrefix(err.Error(), \"vnic error\") {\n\t\tt.Errorf(\"Invalid error format 
%v\", err)\n\t}\n\n\tif err = vnic.disable(); err == nil {\n\t\tt.Errorf(\"Non existent device: %v\", vnic)\n\t}\n\tif !strings.HasPrefix(err.Error(), \"vnic error\") {\n\t\tt.Errorf(\"Invalid error format %v\", err)\n\t}\n\n\tif err = vnic.destroy(); err == nil {\n\t\tt.Errorf(\"Non existent device: %v\", vnic)\n\t}\n\tif !strings.HasPrefix(err.Error(), \"vnic error\") {\n\t\tt.Errorf(\"Invalid error format %v\", err)\n\t}\n\n}", "func TestAuthRequestNilTokenEvent(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.Auth(\"method\", func(r res.AuthRequest) {\n\t\t\tr.TokenEvent(nil)\n\t\t\tr.OK(nil)\n\t\t}))\n\t}, func(s *restest.Session) {\n\t\treq := s.Auth(\"test.model\", \"method\", nil)\n\t\ts.GetMsg().\n\t\t\tAssertTokenEvent(mock.CID, nil)\n\t\treq.Response().\n\t\t\tAssertResult(nil)\n\t})\n}", "func TestInvalidCollectInterval(t *testing.T) {\n\tensureProcessExit(t, \"TestNoDatadogAPIKey\",\n\t\tfalse, \"invalid duration\",\n\t\t\"C2D_COLLECT_INTERVAL=some_bogus_value\")\n}", "func TestBadToken(t *testing.T) {\n\n\t// Run the command with a bad token value\n\toutput := executeCommand(\"123\")\n\n\t// We should have a subcommand required command and a complete usage dump\n\trequire.NotNil(t, executeError, \"there should have been an error\")\n\trequire.Condition(t, func() bool {\n\t\treturn checkForExpectedSTSCallFailure(executeError)\n\t}, \"Error should have complained about nonexistent credentials file or invalid MFA token length\")\n\n\trequire.Empty(t, output, \"Output for an error condition should have been empty\")\n}", "func TestInvalidToValidSubscription(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\tt.Parallel()\n\tcst, err := createConsensusSetTester(t.Name())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer cst.Close()\n\n\t// Start by performing a bad subscribe.\n\tms := newMockSubscriber()\n\tbadCCID := modules.ConsensusChangeID{255, 255, 255}\n\terr = cst.cs.ConsensusSetSubscribe(&ms, badCCID, 
cst.cs.tg.StopChan())\n\tif err != modules.ErrInvalidConsensusChangeID {\n\t\tt.Error(\"consensus set returning the wrong error during an invalid subscription:\", err)\n\t}\n\n\t// Perform a correct subscribe.\n\terr = cst.cs.ConsensusSetSubscribe(&ms, modules.ConsensusChangeBeginning, cst.cs.tg.StopChan())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Mine a block and check that the mock subscriber only got a single\n\t// consensus change.\n\tnumPrevUpdates := len(ms.updates)\n\t_, err = cst.miner.AddBlock()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif len(ms.updates) != numPrevUpdates+1 {\n\t\tt.Error(\"subscriber received two consensus changes for a single block\")\n\t}\n}", "func TestVnic_Invalid(t *testing.T) {\n\tvnic, err := newVnic(\"testvnic\")\n\n\tif err = vnic.getDevice(); err == nil {\n\t\tt.Errorf(\"Non existent device: %v\", vnic)\n\t}\n\tif !strings.HasPrefix(err.Error(), \"vnic error\") {\n\t\tt.Errorf(\"Invalid error format %v\", err)\n\t}\n\n\tif err = vnic.enable(); err == nil {\n\t\tt.Errorf(\"Non existent device: %v\", vnic)\n\t}\n\tif !strings.HasPrefix(err.Error(), \"vnic error\") {\n\t\tt.Errorf(\"Invalid error format %v\", err)\n\t}\n\n\tif err = vnic.disable(); err == nil {\n\t\tt.Errorf(\"Non existent device: %v\", vnic)\n\t}\n\tif !strings.HasPrefix(err.Error(), \"vnic error\") {\n\t\tt.Errorf(\"Invalid error format %v\", err)\n\t}\n\n\tif err = vnic.destroy(); err == nil {\n\t\tt.Errorf(\"Non existent device: %v\", vnic)\n\t}\n\tif !strings.HasPrefix(err.Error(), \"vnic error\") {\n\t\tt.Errorf(\"Invalid error format %v\", err)\n\t}\n\n}", "func TestConnectInvalidAddr(t *testing.T) {\n\t// connect\n\tctx := createContext(t, time.Second*20)\n\n\t_, errConnect := base.NewMilvusClient(ctx, client.Config{Address: \"aa\"})\n\tcommon.CheckErr(t, errConnect, false, \"context deadline exceeded\")\n}", "func TestServiceTokenEvent_WithNilToken_SendsNilToken(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", 
res.GetResource(func(r res.GetRequest) { r.NotFound() }))\n\t}, func(s *restest.Session) {\n\t\ts.Service().TokenEvent(mock.CID, nil)\n\t\ts.GetMsg().AssertTokenEvent(mock.CID, nil)\n\t})\n}", "func contextInvalid(context string) bool {\n\tif context != \".\" && !strings.Contains(context, \"cx-\") {\n\t\tlog.Warn(\"Context is malformed.\", \"context\", context)\n\t\treturn true\n\t}\n\treturn false\n}", "func testBatchCTXInvalidAddenda(t testing.TB) {\n\tmockBatch := NewBatchCTX(mockBatchCTXHeader())\n\tmockBatch.AddEntry(mockCTXEntryDetail())\n\taddenda05 := mockAddenda05()\n\taddenda05.TypeCode = \"63\"\n\tmockBatch.GetEntries()[0].AddAddenda05(addenda05)\n\tmockBatch.Entries[0].AddendaRecordIndicator = 1\n\terr := mockBatch.Create()\n\tif !base.Match(err, ErrAddendaTypeCode) {\n\t\tt.Errorf(\"%T: %s\", err, err)\n\t}\n}", "func TestNewIDAllocatorInvalidArgs(t *testing.T) {\n\tdefer leaktest.AfterTest(t)()\n\targs := [][]uint32{\n\t\t{0, 10}, // minID <= 0\n\t\t{2, 0}, // blockSize < 1\n\t}\n\tfor i := range args {\n\t\tif _, err := newIDAllocator(nil, nil, args[i][0], args[i][1], nil); err == nil {\n\t\t\tt.Errorf(\"expect to have error return, but got nil\")\n\t\t}\n\t}\n}", "func CheckTheValidityOfTheToken(token string) (newToken string, err error) {\n\n err = checkInit()\n if err != nil {\n return\n }\n\n err = createError(011)\n\n if v, ok := tokens[token]; ok {\n var expires = v.(map[string]interface{})[\"expires\"].(time.Time)\n var userID = v.(map[string]interface{})[\"id\"].(string)\n\n if expires.Sub(time.Now().Local()) < 0 {\n return\n }\n\n newToken = setToken(userID, token)\n\n err = nil\n\n } else {\n return\n }\n\n return\n}", "func ErrInvalidVin(codespace sdk.CodespaceType) sdk.Error {\n\treturn sdk.NewError(codespace, InvalidVin, InvalidVinMessage)\n}", "func TestAuthResource_WithInvalidRID_CausesPanic(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.Auth(\"method\", func(r res.AuthRequest) 
{\n\t\t\trestest.AssertPanicNoRecover(t, func() {\n\t\t\t\tr.Resource(\"test..foo\")\n\t\t\t})\n\t\t}))\n\t}, func(s *restest.Session) {\n\t\ts.Auth(\"test.model\", \"method\", nil).\n\t\t\tResponse().\n\t\t\tAssertErrorCode(res.CodeInternalError)\n\t})\n}", "func (s *SocketModeAdapter) onInvalidAuth(info *adapter.Info) *adapter.ProviderEvent {\n\treturn s.wrapEvent(\n\t\tadapter.EventAuthenticationError,\n\t\tinfo,\n\t\t&adapter.AuthenticationErrorEvent{\n\t\t\tMsg: fmt.Sprintf(\"Connection failed to %s: invalid credentials\", s.provider.Name),\n\t\t},\n\t)\n}", "func checkInvalidTx(t *testing.T, anteHandler sdk.AnteHandler, ctx sdk.Context, tx sdk.Tx, simulate bool, code sdk.CodeType) {\n\tnewCtx, result, abort := anteHandler(ctx, tx, simulate)\n\trequire.True(t, abort)\n\trequire.Equal(t, code, result.Code, fmt.Sprintf(\"Expected %v, got %v\", code, result))\n\trequire.Equal(t, sdk.CodespaceRoot, result.Codespace)\n\n\tif code == sdk.CodeOutOfGas {\n\t\tstdTx, ok := tx.(StdTx)\n\t\trequire.True(t, ok, \"tx must be in form auth.StdTx\")\n\t\t// GasWanted set correctly\n\t\trequire.Equal(t, stdTx.Fee.Gas, result.GasWanted, \"Gas wanted not set correctly\")\n\t\trequire.True(t, result.GasUsed > result.GasWanted, \"GasUsed not greated than GasWanted\")\n\t\t// Check that context is set correctly\n\t\trequire.Equal(t, result.GasUsed, newCtx.GasMeter().GasConsumed(), \"Context not updated correctly\")\n\t}\n}", "func TestInvalidDatadogAPIKey(t *testing.T) {\n\tensureProcessExit(t, \"TestNoDatadogAPIKey\",\n\t\tfalse, \"Invalid Datadog API key\",\n\t\t\"DATADOG_API_KEY=consul2dogstats_bogus_key\")\n}", "func TestWithContractAuthErrors(t *testing.T) {\n\tvar expSTType errors.StackTrace\n\n\targs := []string{mock.Anything}\n\n\ttests := []struct {\n\t\tcRef string\n\t\tc rbac.ContractFunc\n\t\texpSC int32\n\t\texpC int32\n\t\tmsg string\n\t\tcidRoles string\n\t\tcidFound bool\n\t\tcidErr error\n\t}{\n\t\t{\n\t\t\tcRef: contractCreateTransfer,\n\t\t\tc: 
mockContract,\n\t\t\texpSC: http.StatusUnauthorized,\n\t\t\texpC: rbac.CodeErrAuthentication,\n\t\t\tmsg: \"when an error is returned from the CID\",\n\t\t\tcidRoles: mock.Anything,\n\t\t\tcidFound: false,\n\t\t\tcidErr: errors.New(\"some err from cid\"),\n\t\t},\n\t\t{\n\t\t\tcRef: contractCreateTransfer,\n\t\t\tc: mockContract,\n\t\t\texpSC: http.StatusForbidden,\n\t\t\texpC: rbac.CodeErrRoles,\n\t\t\tmsg: \"when the roleAttr is not found in the identity\",\n\t\t\tcidRoles: mock.Anything,\n\t\t\tcidFound: false,\n\t\t\tcidErr: nil,\n\t\t},\n\t\t{\n\t\t\tcRef: contractCreateTransfer,\n\t\t\tc: mockContract,\n\t\t\texpSC: http.StatusForbidden,\n\t\t\texpC: rbac.CodeErrContract,\n\t\t\tmsg: \"when the role is not found in the permissions map\",\n\t\t\tcidRoles: \"anUnknownRole\",\n\t\t\tcidFound: true,\n\t\t\tcidErr: nil,\n\t\t},\n\t\t{\n\t\t\tcRef: contractCreateTransfer,\n\t\t\tc: mockContract,\n\t\t\texpSC: http.StatusForbidden,\n\t\t\texpC: rbac.CodeErrContract,\n\t\t\tmsg: \"when contract invocation is not allowed\",\n\t\t\tcidRoles: \"user\",\n\t\t\tcidFound: true,\n\t\t\tcidErr: nil,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tstub := initEmptyStub()\n\t\tcid := new(mockCID)\n\t\tcid.On(\"GetAttributeValue\", mock.Anything).Return(tt.cidRoles, tt.cidFound, tt.cidErr)\n\t\tcid.On(\"GetID\", mock.Anything).Return(mock.Anything)\n\n\t\tappAuth, err := rbac.New(stub, cid, getRolePerms(), \"roles\")\n\t\t// If the New constructor didn't fail\n\t\tif err == nil {\n\t\t\t_, err = appAuth.WithContractAuth(tt.cRef, args, tt.c)\n\t\t}\n\n\t\tassert.Implements(t, (*error)(nil), err)\n\t\tassert.Implements(t, (*rbac.AuthErrorInterface)(nil), err)\n\t\tassert.IsType(t, (string)(\"\"), err.Error())\n\n\t\tif assert.Error(t, err) {\n\t\t\tt.Logf(\"Should return an error with code %v and HTTP status code %v %v\\nmsg: %v\", tt.expC, tt.expSC, tt.msg, err)\n\n\t\t\tif e, ok := err.(rbac.AuthErrorInterface); ok {\n\t\t\t\tassert.Equal(t, tt.expC, 
e.Code())\n\t\t\t\tassert.Equal(t, tt.expSC, e.StatusCode())\n\t\t\t\tassert.IsType(t, expSTType, e.StackTrace())\n\t\t\t}\n\t\t}\n\t}\n}", "func TestBadKVDef(t *testing.T) {\n\tinput := \"badentry\"\n\tbr := bufio.NewReader(strings.NewReader(input))\n\tp := newParser(br)\n\t_, err := p.NextValue()\n\tif err.(*ParseError).Code() != ErrInvalidEntry {\n\t\tt.Fatalf(\"expected err=ErrInvalidEntry actual=%s\", err)\n\t}\n}", "func checkInvalidTx(t *testing.T, anteHandler sdk.AnteHandler, ctx sdk.Context, tx sdk.Tx, simulate bool, code sdk.CodeType) {\n\t_, result, abort := anteHandler(ctx, tx, simulate)\n\trequire.True(t, abort, \"abort, expected: true, got: false\")\n\n\trequire.Equal(t, code, result.Code, fmt.Sprintf(\"Expected %v, got %v\", code, result))\n\trequire.Equal(t, sdk.CodespaceRoot, result.Codespace, \"code not match\")\n\n\t// if code == sdk.CodeOutOfGas {\n\t// stdTx, ok := tx.(auth.StdTx)\n\t// require.True(t, ok, \"tx must be in form auth.StdTx\")\n\t// GasWanted set correctly\n\t// require.Equal(t, stdTx.Fee.GasWanted, result.GasWanted, \"Gas wanted not set correctly\")\n\t// require.True(t, result.GasUsed > result.GasWanted, \"GasUsed not greated than GasWanted\")\n\t// Check that context is set correctly\n\t// require.Equal(t, result.GasUsed, newCtx.GasMeter().GasConsumed(), \"Context not updated correctly\")\n\t// }\n}", "func TestClientAuthInvalidPublickey(t *testing.T) {\n\tkc := new(keychain)\n\tkc.keys = append(kc.keys, dsakey)\n\tconfig := &ClientConfig{\n\t\tUser: \"testuser\",\n\t\tAuth: []ClientAuth{\n\t\t\tClientAuthKeyring(kc),\n\t\t},\n\t}\n\n\tc, err := Dial(\"tcp\", newMockAuthServer(t), config)\n\tif err == nil {\n\t\tc.Close()\n\t\tt.Fatalf(\"dsa private key should not have authenticated with rsa public key\")\n\t}\n}", "func TestAgentFailsRequestWithoutToken(t *testing.T) {\n\tif *skip {\n\t\tt.Skip(\"Test is skipped until Citadel agent is setup in test.\")\n\t}\n\tclient, err := 
sdsc.NewClient(sdsc.ClientOptions{\n\t\tServerAddress: *sdsUdsPath,\n\t})\n\tif err != nil {\n\t\tt.Errorf(\"failed to create sds client\")\n\t}\n\tclient.Start()\n\tdefer client.Stop()\n\tclient.Send()\n\terrmsg := \"no credential token\"\n\t_, err = client.WaitForUpdate(3 * time.Second)\n\tif err == nil || strings.Contains(err.Error(), errmsg) {\n\t\tt.Errorf(\"got [%v], want error with substring [%v]\", err, errmsg)\n\t}\n}", "func (err BadCrtcError) BadId() uint32 {\n\treturn 0\n}", "func testValidateEDTransactionCode(t testing.TB) {\n\ted := mockEntryDetail()\n\ted.TransactionCode = 63\n\terr := ed.Validate()\n\tif !base.Match(err, ErrTransactionCode) {\n\t\tt.Errorf(\"%T: %s\", err, err)\n\t}\n}", "func TestCorruptedTokenLogin(t *testing.T) {\n\t// @todo this test is disabled now because it was\n\t// built on internal assumptions that no longer hold and not so easy to access anymore\n\t// TrySuite(t, testCorruptedLogin, retryCount)\n}", "func TestConsulStateDriverInitInvalidConfig(t *testing.T) {\n\tdriver := &ConsulStateDriver{}\n\tcommonTestStateDriverInitInvalidConfig(t, driver)\n}", "func TestInvalidClient(t *testing.T) {\n\tassert := assert.New(t)\n\n\tw := webhook.W{\n\t\tUntil: time.Now().Add(60 * time.Second),\n\t\tEvents: []string{\"iot\"},\n\t}\n\tw.Config.URL = \"http://localhost:9999/foo\"\n\tw.Config.ContentType = \"application/json\"\n\n\tobs, err := OutboundSenderFactory{\n\t\tListener: w,\n\t\tCutOffPeriod: time.Second,\n\t\tNumWorkers: 10,\n\t\tQueueSize: 10,\n\t\tProfilerFactory: testServerProfilerFactory,\n\t\tLogger: getLogger(),\n\t}.New()\n\tassert.Nil(obs)\n\tassert.NotNil(err)\n}", "func TestProcessTokenWithBadSignNew(t *testing.T) {\n\tconfig.SetConfigurationFromFile(\"../../../config/config-test.json\")\n\n\t// build the information of the course to be created (in a simplified way)\n\tjsonBody := simplejson.New()\n\tjsonBody.Set(\"name\", \"corso\")\n\n\t// generate a token to be appended to the course creation request\n\tuser := 
microservice.User{Name: \"nome\", Surname: \"cognome\", Username: \"username\", Password: \"password\", Type: \"teacher\", Mail: \"[email protected]\"}\n\ttoken, _ := microservice.GenerateAccessToken(user, []byte(\"wrong-signing-key\"))\n\n\t// make the POST request for the course creation\n\trequestBody, _ := jsonBody.MarshalJSON()\n\trequest, _ := http.NewRequest(http.MethodPost, \"/didattica-mobile/api/v1.0/courses\", bytes.NewBuffer(requestBody))\n\trequest.Header.Set(\"Content-Type\", \"application/json\")\n\trequest.AddCookie(&http.Cookie{Name: \"token\", Value: token})\n\n\tresponse := httptest.NewRecorder()\n\thandler := createTestGatewayCreateCourse()\n\t// Goroutines represent the micro-services listening to the requests coming from the api gateway\n\tgo mock.LaunchCourseManagementMock()\n\tgo mock.LaunchNotificationManagementMock()\n\t// simulates a request-response interaction between client and api gateway\n\thandler.ServeHTTP(response, request)\n\n\tif response.Code != http.StatusUnauthorized {\n\t\tt.Error(\"Expected 401 Unauthorized but got \" + strconv.Itoa(response.Code) + \" \" + http.StatusText(response.Code))\n\t}\n}", "func TestNewMovieErrorUuid(t *testing.T) {\n\tt.Helper()\n\n\tu := newValidUser()\n\twantError := errs.E(errs.Validation, errs.Parameter(\"ID\"), errors.New(errs.MissingField(\"ID\").Error()))\n\tif gotMovie, gotError := movie.NewMovie(uuid.UUID{}, \"randomExternalId\", u); !reflect.DeepEqual(wantError.Error(), gotError.Error()) && gotMovie != nil {\n\t\tt.Errorf(\"Want: %v\\nGot: %v\", wantError, gotError)\n\t}\n}", "func ErrRegisterExpiredEvent(unixTime int64) sdk.Error {\n\treturn types.NewError(types.CodeRegisterExpiredEvent, fmt.Sprintf(\"register event at expired time %v\", unixTime))\n}", "func TestBatchCTXInvalidAddenda(t *testing.T) {\n\ttestBatchCTXInvalidAddenda(t)\n}", "func TestInvalidAddress(t *testing.T) {\n\tt.Run(\"FetchChain\", func(t *testing.T) {\n\t\t_, err := FetchChain(\"iamateapot:418\")\n\t\tif err == 
nil {\n\t\t\tt.Errorf(\"Expected failure when calling with an invalid address, err is nil\")\n\t\t}\n\t})\n\n\tt.Run(\"Expired\", func(t *testing.T) {\n\t\t_, err := Expired(\"iamateapot:418\")\n\t\tif err == nil {\n\t\t\tt.Errorf(\"Expected failure when calling with an invalid address, err is nil\")\n\t\t}\n\t})\n\n\tt.Run(\"ExiresWithinDays\", func(t *testing.T) {\n\t\t_, err := ExpiresWithinDays(\"iamateapot:418\", 30)\n\t\tif err == nil {\n\t\t\tt.Errorf(\"Expected failure when calling with an invalid address, err is nil\")\n\t\t}\n\t})\n\n\tt.Run(\"ExpiresBeforeDate\", func(t *testing.T) {\n\t\t_, err := ExpiresBeforeDate(\"iamateapot:418\", time.Now())\n\t\tif err == nil {\n\t\t\tt.Errorf(\"Expected failure when calling with an invalid address, err is nil\")\n\t\t}\n\t})\n}", "func TestInvalidJSON(t *testing.T) {\n\tjson := `{\"action\":jump\", \"time\":100}`\n\t_, err := ParseEventJSON(json)\n\tif err == nil {\n\t\tt.Errorf(\"JSON parsing of %v should have generated error, but didn't\", json)\n\t}\n}", "func TestAuthRequestTokenEvent(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.Auth(\"method\", func(r res.AuthRequest) {\n\t\t\tr.TokenEvent(mock.Token)\n\t\t\tr.OK(nil)\n\t\t}))\n\t}, func(s *restest.Session) {\n\t\treq := s.Auth(\"test.model\", \"method\", nil)\n\t\ts.GetMsg().\n\t\t\tAssertTokenEvent(mock.CID, mock.Token)\n\t\treq.Response().\n\t\t\tAssertResult(nil)\n\t})\n}", "func TestMalformedEvent(t *testing.T) {\n\ttestCases := []string{\n\t\t\"\",\n\t\t\"HTTP/1.1 200 OK\",\n\t\t\" \",\n\t\t\"\\x00\",\n\t}\n\n\tfor i, testCase := range testCases {\n\t\t_, kw, body := splitEvent(testCase)\n\t\tevent := upgradeEvent(kw, body)\n\n\t\tvar malformed MalformedEvent\n\t\tvar ok bool\n\t\tif malformed, ok = event.(MalformedEvent); !ok {\n\t\t\tt.Errorf(\"test %d got %T; want %T\", i, event, malformed)\n\t\t\tcontinue\n\t\t}\n\n\t\twantString := fmt.Sprintf(\"Malformed Event %q\", testCase)\n\t\tif gotString := 
malformed.String(); gotString != wantString {\n\t\t\tt.Errorf(\"test %d String returned %q; want %q\", i, gotString, wantString)\n\t\t}\n\t}\n}", "func TestGetUserIDInvalid(t *testing.T) {\n\tts := initAPITestServer(t)\n\tdefer test.CloseServer(ts)\n\n\tinvalidUsername := \"not_\" + username\n\tid, err := GetUserID(invalidUsername)\n\tif err == nil || err.Error() != \"Username not found\" {\n\t\tt.Fatalf(\"Expected error\")\n\t}\n\tif id != \"\" {\n\t\tt.Fatalf(\"Expected empty userID\")\n\t}\n}", "func testBatchCTXInvalidBuild(t testing.TB) {\n\tmockBatch := mockBatchCTX(t)\n\tmockBatch.GetHeader().ServiceClassCode = 3\n\terr := mockBatch.Create()\n\tif !base.Match(err, ErrServiceClass) {\n\t\tt.Errorf(\"%T: %s\", err, err)\n\t}\n}", "func TestEditWithInvalidToken(t *testing.T) {\n\tcrud := moc.NewLoadedCRUD()\n\thandler := createGqlHandler(crud)\n\tassert := assert.New(t)\n\n\t// prepare query\n\tquery := fmt.Sprintf(`\n\t\tmutation{\n\t\t\tedit(token: \"%s\"){\n\t\t\t\t... on Editor{}\n\t\t\t}\n\t\t}\n\t`, \"bad_token\")\n\n\t// request\n\tresponse, err := gqlRequestAndRespond(handler, query, nil)\n\tfailOnError(assert, err)\n\tassert.Contains(response, \"errors\", msgNoError)\n}", "func TestTokenExpiracy(t *testing.T) {\n\tdb.InitDB()\n\tvar router *gin.Engine = routes.SetupRouter()\n\n\tos.Setenv(\"TOKEN_VALIDITY_MINUTES\", \"0\")\n\n\tvar user models.UserCreate = utils.CreateUser(\"Tom\", \"qwerty1234\", t, router)\n\tuser.Token = utils.ConnectUser(\"Tom\", \"qwerty1234\", t, router)\n\ttime.Sleep(1 * time.Second)\n\n\tvar url string = \"/v1/user/\" + strconv.Itoa(user.ID)\n\tvar bearer = \"Bearer \" + user.Token\n\trecord := httptest.NewRecorder()\n\trequest, _ := http.NewRequest(\"GET\", url, nil)\n\trequest.Header.Add(\"Content-Type\", \"application/json\")\n\trequest.Header.Add(\"Authorization\", bearer)\n\n\trouter.ServeHTTP(record, request)\n\n\tvar message Message\n\terr := json.Unmarshal([]byte(record.Body.String()), &message)\n\tif err != nil 
{\n\t\tlog.Fatal(\"Bad output: \", err.Error())\n\t\tt.Fail()\n\t}\n\n\tassert.Equal(t, record.Code, 401)\n\tassert.Equal(t, message.Message, \"Token expired.\")\n\n\tos.Setenv(\"TOKEN_VALIDITY_MINUTES\", \"15\")\n\n\tuser.Token = utils.ConnectUser(\"Tom\", \"qwerty1234\", t, router)\n\n\tutils.CleanUser(user.ID, user.Token, t, router)\n\tdb.CloseDB()\n}", "func verifyCtr() error {\n\tif CtrdClient == nil {\n\t\treturn fmt.Errorf(\"verifyCtr: Container client is nil\")\n\t}\n\n\tif ctrdCtx == nil {\n\t\treturn fmt.Errorf(\"verifyCtr: Container context is nil\")\n\t}\n\treturn nil\n}", "func TestValidateBasicMsgCreateInvalidTokenArgumentGivesError(t *testing.T) {\n\tmessage := newValidMsgCreateBond()\n\tmessage.Token = \"123abc\" // starts with number\n\terr := message.ValidateBasic()\n\trequire.NotNil(t, err)\n\n\tmessage.Token = \"a\" // too short\n\terr = message.ValidateBasic()\n\trequire.NotNil(t, err)\n}", "func TestValidEvents(t *testing.T) {\n\ttestCases := []struct {\n\t\tevents []string\n\t\terrCode APIErrorCode\n\t}{\n\t\t// Return error for unknown event element.\n\t\t{\n\t\t\tevents: []string{\n\t\t\t\t\"s3:UnknownAPI\",\n\t\t\t},\n\t\t\terrCode: ErrEventNotification,\n\t\t},\n\t\t// Return success for supported event.\n\t\t{\n\t\t\tevents: []string{\n\t\t\t\t\"s3:ObjectCreated:Put\",\n\t\t\t},\n\t\t\terrCode: ErrNone,\n\t\t},\n\t\t// Return success for supported events.\n\t\t{\n\t\t\tevents: []string{\n\t\t\t\t\"s3:ObjectCreated:*\",\n\t\t\t\t\"s3:ObjectRemoved:*\",\n\t\t\t},\n\t\t\terrCode: ErrNone,\n\t\t},\n\t\t// Return error for empty event list.\n\t\t{\n\t\t\tevents: []string{\"\"},\n\t\t\terrCode: ErrEventNotification,\n\t\t},\n\t}\n\n\tfor i, testCase := range testCases {\n\t\terrCode := checkEvents(testCase.events)\n\t\tif testCase.errCode != errCode {\n\t\t\tt.Errorf(\"Test %d: Expected \\\"%d\\\", got \\\"%d\\\"\", i+1, testCase.errCode, errCode)\n\t\t}\n\t}\n}", "func AssertValidKey(key []byte) {\n\tif key == nil {\n\t\tpanic(\"key is 
nil\")\n\t}\n}", "func TestInvalidEvents(t *testing.T) {\n\tassert := assert.New(t)\n\n\tw := webhook.W{\n\t\tUntil: time.Now().Add(60 * time.Second),\n\t}\n\tw.Config.URL = \"http://localhost:9999/foo\"\n\tw.Config.ContentType = \"application/json\"\n\n\tobs, err := OutboundSenderFactory{\n\t\tListener: w,\n\t\tClient: &http.Client{},\n\t\tCutOffPeriod: time.Second,\n\t\tNumWorkers: 10,\n\t\tQueueSize: 10,\n\t\tProfilerFactory: testServerProfilerFactory,\n\t\tLogger: getLogger(),\n\t}.New()\n\tassert.Nil(obs)\n\tassert.NotNil(err)\n\n\tw2 := webhook.W{\n\t\tUntil: time.Now().Add(60 * time.Second),\n\t\tEvents: []string{\"iot(.*\"},\n\t}\n\tw2.Config.URL = \"http://localhost:9999/foo\"\n\tw2.Config.ContentType = \"application/json\"\n\n\tobs, err = OutboundSenderFactory{\n\t\tListener: w2,\n\t\tClient: &http.Client{},\n\t\tCutOffPeriod: time.Second,\n\t\tNumWorkers: 10,\n\t\tQueueSize: 10,\n\t\tLogger: getLogger(),\n\t\tProfilerFactory: testServerProfilerFactory,\n\t}.New()\n\n\tassert.Nil(obs)\n\tassert.NotNil(err)\n}", "func (tc *testContext) testInvalidServicesCM(cmName string, expected *servicescm.Data) error {\n\t// Scale down the WMCO deployment to 0\n\tif err := tc.scaleWMCODeployment(0); err != nil {\n\t\treturn err\n\t}\n\t// Delete existing services CM\n\terr := tc.client.K8s.CoreV1().ConfigMaps(wmcoNamespace).Delete(context.TODO(), cmName, meta.DeleteOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Generate and create a service CM with incorrect data\n\tinvalidServicesCM, err := servicescm.Generate(cmName, wmcoNamespace,\n\t\t&servicescm.Data{Services: []servicescm.Service{{Name: \"fakeservice\", Bootstrap: true}},\n\t\t\tFiles: []servicescm.FileInfo{}})\n\tif err != nil {\n\t\treturn err\n\t}\n\tif _, err := tc.client.K8s.CoreV1().ConfigMaps(wmcoNamespace).Create(context.TODO(), invalidServicesCM,\n\t\tmeta.CreateOptions{}); err != nil {\n\t\treturn err\n\t}\n\n\t// Restart the operator pod\n\tif err := tc.scaleWMCODeployment(1); err != nil 
{\n\t\treturn err\n\t}\n\t// Try to retrieve newly created ConfigMap and validate its contents\n\t_, err = tc.waitForValidWindowsServicesConfigMap(cmName, expected)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error waiting for valid ConfigMap %s: %w\", cmName, err)\n\t}\n\treturn nil\n}", "func InvalidID(c *gin.Context, err error) {\n\tc.JSON(http.StatusBadRequest, &Result{\n\t\tMessage: \"Invalid ID\",\n\t\tCode: 1400,\n\t\tError: err.Error(),\n\t})\n}", "func TestBadToken(t *testing.T) {\n\tassert := assert.New(t)\n\n\tmockedTpmProvider := new(tpmprovider.MockedTpmProvider)\n\tmockedTpmProvider.On(\"Close\").Return(nil)\n\tmockedTpmProvider.On(\"NvIndexExists\", mock.Anything).Return(true, nil)\n\tmockedTpmProvider.On(\"NvRelease\", mock.Anything, mock.Anything).Return(nil)\n\tmockedTpmProvider.On(\"NvDefine\", mock.Anything, mock.Anything, mock.Anything).Return(nil)\n\tmockedTpmProvider.On(\"NvWrite\", mock.Anything, mock.Anything, mock.Anything).Return(nil)\n\n\tmockedTpmFactory := tpmprovider.MockedTpmFactory{TpmProvider: mockedTpmProvider}\n\n\ttrustAgentService, err := CreateTrustAgentService(CreateTestConfig(), mockedTpmFactory)\n\tassert.NoError(err)\n\n\t// setup TA service to use JWT-based authentication\n\ttrustAgentService.router = mux.NewRouter()\n\ttrustAgentService.router.Use(middleware.NewTokenAuth(\"../test/mockJWTDir\", \"../test/mockCACertsDir\", mockRetrieveJWTSigningCerts, cacheTime))\n\ttrustAgentService.router.HandleFunc(\"/v2/tag\", errorHandler(requiresPermission(setAssetTag(CreateTestConfig(), mockedTpmFactory), []string{postDeployTagPerm}))).Methods(\"POST\")\n\n\tjsonString := `{\"tag\" : \"tHgfRQED1+pYgEZpq3dZC9ONmBCZKdx10LErTZs1k/k=\", \"hardware_uuid\" : \"7a569dad-2d82-49e4-9156-069b0065b262\"}`\n\n\trequest, err := http.NewRequest(\"POST\", \"/v2/tag\", bytes.NewBuffer([]byte(jsonString)))\n\tassert.NoError(err)\n\n\trequest.Header.Set(\"Content-Type\", \"application/json\")\n\trequest.Header.Add(\"Authorization\", \"Bearer 
\"+TestBadJWTAuthToken)\n\n\trecorder := httptest.NewRecorder()\n\ttrustAgentService.router.ServeHTTP(recorder, request)\n\tresponse := recorder.Result()\n\tfmt.Printf(\"StatusCode: %d\\n\", response.StatusCode)\n\tassert.Equal(http.StatusUnauthorized, response.StatusCode)\n}", "func (n *NullEventReceiver) EventErrKv(eventName string, err error, kvs map[string]string) error {\n\treturn err\n}", "func TestInvalidDSN(t *testing.T) {\n\tuser, pwd, host := parseDsn(\"\")\n\tstringsEqual(t, \"\", user)\n\tstringsEqual(t, \"\", pwd)\n\tstringsEqual(t, \"\", host)\n}", "func cardReportError(context *Context, err error) {\n\tif context == nil {\n\t\treturn\n\t}\n\tif context.Debug {\n\t\tfmt.Printf(\"*** %s\\n\", err)\n\t}\n\tif IoErrorIsRecoverable {\n\t\ttime.Sleep(500 * time.Millisecond)\n\t\tcontext.reopenRequired = true\n\t}\n}", "func TestNonExistantRequestID(t *testing.T) {\n\ta := wolf.New()\n\n\tvar run bool\n\ta.Get(\"/\", func(ctx context.Context, w http.ResponseWriter, r *http.Request) {\n\t\trun = true\n\t\tassert.Equal(t, \"\", GetReqID(ctx))\n\t})\n\n\tvar w http.ResponseWriter = httptest.NewRecorder()\n\tr, err := http.NewRequest(\"GET\", \"/\", nil)\n\tassert.NoError(t, err)\n\ta.ServeHTTP(w, r)\n\n\tassert.True(t, run)\n}", "func noValidTokenTest(t *testing.T, r *http.Request, h http.Handler, auth *mock.Authenticator) {\n\toriginal := auth.AuthenticateFn\n\tauth.AuthenticateFn = authenticateGenerator(false, errors.New(\"An error\"))\n\tw := httptest.NewRecorder()\n\th.ServeHTTP(w, r)\n\ttest.Equals(t, http.StatusBadRequest, w.Result().StatusCode)\n\tauth.AuthenticateFn = authenticateGenerator(false, nil)\n\tw = httptest.NewRecorder()\n\th.ServeHTTP(w, r)\n\ttest.Equals(t, http.StatusUnauthorized, w.Result().StatusCode)\n\tauth.AuthenticateFn = original\n}", "func TestBatchCTXInvalidBuild(t *testing.T) {\n\ttestBatchCTXInvalidBuild(t)\n}", "func TestNewMovieErrorInvalidUser(t *testing.T) {\n\tt.Helper()\n\n\tu := newInvalidUser()\n\tuid, _ := 
uuid.NewUUID()\n\n\twantError := errs.E(errs.Validation, errs.Parameter(\"User\"), errors.New(\"User is invalid\"))\n\n\tif gotMovie, gotError := movie.NewMovie(uid, \"externalID\", u); !reflect.DeepEqual(wantError.Error(), gotError.Error()) && gotMovie != nil {\n\t\tt.Errorf(\"Want: %v\\nGot: %v\", wantError, gotError)\n\t}\n}", "func (aio *AsyncIO) verifyEvent(evt event) error {\n\tif evt.obj == nil {\n\t\treturn ErrNilCallback\n\t}\n\tre, ok := aio.running.Get(pointer2string(unsafe.Pointer(evt.obj)))\n\tif !ok {\n\t\treturn ErrUntrackedEventKey\n\t}\n\trevt, ok := re.(*runningEvent)\n\tif !ok {\n\t\treturn ErrInvalidEventPtr\n\t}\n\tif revt.iocb != evt.obj {\n\t\treturn ErrInvalidEventPtr\n\t}\n\t// an error occured with this event, remove the running event and set error code.\n\tif evt.res < 0 {\n\t\treturn aio.freeEvent(revt, evt.obj, lookupErrNo(int(evt.res)))\n\t}\n\t//we have an active event returned and its one we are tracking\n\t//ensure it wrote our entire buffer, res is > 0 at this point\n\tif evt.res > 0 && uint(count(revt.data)) != (uint(evt.res)+revt.wrote) {\n\t\trevt.wrote += uint(evt.res)\n\t\tif err := aio.resubmit(revt); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\trevt.wrote += uint(evt.res)\n\n\treturn aio.freeEvent(revt, evt.obj, nil)\n}", "func TestInvalidInput(t *testing.T) {\n\ttests := []struct {\n\t\tinput string\n\t}{\n\t\t//too short\n\t\t{\"12\"},\n\t\t{\"12/14\"},\n\t\t//bad month\n\t\t{\"13/14/1989\"},\n\t\t//bad day\n\t\t{\"12/32/1989\"},\n\t}\n\tfor _, test := range tests {\n\t\t//Create and assign test input\n\t\tobj := DOB{}\n\t\tobj.DOB = test.input\n\n\t\t//Create buffer to catch validateInput() text so we don't spam up the terminal\n\t\tvar b bytes.Buffer\n\n\t\t//Test if test input incorrect sets obj.Validated == true\n\t\t//Should be false if working correctly\n\t\tobj.validateInput(&b)\n\n\t\t//Bad inputs should not have set obj.Validated == true\n\t\t//FAIL if true\n\t\tif obj.Validated == true 
{\n\t\t\tt.Errorf(\"validateInput() = have: %s & '%v', want: false \", test.input, obj.Validated)\n\t\t}\n\t}\n}", "func testEDFieldInclusionTransactionCode(t testing.TB) {\n\tentry := mockEntryDetail()\n\tentry.TransactionCode = 0\n\terr := entry.Validate()\n\tif !base.Match(err, ErrConstructor) {\n\t\tt.Errorf(\"%T: %s\", err, err)\n\t}\n}", "func TestSubscriptionUnsubscribeError(t *testing.T) {\n\tmockTransport := new(mockFScopeTransport)\n\terr := errors.New(\"error\")\n\tmockTransport.On(\"Unsubscribe\").Return(err)\n\tsub := NewFSubscription(\"foo\", mockTransport)\n\tassert.Equal(t, err, sub.Unsubscribe())\n\tmockTransport.AssertExpectations(t)\n}", "func mockErr(mockErrOpts *MockErrOptions, n apns.Packet) error {\n\ti := rand.Intn(101-1) + 1\n\tif i < mockErrOpts.fail {\n\t\tif en, isEN := n.(*apns.EnhancedNotification); isEN {\n\t\t\tresp := &apns.ErrorResponse{\n\t\t\t\tStatus: apns.InvalidTokenStatus,\n\t\t\t\tIdentifier: en.Identifier,\n\t\t\t}\n\t\t\treturn resp\n\t\t}\n\t\treturn io.EOF\n\t}\n\treturn nil\n}", "func TestMsgParseTortureRegBadCt(t *testing.T) {\n\tstr := \"OPTIONS sip:[email protected] SIP/2.0\\r\\n\" +\n\t\t\"Via: SIP/2.0/UDP host4.example.com:5060;branch=z9hG4bKkdju43234\\r\\n\" +\n\t\t\"Max-Forwards: 70\\r\\n\" +\n\t\t\"From: \\\"Bell, Alexander\\\" <sip:[email protected]>;tag=433423\\r\\n\" +\n\t\t\"To: \\\"Watson, Thomas\\\" < sip:[email protected] >\\r\\n\" +\n\t\t\"Call-ID: badaspec.sdf0234n2nds0a099u23h3hnnw009cdkne3\\r\\n\" +\n\t\t\"Accept: application/sdp\\r\\n\" +\n\t\t\"CSeq: 3923239 OPTIONS\\r\\n\" +\n\t\t\"l: 0\\r\\n\\r\\n\"\n\t_, err := MsgParse([]byte(str))\n\tassert.NotNil(t, err)\n\tassert.Contains(t, err.Error(), \"< sip:[email protected] >\")\n}", "func CheckInvalid(tb testing.TB, funcName string, err error) {\n\ttb.Helper()\n\n\tif err != os.ErrInvalid {\n\t\ttb.Errorf(\"%s : want error to be %v, got %v\", funcName, os.ErrInvalid, err)\n\t}\n}", "func cannotBeCalledFromContracts(ctx isc.SandboxBase) {\n\tcaller := 
ctx.Caller()\n\tif caller != nil && caller.Kind() == isc.AgentIDKindContract {\n\t\tpanic(vm.ErrIllegalCall)\n\t}\n}", "func TestNewMovieErrorExtlID(t *testing.T) {\n\tt.Helper()\n\n\tu := newValidUser()\n\tuid, _ := uuid.NewUUID()\n\twantError := errs.E(errs.Validation, errs.Parameter(\"ID\"), errors.New(errs.MissingField(\"ID\").Error()))\n\tif gotMovie, gotError := movie.NewMovie(uid, \"\", u); !reflect.DeepEqual(wantError.Error(), gotError.Error()) && gotMovie != nil {\n\t\tt.Errorf(\"Want: %v\\nGot: %v\", wantError, gotError)\n\t}\n}", "func (s *PartitionCsmSuite) TestOfferInvalid(c *C) {\n\ts.kh.SetOffsetValues(group, topic, s.kh.GetOldestOffsets(topic))\n\tpc := Spawn(s.ns, group, topic, partition, s.cfg, s.groupMember, s.msgFetcherF, s.offsetMgrF)\n\tdefer pc.Stop()\n\n\tmsg, ok := <-pc.Messages()\n\tc.Assert(ok, Equals, true)\n\n\t// When\n\tmsg.EventsCh <- consumer.Event{T: consumer.EvOffered, Offset: msg.Offset + 1}\n\tmsg.EventsCh <- consumer.Event{T: consumer.EvOffered, Offset: msg.Offset - 1}\n\n\t// Then\n\tmsg.EventsCh <- consumer.Event{T: consumer.EvOffered, Offset: msg.Offset}\n\tmsg2, ok := <-pc.Messages()\n\tc.Assert(msg2.Offset, Equals, msg.Offset+1)\n\tc.Assert(ok, Equals, true)\n}", "func ErrInvalidVout(codespace sdk.CodespaceType) sdk.Error {\n\treturn sdk.NewError(codespace, InvalidVout, InvalidVoutMessage)\n}", "func IsMockInvalid(cc ContractCall) bool {\n\treturn false\n}", "func TestUploadCannotAcquireToken(t *testing.T) {\n\t// prepare uploader parameter\n\tlocalNodeId := uuid.New()\n\tsenderChan := make(chan data.ShareCommand)\n\tsender := NewShareSender(senderChan)\n\n\t// create uploader\n\tmaxUploads := 0\n\tuploader := NewShareUploader(localNodeId, maxUploads, sender)\n\n\t// prepare dirs\n\tdownloadDir, base := prepareDirs(t)\n\tdefer os.RemoveAll(downloadDir)\n\tdefer os.RemoveAll(base)\n\t// prepare shared file\n\tsf := createSharedFile(t, base)\n\tdefer os.Remove(sf.FilePath())\n\n\t// prepare download request of unknown 
chunk\n\tnodeId := uuid.New().String()\n\tchunkChecksum := sf.LocalChunksChecksums()[0]\n\trequest := data.NewDownloadRequest(sf.FileId(), nodeId, chunkChecksum)\n\n\t// start message reader for deny message\n\tdone := make(chan bool)\n\tgo readDenyUpload(t, done, senderChan, request)\n\n\t// start upload\n\tuploader.Upload(sf, chunkChecksum, nodeId, filepath.Join(downloadDir, sf.FileRelativePath()))\n\n\t// wait for message\n\t<-done\n}", "func (id InvalidContainerIDError) BadRequest() {}", "func AssertValidKey(key []byte) {\n\tif len(key) == 0 {\n\t\tpanic(\"key is nil or empty\")\n\t}\n\tif len(key) > MaxKeyLength {\n\t\tpanic(\"key is too large\")\n\t}\n}", "func TestInvalidTime(t *testing.T) {\n\ttestCases := []struct {\n\t\th, m, s int\n\t}{\n\t\t{-1, 13, 16},\n\t\t{24, 13, 16},\n\t\t{12, -1, 16},\n\t\t{12, 60, 16},\n\t\t{12, 13, -1},\n\t\t{12, 13, 60},\n\t}\n\tfor _, tc := range testCases {\n\t\tt.Run(fmt.Sprintf(\"input %v\", tc), func(t *testing.T) {\n\n\t\t\tif _, err := NewTime(tc.h, tc.m, tc.s); err == nil {\n\t\t\t\tt.Errorf(\"Expected an error for invalid time %02d:%02d:%02d\", tc.h, tc.m, tc.s)\n\t\t\t}\n\t\t})\n\t}\n}", "func TestInvalidEventRegex(t *testing.T) {\n\n\tassert := assert.New(t)\n\n\tw := webhook.W{\n\t\tUntil: time.Now().Add(60 * time.Second),\n\t\tEvents: []string{\"[[:123\"},\n\t}\n\tw.Config.URL = \"http://localhost:9999/foo\"\n\tw.Config.ContentType = \"application/json\"\n\n\tobs, err := OutboundSenderFactory{\n\t\tListener: w,\n\t\tClient: &http.Client{},\n\t\tNumWorkers: 10,\n\t\tQueueSize: 10,\n\t\tProfilerFactory: testServerProfilerFactory,\n\t\tLogger: getLogger(),\n\t}.New()\n\tassert.Nil(obs)\n\tassert.NotNil(err)\n\n}", "func (s *ServerTestSuite) TestNewServerWithBadSigningKey() {\n\tts := NewServer(\"test-server\", \":9999\", \"9.99.999\", s.info, nil, true, \"\", 37 * time.Minute)\n\tassert.Nil(s.T(), ts)\n}", "func TestInvalidCutOffPeriod(t *testing.T) {\n\n\tassert := assert.New(t)\n\n\ttrans := &transport{}\n\n\tobs, 
err := simpleSetup(trans, 0*time.Second, nil)\n\tassert.Nil(obs)\n\tassert.NotNil(err)\n}", "func TestFlow_InvalidPacket(t *testing.T) {\n\tinvalidIPpacket := []byte{0xab, 0xbc}\n\n\t_, err := FindFlow(invalidIPpacket)\n\tif err == nil {\n\t\tt.Errorf(\"Unable to detect invalid flow from %v\\n\", invalidIPpacket)\n\t}\n}", "func testBatchXCKInvalidBuild(t testing.TB) {\n\tmockBatch := mockBatchXCK(t)\n\tmockBatch.GetHeader().ServiceClassCode = 3\n\terr := mockBatch.Create()\n\tif !base.Match(err, ErrServiceClass) {\n\t\tt.Errorf(\"%T: %s\", err, err)\n\t}\n}", "func TestValidAuth(t *testing.T) {\n\tt.Parallel()\n\ta, err := getAuth()\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif !ValidAuth(a) {\n\t\tt.Error(ErrInvalidAuth)\n\t}\n}", "func TestContactAddInvalidData(t *testing.T) {\n\tdb := database.Connect()\n\tu := models.User{\n\t\tEmail: \"[email protected]\",\n\t}\n\tu.Create(db)\n\tut, _ := u.AddToken(db)\n\n\ttype Data struct {\n\t\tID int64\n\t}\n\td := Data{ID: 321}\n\tj, _ := json.Marshal(d)\n\tb := bytes.NewBuffer(j)\n\n\tr, err := http.NewRequest(\"POST\", \"/\", b)\n\tr.Header.Add(\"Content-Type\", \"application/json\")\n\tr.Header.Add(\"X-Access-Token\", ut.Token)\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error\", err)\n\t}\n\n\tw := httptest.NewRecorder()\n\tc := SetupWebContext()\n\tContactAdd(c, w, r)\n\tif w.Code != http.StatusBadRequest {\n\t\tt.Errorf(\"%v expected, got %v instead\", http.StatusBadRequest, w.Code)\n\t}\n}", "func verifyCustomToken(t *testing.T, ct, uid string) *auth.Token {\n\tidt, err := signInWithCustomToken(ct)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer deleteUser(uid)\n\n\tvt, err := client.VerifyIDToken(context.Background(), idt)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif vt.UID != uid {\n\t\tt.Errorf(\"UID = %q; want UID = %q\", vt.UID, uid)\n\t}\n\tif vt.Firebase.Tenant != \"\" {\n\t\tt.Errorf(\"Tenant = %q; want = %q\", vt.Firebase.Tenant, \"\")\n\t}\n\treturn vt\n}", "func 
testInvalidRootCertWithClientAuth(t *testing.T) {\n\tsrv := getServer(rootPort, testdataDir, \"\", 0, t)\n\tsrv = getTLSConfig(srv, \"RequireAndVerifyClientCert\", []string{\"../testdata/root.pem\", \"../testdata/root2.pem\"})\n\n\terr := srv.Start()\n\tif err == nil {\n\t\tt.Error(\"Root2.pem does not exists, server should have failed to start\")\n\t}\n}", "func (e StreamEventsRequest_IdentifierValidationError) Cause() error { return e.cause }", "func (s *EventSuite) TestDeleteEvent_WrongID(c *C) {\n\taccounts := CorrectDeploy(1, 0, 1, 1, 1, true, true)\n\tapplication := accounts[0].Applications[0]\n\tuser := application.Users[0]\n\tevent := user.Events[0]\n\n\trouteName := \"deleteCurrentUserEvent\"\n\troute := getComposedRoute(routeName, event.ID+1)\n\tcode, _, err := runRequest(routeName, route, \"\", signApplicationRequest(application, user, true, true))\n\tc.Assert(err, IsNil)\n\n\tc.Assert(err, IsNil)\n\tc.Assert(code, Equals, http.StatusNotFound)\n}", "func TestRegisterByDeviceFailWithInvalidContentType(t *testing.T) {\n\t// initialize\n\tapiTest.T = t\n\ttestCaseStatusError := []struct {\n\t\tname string\n\t\tparamRequest map[string][]string\n\t}{\n\t\t{\n\t\t\tname: \"invalid content type\",\n\t\t\tparamRequest: map[string][]string{\n\t\t\t\t\"device_id\": {\"123e4567-e89b-12d3-a456-123456789123\"},\n\t\t\t},\n\t\t},\n\t}\n\tt.Run(testCaseStatusError[0].name, func(t *testing.T) {\n\t\tresp := sendRequest(testCaseStatusError[0].paramRequest, \"application/x-www-form-urlencoded, test\", apiTest)\n\t\t// check status bad request.\n\t\tcheckStatusCodeResponse(t, resp, http.StatusBadRequest)\n\t\t// check response data.\n\t\tcheckJSONResponeMessage(t, resp, \"Parse request error.\")\n\t\t// check user is not created in user_app table\n\t\tassert.False(t, checkUserExisted(testCaseStatusError[0].paramRequest[\"device_id\"][0]))\n\t})\n}", "func (n *EventReceiver) EventErrKv(eventName string, err error, kvs map[string]string) error {\n\tlogger.Errorf(\"%+v\", 
err)\n\tlogger.Errorf(\"%s: %+v\", eventName, kvs)\n\treturn err\n}", "func secp256k1GoPanicIllegal(msg *C.char, data unsafe.Pointer) {\n\tpanic(\"illegal argument: \" + C.GoString(msg))\n}", "func TestNextEventAfterFailedSubscribe(t *testing.T) {\n\tdctx, dcancel := context.WithCancel(context.Background())\n\tdefer dcancel()\n\n\tconn := mockCharon(dctx)\n\n\ts, err := NewSession(withTestConn(conn))\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create session: %v\", err)\n\t}\n\tdefer s.Close()\n\n\t// This should result in an IO error, and if handled properly within\n\t// the event listener, the error should not be sent on the event channel.\n\ts.el.conn.Close()\n\tif err := s.Subscribe(\"test-event\"); err == nil {\n\t\tt.Fatalf(\"Expected error reading from closed transport\")\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)\n\tdefer cancel()\n\n\t_, err = s.NextEvent(ctx)\n\tif err != ctx.Err() {\n\t\tt.Fatalf(\"Expected to get timeout error, got: %v\", err)\n\t}\n}", "func ErrInvalidMarketplaceID(id string) sdk.Error {\r\n\treturn sdk.NewError(\r\n\t\tDefaultCodespace,\r\n\t\tErrorCodeClaimsWithMarketplaceNotFound,\r\n\t\tfmt.Sprintf(\"Invalid marketplace id: %s\", id))\r\n}", "func TestSignContractFailure(t *testing.T) {\n\tsignatureHelper(t, true)\n}", "func TestNextEventCancel(t *testing.T) {\n\tmaybeSkipIntegrationTest(t)\n\n\ts, err := NewSession()\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create session: %v\", err)\n\t}\n\tdefer s.Close()\n\n\tif err := s.Subscribe(\"ike-updown\"); err != nil {\n\t\tt.Fatalf(\"Failed to start event listener: %v\", err)\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)\n\tdefer cancel()\n\n\t_, err = s.NextEvent(ctx)\n\tif err != ctx.Err() {\n\t\tt.Fatalf(\"Expected to get context's timeout error after not receiving event, got: %v\", err)\n\t}\n}", "func ERROR_AUTH_TOKEN_INVALID(w http.ResponseWriter) {\n\tbuildForeignError(w, http.StatusForbidden, 
\"ERROR_AUTH_TOKEN_INVALID\", \"\")\n}", "func TestToken(t *testing.T) {\n\tkey := []byte(\"26BF237B95964852625A2C27988C3\")\n\tSetSecret(key)\n\tc := NewClaims(1, 15*time.Minute)\n\tc.SetIssuer(\"token_test\")\n\tc.SetSubject(\"test\")\n\ttok, err := c.Token()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tc, err = Decode(tok)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func ErrCorruptUserID(err error) *influxdb.Error {\n\treturn &influxdb.Error{\n\t\tCode: influxdb.EInvalid,\n\t\tMsg: \"corrupt ID provided\",\n\t\tErr: err,\n\t}\n}", "func TestInvalidFingerprintCausesFailed(t *testing.T) {\n\treport := test.CheckRoutines(t)\n\tdefer report()\n\n\tpcOffer, err := NewPeerConnection(Configuration{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tpcAnswer, err := NewPeerConnection(Configuration{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tdefer closePairNow(t, pcOffer, pcAnswer)\n\n\tofferChan := make(chan SessionDescription)\n\tpcOffer.OnICECandidate(func(candidate *ICECandidate) {\n\t\tif candidate == nil {\n\t\t\tofferChan <- *pcOffer.PendingLocalDescription()\n\t\t}\n\t})\n\n\tconnectionHasFailed, closeFunc := context.WithCancel(context.Background())\n\tpcAnswer.OnConnectionStateChange(func(connectionState PeerConnectionState) {\n\t\tif connectionState == PeerConnectionStateFailed {\n\t\t\tcloseFunc()\n\t\t}\n\t})\n\n\tif _, err = pcOffer.CreateDataChannel(\"unusedDataChannel\", nil); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\toffer, err := pcOffer.CreateOffer(nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if err := pcOffer.SetLocalDescription(offer); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tselect {\n\tcase offer := <-offerChan:\n\t\t// Replace with invalid fingerprint\n\t\tre := regexp.MustCompile(`sha-256 (.*?)\\r`)\n\t\toffer.SDP = re.ReplaceAllString(offer.SDP, \"sha-256 AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA\\r\")\n\n\t\tif err := pcAnswer.SetRemoteDescription(offer); err != nil 
{\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tanswer, err := pcAnswer.CreateAnswer(nil)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tif err = pcAnswer.SetLocalDescription(answer); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\terr = pcOffer.SetRemoteDescription(answer)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\tcase <-time.After(5 * time.Second):\n\t\tt.Fatal(\"timed out waiting to receive offer\")\n\t}\n\n\tselect {\n\tcase <-connectionHasFailed.Done():\n\tcase <-time.After(30 * time.Second):\n\t\tt.Fatal(\"timed out waiting for connection to fail\")\n\t}\n}", "func (mt *Mytoken) Valid() error {\n\tstandardClaims := jwt.StandardClaims{\n\t\tAudience: mt.Audience,\n\t\tExpiresAt: int64(mt.ExpiresAt),\n\t\tId: mt.ID.String(),\n\t\tIssuedAt: int64(mt.IssuedAt),\n\t\tIssuer: mt.Issuer,\n\t\tNotBefore: int64(mt.NotBefore),\n\t\tSubject: mt.Subject,\n\t}\n\tif err := errors.WithStack(standardClaims.Valid()); err != nil {\n\t\treturn err\n\t}\n\tif ok := standardClaims.VerifyIssuer(config.Get().IssuerURL, true); !ok {\n\t\treturn errors.New(\"invalid issuer\")\n\t}\n\tif ok := standardClaims.VerifyAudience(config.Get().IssuerURL, true); !ok {\n\t\treturn errors.New(\"invalid Audience\")\n\t}\n\tif ok := mt.verifyID(); !ok {\n\t\treturn errors.New(\"invalid id\")\n\t}\n\tif ok := mt.verifySubject(); !ok {\n\t\treturn errors.New(\"invalid subject\")\n\t}\n\treturn nil\n}", "func (mt *Mytoken) Valid() error {\n\tstandardClaims := jwt.StandardClaims{\n\t\tAudience: mt.Audience,\n\t\tExpiresAt: int64(mt.ExpiresAt),\n\t\tId: mt.ID.String(),\n\t\tIssuedAt: int64(mt.IssuedAt),\n\t\tIssuer: mt.Issuer,\n\t\tNotBefore: int64(mt.NotBefore),\n\t\tSubject: mt.Subject,\n\t}\n\tif err := errors.WithStack(standardClaims.Valid()); err != nil {\n\t\treturn err\n\t}\n\tif ok := standardClaims.VerifyIssuer(config.Get().IssuerURL, true); !ok {\n\t\treturn errors.New(\"invalid issuer\")\n\t}\n\tif ok := standardClaims.VerifyAudience(config.Get().IssuerURL, true); !ok {\n\t\treturn 
errors.New(\"invalid Audience\")\n\t}\n\tif ok := mt.verifyID(); !ok {\n\t\treturn errors.New(\"invalid id\")\n\t}\n\tif ok := mt.verifySubject(); !ok {\n\t\treturn errors.New(\"invalid subject\")\n\t}\n\treturn nil\n}", "func ErrAddressNotAuthorised() sdk.Error {\r\n\treturn sdk.NewError(\r\n\t\tDefaultCodespace,\r\n\t\tErrorCodeAddressNotAuthorised,\r\n\t\t\"This address is not authorised to perform this action.\")\r\n}", "func TestGetByEmailInvalid(t *testing.T) {\n\tdb := database.Connect()\n\tu := User{\n\t\tEmail: \"[email protected]\",\n\t}\n\tu.GetByEmail(db)\n\tif u.ID != 0 {\n\t\tt.Errorf(\"Expected no result, got %v\", u)\n\t}\n}", "func TestPanicWithGenericValueOnAccess(t *testing.T) {\n\trunTest(t, func(s *Session) {\n\t\ts.Handle(\"model\", res.Access(func(r res.AccessRequest) {\n\t\t\tpanic(42)\n\t\t}))\n\t}, func(s *Session) {\n\t\tinb := s.Request(\"access.test.model\", nil)\n\t\ts.GetMsg(t).\n\t\t\tAssertSubject(t, inb).\n\t\t\tAssertErrorCode(t, \"system.internalError\")\n\t})\n}" ]
[ "0.78261864", "0.5751174", "0.52965206", "0.5272917", "0.5241354", "0.5213487", "0.5169806", "0.5148139", "0.5139948", "0.51134753", "0.5077146", "0.50146776", "0.50133395", "0.5010127", "0.500105", "0.49700215", "0.49307236", "0.4902067", "0.489147", "0.48595318", "0.48185354", "0.48173773", "0.48089525", "0.47840703", "0.47547525", "0.47519335", "0.4751794", "0.47433087", "0.4742343", "0.47407967", "0.47233585", "0.4722141", "0.47192323", "0.47101945", "0.4697755", "0.4692945", "0.46859214", "0.46711943", "0.46699712", "0.46655294", "0.46596047", "0.4659378", "0.4656396", "0.46559796", "0.46558136", "0.4654188", "0.46490738", "0.46490327", "0.4644763", "0.46405566", "0.46160084", "0.46159008", "0.4613755", "0.46074438", "0.45878994", "0.45854363", "0.4582095", "0.45753554", "0.45651558", "0.4563885", "0.45605475", "0.45605004", "0.45603582", "0.45599607", "0.45584625", "0.4555646", "0.45475355", "0.45422444", "0.45417354", "0.45369256", "0.4531623", "0.45272094", "0.45266724", "0.45214733", "0.45058882", "0.4503085", "0.4498208", "0.44981468", "0.44976795", "0.44767794", "0.44762394", "0.44709557", "0.44697645", "0.4466482", "0.44594532", "0.44586527", "0.44550517", "0.4444541", "0.44436732", "0.44408175", "0.4434401", "0.44336906", "0.44333205", "0.44320628", "0.44304806", "0.44284722", "0.44284722", "0.4426763", "0.44232854", "0.44226485" ]
0.80473816
0
Test that Reset sends a system.reset event.
func TestServiceReset(t *testing.T) { tbl := []struct { Resources []string Access []string Expected interface{} }{ {nil, nil, nil}, {[]string{}, nil, nil}, {nil, []string{}, nil}, {[]string{}, []string{}, nil}, {[]string{"test.foo.>"}, nil, json.RawMessage(`{"resources":["test.foo.>"]}`)}, {nil, []string{"test.foo.>"}, json.RawMessage(`{"access":["test.foo.>"]}`)}, {[]string{"test.foo.>"}, []string{"test.bar.>"}, json.RawMessage(`{"resources":["test.foo.>"],"access":["test.bar.>"]}`)}, {[]string{"test.foo.>"}, []string{}, json.RawMessage(`{"resources":["test.foo.>"]}`)}, {[]string{}, []string{"test.foo.>"}, json.RawMessage(`{"access":["test.foo.>"]}`)}, } for _, l := range tbl { runTest(t, func(s *res.Service) { s.Handle("model", res.GetResource(func(r res.GetRequest) { r.NotFound() })) }, func(s *restest.Session) { s.Service().Reset(l.Resources, l.Access) // Send token event to flush any system.reset event s.Service().TokenEvent(mock.CID, nil) if l.Expected != nil { s.GetMsg(). AssertSubject("system.reset"). AssertPayload(l.Expected) } s.GetMsg().AssertTokenEvent(mock.CID, nil) }) } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestReset(t *testing.T) {\n\ttestCancel(t, false)\n}", "func (m *Machine) Reset() error {\n\tm.State = driver.Running\n\tfmt.Printf(\"Reset %s: %s\\n\", m.Name, m.State)\n\treturn nil\n}", "func MockOnResetSystem(ctx context.Context, mockAPI *redfishMocks.RedfishAPI,\n\tsystemID string, requestBody *redfishClient.ResetRequestBody, redfishErr redfishClient.RedfishError,\n\thttpResponse *http.Response, err error) {\n\trequest := redfishClient.ApiResetSystemRequest{}.ResetRequestBody(*requestBody)\n\tmockAPI.On(\"ResetSystem\", ctx, systemID).Return(request).Times(1)\n\tmockAPI.On(\"ResetSystemExecute\", mock.Anything).Return(redfishErr, httpResponse, err).Times(1)\n}", "func (computersystem *ComputerSystem) Reset(resetType ResetType) error {\n\t// Make sure the requested reset type is supported by the system\n\tvalid := false\n\tif len(computersystem.SupportedResetTypes) > 0 {\n\t\tfor _, allowed := range computersystem.SupportedResetTypes {\n\t\t\tif resetType == allowed {\n\t\t\t\tvalid = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t} else {\n\t\t// No allowed values supplied, assume we are OK\n\t\tvalid = true\n\t}\n\n\tif !valid {\n\t\treturn fmt.Errorf(\"reset type '%s' is not supported by this service\",\n\t\t\tresetType)\n\t}\n\n\tt := struct {\n\t\tResetType ResetType\n\t}{ResetType: resetType}\n\n\treturn computersystem.Post(computersystem.resetTarget, t)\n}", "func TestServiceTokenReset(t *testing.T) {\n\ttbl := []struct {\n\t\tSubject string\n\t\tTIDs []string\n\t\tExpected interface{}\n\t}{\n\t\t{\"auth\", nil, nil},\n\t\t{\"auth\", []string{}, nil},\n\t\t{\"auth\", []string{\"foo\"}, json.RawMessage(`{\"tids\":[\"foo\"],\"subject\":\"auth\"}`)},\n\t\t{\"auth\", []string{\"foo\", \"bar\"}, json.RawMessage(`{\"tids\":[\"foo\",\"bar\"],\"subject\":\"auth\"}`)},\n\t\t{\"auth.test.method\", []string{\"foo\", \"bar\"}, json.RawMessage(`{\"tids\":[\"foo\",\"bar\"],\"subject\":\"auth.test.method\"}`)},\n\t}\n\n\tfor _, l := range tbl {\n\t\trunTest(t, func(s 
*res.Service) {\n\t\t\ts.Handle(\"model\", res.GetResource(func(r res.GetRequest) { r.NotFound() }))\n\t\t}, func(s *restest.Session) {\n\t\t\ts.Service().TokenReset(l.Subject, l.TIDs...)\n\t\t\t// Send token event to flush any system.tokenReset event\n\t\t\ts.Service().TokenEvent(mock.CID, nil)\n\n\t\t\tif l.Expected != nil {\n\t\t\t\ts.GetMsg().\n\t\t\t\t\tAssertSubject(\"system.tokenReset\").\n\t\t\t\t\tAssertPayload(l.Expected)\n\t\t\t}\n\n\t\t\ts.GetMsg().AssertTokenEvent(mock.CID, nil)\n\t\t})\n\t}\n}", "func (d *SHT2xDriver) Reset() (err error) {\n\tif err = d.connection.WriteByte(SHT2xSoftReset); err != nil {\n\t\treturn\n\t}\n\n\ttime.Sleep(15 * time.Millisecond) // 15ms delay (from the datasheet 5.5)\n\n\treturn\n}", "func (v *VsctlMock) Reset() {\n\tv.ReceivedArgs = [][]string{}\n}", "func (s *Server) OnReset() error { return nil }", "func RunTestReset(t *testing.T, e1, e2 streams.StreamProvider) {\n\tserver := func(provider streams.StreamProvider) error {\n\t\tlistener := provider.Listen(nil)\n\t\tfor {\n\t\t\tstream, err := listener.Accept()\n\t\t\tif err != nil {\n\t\t\t\tif err != io.EOF {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tif err := stream.Reset(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// Fails due to spdystream bug\n\t\t\t// https://github.com/docker/spdystream/issues/45\n\t\t\tif _, err := stream.Write([]byte(\"some value\")); err == nil {\n\t\t\t\treturn fmt.Errorf(\"Expected error writing after reset\")\n\t\t\t}\n\t\t}\n\t}\n\tclient := func(provider streams.StreamProvider) error {\n\t\tstream, err := provider.NewStream(nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tb := make([]byte, 10)\n\t\tif n, err := stream.Read(b); err != nil && err != io.EOF {\n\t\t\treturn err\n\t\t} else if err == nil && n > 0 {\n\t\t\treturn fmt.Errorf(\"Expected read of %d bytes\", n)\n\t\t} else if err == nil {\n\t\t\treturn fmt.Errorf(\"Expected error reading from stream\")\n\t\t}\n\t\treturn 
nil\n\t}\n\trunTest(t, e1, e2, client, server)\n}", "func (se *systemdExec) reset() error {\n\tlog.Printf(\"systemd/exec %v: reset\", se.unit)\n\n\tif err := se.conn.ResetFailedUnit(se.unit); err == nil {\n\n\t} else if dbusErr, ok := err.(godbus.Error); ok && dbusErr.Name == \"org.freedesktop.systemd1.NoSuchUnit\" {\n\t\treturn nil\n\t} else {\n\t\treturn fmt.Errorf(\"dbus.ResetFailedUnit %v: %v\", se.unit, err)\n\t}\n\n\treturn nil\n}", "func (sm *StateMachine) Reset(in *Msg) {\n\tsm.state = 0\n\tsm.stateEntered = false\n\tsm.plugin.SetMemory(in, StateKey, 0)\n\tsm.plugin.SetMemory(in, stateEnteredKey, false)\n\tsm.resetFn(in)\n}", "func (c *SwitchTicker) Reset() {\n\tatomic.StoreInt64(&c.failCount, 0)\n}", "func Reset() int {\n\tstatus := C.Reset()\n\treturn int(status)\n}", "func (t *Timer) Reset() {\n\tt.Start()\n}", "func cmdReset() {\n\tswitch state := status(B2D.VM); state {\n\tcase vmUnregistered:\n\t\tlog.Fatalf(\"%s is not registered.\", B2D.VM)\n\tcase vmRunning:\n\t\tif err := vbm(\"controlvm\", B2D.VM, \"reset\"); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\tdefault:\n\t\tlog.Printf(\"%s is not running.\", B2D.VM)\n\t}\n}", "func (c *Client) Reset() error {\n\terr := c.writeMsg(\"RSET\\r\\n\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmsg, err := c.readMsg(singleLineMessageTerminator)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif c.isError(msg) {\n\t\treturn fmt.Errorf(\"Unknown error returned %v\", msg)\n\t}\n\n\tfmt.Print(\"Calling reset\\n\")\n\n\treturn nil\n}", "func (timer *WallclockTimer) Reset() error {\n\ttimer.command <- \"reset\"\n\treturn nil\n}", "func (c *Client) Reset() error {\n\tif err := c.hello(); err != nil {\n\t\treturn err\n\t}\n\tif _, _, err := c.cmd(250, \"RSET\"); err != nil {\n\t\treturn err\n\t}\n\tc.rcpts = nil\n\treturn nil\n}", "func (this *Monitor) Sys_ResetReq(Type byte) error {\n\treturn this.SendAsynchData(MT_SYS_RESET_REQ, []byte{Type})\n}", "func (tf *TestFixture) Reset(ctx context.Context) error {\n\treturn 
nil\n}", "func (self *SinglePad) Reset() {\n self.Object.Call(\"reset\")\n}", "func Reset() (int, error) {\n\tif !Enable { // not enable\n\t\treturn 0, nil\n\t}\n\n\t// on windows cmd.exe\n\tif isLikeInCmd {\n\t\treturn winReset()\n\t}\n\n\treturn fmt.Print(ResetSet)\n}", "func tearDown() {\n\t/*\n\t\tpubSubResetRequest, _ := http.NewRequest(\"POST\", \"http://localhost:8080/reset\", nil)\n\t\thttpClient := &http.Client{}\n\t\t_, err := httpClient.Do(pubSubResetRequest)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\ttime.Sleep(5 *time.Second)\n\n\t*/\n}", "func (mock *Serf) Reset() {\n\tlockSerfBootstrap.Lock()\n\tmock.calls.Bootstrap = nil\n\tlockSerfBootstrap.Unlock()\n\tlockSerfCluster.Lock()\n\tmock.calls.Cluster = nil\n\tlockSerfCluster.Unlock()\n\tlockSerfID.Lock()\n\tmock.calls.ID = nil\n\tlockSerfID.Unlock()\n\tlockSerfJoin.Lock()\n\tmock.calls.Join = nil\n\tlockSerfJoin.Unlock()\n\tlockSerfMember.Lock()\n\tmock.calls.Member = nil\n\tlockSerfMember.Unlock()\n\tlockSerfShutdown.Lock()\n\tmock.calls.Shutdown = nil\n\tlockSerfShutdown.Unlock()\n}", "func (m *MockServerStreamConnection) Reset(reason types.StreamResetReason) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Reset\", reason)\n}", "func (m *UsersMgmtServiceServerMock) Reset() {\n\tm.GetUsersFunc = nil\n\tm.GetUserFunc = nil\n\tm.CreateUserFunc = nil\n\tm.DeleteUserFunc = nil\n\tm.UpdateUserFunc = nil\n\tm.UpdateSelfFunc = nil\n}", "func (e Eventer) Reset() {\n\te.es.Reset()\n}", "func (_m *MockOStream) Reset(buffer checked.Bytes) {\n\t_m.ctrl.Call(_m, \"Reset\", buffer)\n}", "func (r *Radio) Reset() {\n\t_, err := gpio.Output(resetPin, true, true)\n\tif err != nil {\n\t\tlog.Printf(\"Reset: gpio.Output: %s\", err)\n\t}\n\ttime.Sleep(100 * time.Microsecond)\n\t_, r.err = gpio.Input(resetPin, true)\n\ttime.Sleep(5 * time.Millisecond)\n}", "func (cpu *Mos6502) Reset() {\n\tcpu.addressAbsolute = 0xfffc\n\tlowByte := cpu.read(cpu.addressAbsolute)\n\thighByte := cpu.read(cpu.addressAbsolute + 
1)\n\n\tcpu.pc = (word(highByte) << 8) | word(lowByte)\n\n\tcpu.a = 0x00\n\tcpu.x = 0x00\n\tcpu.y = 0x00\n\tcpu.stkp = 0xfd\n\tcpu.status = 0x00 | byte(U)\n\n\tcpu.addressRelative = 0x0000\n\tcpu.addressAbsolute = 0x0000\n\tcpu.fetchedData = 0x00\n\n\tcpu.cycles = 8\n}", "func (e *Zero) Reset() {}", "func (e *Implementation) Reset() error {\n\tscope.Debug(\"Resetting environment\")\n\n\tif err := e.deleteTestNamespace(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := e.allocateTestNamespace(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (device *ServoBrick) Reset() (err error) {\n\tvar buf bytes.Buffer\n\n\tresultBytes, err := device.device.Set(uint8(FunctionReset), buf.Bytes())\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(resultBytes) > 0 {\n\t\tvar header PacketHeader\n\n\t\theader.FillFromBytes(resultBytes)\n\n\t\tif header.Length != 8 {\n\t\t\treturn fmt.Errorf(\"Received packet of unexpected size %d, instead of %d\", header.Length, 8)\n\t\t}\n\n\t\tif header.ErrorCode != 0 {\n\t\t\treturn DeviceError(header.ErrorCode)\n\t\t}\n\n\t\tbytes.NewBuffer(resultBytes[8:])\n\n\t}\n\n\treturn nil\n}", "func Reset() {\n\treset()\n}", "func (r *FakeRedis) Reset() {\n\tr.GetCalledWith = \"\"\n\tr.SetCalledWith = \"\"\n\tr.DeleteCalledWith = \"\"\n\tr.DeleteReturns = 0\n\tr.ExpectError = false\n\tr.GetReturns = nil\n\tr.SetObject = nil\n}", "func (m *TokensMgmtServerMock) Reset() {\n\tm.GetTokensFunc = nil\n\tm.CreateTokenFunc = nil\n\tm.UpdateTokenFunc = nil\n\tm.GetTokenFunc = nil\n\tm.DeleteTokenFunc = nil\n}", "func (bt *BackTest) Reset() {\n\tbt.EventQueue.Reset()\n\tbt.Datas.Reset()\n\tbt.Portfolio.Reset()\n\tbt.Statistic.Reset()\n\tbt.Exchange.Reset()\n\tbt.Funding.Reset()\n\tbt.exchangeManager = nil\n\tbt.orderManager = nil\n\tbt.databaseManager = nil\n}", "func (m *MockStreamConnection) Reset(reason types.StreamResetReason) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Reset\", reason)\n}", "func (omx OmxPlayer) Reset() (error) {\n\tomx.player 
= nil\n\tomx.omxIn = nil\n\tomx.omxKill()\n\treturn nil\n}", "func (p *Puck) Reset(name ...string) error {\n\tcmd := []byte(\"reset();\\n\")\n\terr := p.command(name, cmd)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (device *Device) Reset() error {\n\tif err := device.Device.Reset(); err != nil {\n\t\treturn err\n\t}\n\tdevice.fireEvent(event.EventKeystoreGone)\n\tdevice.init()\n\treturn nil\n}", "func (device *SilentStepperBrick) Reset() (err error) {\n\tvar buf bytes.Buffer\n\n\tresultBytes, err := device.device.Set(uint8(FunctionReset), buf.Bytes())\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(resultBytes) > 0 {\n\t\tvar header PacketHeader\n\n\t\theader.FillFromBytes(resultBytes)\n\n\t\tif header.Length != 8 {\n\t\t\treturn fmt.Errorf(\"Received packet of unexpected size %d, instead of %d\", header.Length, 8)\n\t\t}\n\n\t\tif header.ErrorCode != 0 {\n\t\t\treturn DeviceError(header.ErrorCode)\n\t\t}\n\n\t\tbytes.NewBuffer(resultBytes[8:])\n\n\t}\n\n\treturn nil\n}", "func (s *FrozenSuite) TestResetSame() {\n\thits := []int{}\n\n\tt := AfterFunc(100, func() { hits = append(hits, 1) })\n\tAfterFunc(100, func() { hits = append(hits, 2) })\n\tAfterFunc(100, func() { hits = append(hits, 3) })\n\tAfterFunc(101, func() { hits = append(hits, 4) })\n\tAdvance(9)\n\n\t// When\n\tactive := t.Reset(91)\n\n\t// Then\n\ts.Require().Equal(true, active)\n\n\tAdvance(90)\n\ts.Require().Equal([]int{}, hits)\n\tAdvance(1)\n\ts.Require().Equal([]int{2, 3, 1}, hits)\n}", "func (s *FrozenSuite) TestResetSame(c *C) {\n\thits := []int{}\n\n\tt := AfterFunc(100, func() { hits = append(hits, 1) })\n\tAfterFunc(100, func() { hits = append(hits, 2) })\n\tAfterFunc(100, func() { hits = append(hits, 3) })\n\tAfterFunc(101, func() { hits = append(hits, 4) })\n\tAdvance(9)\n\n\t// When\n\tactive := t.Reset(91)\n\n\t// Then\n\tc.Assert(active, Equals, true)\n\n\tAdvance(90)\n\tc.Assert(hits, DeepEquals, []int{})\n\tAdvance(1)\n\tc.Assert(hits, DeepEquals, []int{2, 3, 
1})\n}", "func (m *MockClientStreamConnection) Reset(reason types.StreamResetReason) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Reset\", reason)\n}", "func (tto *TtoT) Reset() {\n\ttto.PutChar(dg.ASCIIFF)\n\tlog.Println(\"INFO: TTO Reset\")\n}", "func (c *Calculator) Reset() {\n\tlog.Printf(\"Reset.\\n\")\n\tvalue = 0\n\tc.returnResult()\n}", "func (e *AutoResetEvent) Reset() {\n\te.l.Lock()\n\tselect {\n\tcase <-e.c:\n\tdefault:\n\t}\n\te.l.Unlock()\n}", "func TestReset(t *testing.T) {\n\tqd, err := New(QFILE)\n\tif nil != err {\n\t\tt.Error(err)\n\t}\n\tt.Logf(\"%s: %d unused, and %d used quotes\", QFILE, qd.Unused(), qd.Used())\n\tqd.ResetAndSave()\n\tt.Logf(\"%s: %d unused, and %d used quotes\", QFILE, qd.Unused(), qd.Used())\n}", "func (self *PhysicsP2) Reset() {\n self.Object.Call(\"reset\")\n}", "func (sw *Stopwatch) Reset() {\n\tsw.t = time.Now()\n}", "func (app *Application) reset() {\n\tapp.sandbox.Flush()\n}", "func (a *Client) DoReset(params *DoResetParams) (*DoResetAccepted, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewDoResetParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"doReset\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/Systems/{identifier}/Actions/ComputerSystem.Reset\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &DoResetReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*DoResetAccepted), nil\n\n}", "func (m *MockImpl) Reset() {\n\tm.recording = make([]Transaction, 0)\n\tm.simulateGetError = nil\n\tm.simulateAddError = nil\n\tm.simulateUpdateError = nil\n}", "func (r *Robot) Reset() {\r\n\trandom := rand.New(rand.NewSource(rand.Int63()))\r\n\trNum := func() byte {\r\n\t\treturn byte(random.Intn(10) + 
48)\r\n\t}\r\n\trChar := func() byte {\r\n\t\treturn byte(random.Intn(26) + 65)\r\n\t}\r\n\tbytes := []byte{rChar(), rChar(), rNum(), rNum(), rNum()}\r\n\tr.name = string(bytes)\r\n}", "func (ghost *Ghost) Reset() {\n\tghost.ClearRequests()\n\tghost.reset = false\n\tghost.loadProperties()\n\tghost.RegisterStatus = statusDisconnected\n}", "func SubtestStreamReset(t *testing.T, tr mux.Multiplexer) {\n\ttmux.SubtestStreamReset(t, tr)\n}", "func (p *httpMockProvider) reset() {\n\tp.mockserver.CleanupMockServer(p.config.Port)\n\tp.config.Port = 0\n\terr := p.configure()\n\tif err != nil {\n\t\tlog.Println(\"[ERROR] failed to configure the mock server\")\n\t}\n}", "func TestDvLIRClient_ResetAll(t *testing.T) {\n\tip := viper.GetString(\"IPAddress\")\n\tpw := viper.GetString(\"Password\")\n\n\tdvlirClient, err := NewDvLIRClient(ip, pw)\n\tif !assert.NoError(t, err, \"Error while creating Api client\") {\n\t\treturn\n\t}\n\n\terr = dvlirClient.Login()\n\tif !assert.NoError(t, err, \"Error during Login\") {\n\t\treturn\n\t}\n\tcode, err := dvlirClient.GetSystemInformation()\n\tif !assert.NoError(t, err, \"Error during GetSystemInformation request\") {\n\t\treturn\n\t}\n\tif !assert.IsType(t, SystemInfo{}, code, \"Return value isn't of type MomentaryValues\") {\n\t\treturn\n\t}\n\tif !assert.NotEmpty(t, code, \"System information is empty\") {\n\t\treturn\n\t}\n\n\tres, err := dvlirClient.ResetAll(code.ResetCode)\n\tif !assert.NoError(t, err, \"Error during ResetAll request\") {\n\t\treturn\n\t}\n\n\tfmt.Println(res)\n\tdefer func() {\n\t\terr = dvlirClient.Logout()\n\t\tif !assert.NoError(t, err, \"Error during Logout\") {\n\t\t\treturn\n\t\t}\n\t}()\n}", "func (me *BMP) Reset() error {\n\n\t_, err := me.device.Write([]byte{uint8(regReset), resetCode})\n\treturn err\n}", "func (tm *Motor) Reset() error {\n\treturn writeAttrString(tm.Descriptor.Path, \"command\", commandReset)\n}", "func ResetEvent(hEvent HANDLE) bool {\n\tret1 := syscall3(resetEvent, 
1,\n\t\tuintptr(hEvent),\n\t\t0,\n\t\t0)\n\treturn ret1 != 0\n}", "func (t *Ticker) Reset() {\n\tt.resetCh <- struct{}{}\n}", "func (_m *MockEncoder) Reset(t time.Time, capacity int) {\n\t_m.ctrl.Call(_m, \"Reset\", t, capacity)\n}", "func (m *MockImpl) Reset() {\n\tm.recording = make([]Transaction, 0)\n\tm.simulateGetError = nil\n\tm.simulateAddError = nil\n}", "func (r *ResetReady) Reset() {\n\tr.lock.Lock()\n\tr.ready.Signal()\n\tr.ready.reset()\n\tr.lock.Unlock()\n}", "func (t *Timer) Reset() {\n\tt.currentTime = t.getCurrentTimeMs()\n\tt.lastTime = t.currentTime\n\tt.tick = 0\n}", "func (m *EventHandler) Reset() {\n\tm.Events = []goevent.Event{}\n\tm.Time = time.Time{}\n}", "func Reset() {\n\tglobalRegistry.Reset()\n}", "func (c Clients) Reset(ctx context.Context) error {\n\treq := newRequest(\"*1\\r\\n$5\\r\\nRESET\\r\\n$\")\n\treturn c.c.cmdSimple(ctx, req)\n}", "func (c *CumulativeClock) Reset() {\n\tc.Set(c.start)\n}", "func (g *Game) Reset() {\n\tg.prepareGame()\n\n\tg.GreedsReleased = false\n\tg.TimeElapsed = 0\n\tg.KillsCount = 0\n\tg.Over = false\n}", "func (znp *Znp) SysResetReq(resetType byte) error {\n\treq := &SysResetReq{resetType}\n\treturn znp.ProcessRequest(unp.C_AREQ, unp.S_SYS, 0x00, req, nil)\n}", "func (wd *Watchdog) reset(timeoutNanoSecs int64) {\n\twd.resets <- timeoutNanoSecs + time.Now().UnixNano()\n}", "func (_m *MockIStream) Reset(r io.Reader) {\n\t_m.ctrl.Call(_m, \"Reset\", r)\n}", "func (m *HeavySyncMock) Reset(p context.Context, p1 insolar.ID, p2 insolar.PulseNumber) (r error) {\n\tcounter := atomic.AddUint64(&m.ResetPreCounter, 1)\n\tdefer atomic.AddUint64(&m.ResetCounter, 1)\n\n\tif len(m.ResetMock.expectationSeries) > 0 {\n\t\tif counter > uint64(len(m.ResetMock.expectationSeries)) {\n\t\t\tm.t.Fatalf(\"Unexpected call to HeavySyncMock.Reset. 
%v %v %v\", p, p1, p2)\n\t\t\treturn\n\t\t}\n\n\t\tinput := m.ResetMock.expectationSeries[counter-1].input\n\t\ttestify_assert.Equal(m.t, *input, HeavySyncMockResetInput{p, p1, p2}, \"HeavySync.Reset got unexpected parameters\")\n\n\t\tresult := m.ResetMock.expectationSeries[counter-1].result\n\t\tif result == nil {\n\t\t\tm.t.Fatal(\"No results are set for the HeavySyncMock.Reset\")\n\t\t\treturn\n\t\t}\n\n\t\tr = result.r\n\n\t\treturn\n\t}\n\n\tif m.ResetMock.mainExpectation != nil {\n\n\t\tinput := m.ResetMock.mainExpectation.input\n\t\tif input != nil {\n\t\t\ttestify_assert.Equal(m.t, *input, HeavySyncMockResetInput{p, p1, p2}, \"HeavySync.Reset got unexpected parameters\")\n\t\t}\n\n\t\tresult := m.ResetMock.mainExpectation.result\n\t\tif result == nil {\n\t\t\tm.t.Fatal(\"No results are set for the HeavySyncMock.Reset\")\n\t\t}\n\n\t\tr = result.r\n\n\t\treturn\n\t}\n\n\tif m.ResetFunc == nil {\n\t\tm.t.Fatalf(\"Unexpected call to HeavySyncMock.Reset. %v %v %v\", p, p1, p2)\n\t\treturn\n\t}\n\n\treturn m.ResetFunc(p, p1, p2)\n}", "func (f *fixture) Reset(ctx context.Context) error {\n\tif f.startChrome && f.cr != nil {\n\t\tif err := UnmountAllSmbMounts(ctx, f.cr); err != nil {\n\t\t\ttesting.ContextLog(ctx, \"Failed to unmount all SMB mounts: \", err)\n\t\t}\n\t}\n\treturn removeAllContents(ctx, f.guestDir)\n}", "func (device *DCV2Bricklet) Reset() (err error) {\n\tvar buf bytes.Buffer\n\n\tresultBytes, err := device.device.Set(uint8(FunctionReset), buf.Bytes())\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(resultBytes) > 0 {\n\t\tvar header PacketHeader\n\n\t\theader.FillFromBytes(resultBytes)\n\n\t\tif header.Length != 8 {\n\t\t\treturn fmt.Errorf(\"Received packet of unexpected size %d, instead of %d\", header.Length, 8)\n\t\t}\n\n\t\tif header.ErrorCode != 0 {\n\t\t\treturn DeviceError(header.ErrorCode)\n\t\t}\n\n\t\tbytes.NewBuffer(resultBytes[8:])\n\n\t}\n\n\treturn nil\n}", "func Reset() {\n\tstopMux.Lock()\n\tstoppedAt = nil\n\tstoppedFor = 
0\n\tstopMux.Unlock()\n}", "func Reset() {\n\tfmt.Print(CSI + ResetSeq + \"m\")\n}", "func (e *Event) Reset() {\n\targs, data := e.Args[0:0], e.Data[0:0]\n\t*e = Event{Args: args, Data: data}\n}", "func (d *Device) Reset(wait bool) error {\n\treturn d.resetUntilReady(\n\t\tfunc() error {\n\t\t\t// perform hardware reset using RST pin (active high)\n\t\t\td.config.ResetPin.High()\n\t\t\ttime.Sleep(hardResetDuration)\n\t\t\td.config.ResetPin.Low()\n\t\t\treturn nil\n\t\t},\n\t\twait)\n}", "func (m *MockStreamEventListener) OnResetStream(reason types.StreamResetReason) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnResetStream\", reason)\n}", "func (m *EventRSVPMutation) ResetEvent() {\n\tm.event = nil\n\tm.clearedevent = false\n}", "func (f *Fuse) reset() {\n\tf.failCounter = 0\n\tf.retries = 0\n}", "func (hb *heartbeat) reset() {\n\tselect {\n\tcase hb.resetChan <- struct{}{}:\n\tdefault:\n\t}\n}", "func AWSReset() {\n\tSetClusterName()\n\tsshUser, osLabel := distSelect()\n\tinstaller.RunPlaybook(\"./inventory/\"+common.Name+\"/installer/\", \"reset.yml\", sshUser, osLabel)\n\t// waiting for Infrastructure\n\ttime.Sleep(30)\n\tAWSInstall()\n\treturn\n}", "func (cpu *CPU) Reset() {\n\tcpu.setFlags(0x34)\n\tcpu.A = 0\n\tcpu.X = 0\n\tcpu.Y = 0\n\tcpu.S = 0xFD\n\tcpu.write(0x4017, 0)\n\tcpu.write(0x4015, 0)\n\tfor i := 0x4000; i <= 0x400F; i++ {\n\t\tcpu.write(uint16(i), 0)\n\t}\n}", "func (e *Event) Reset() {\n\te.bubble = true\n\te.handled = false\n}", "func (l *Listener) Reset() {\n\tl.ready = false\n\tl.readyChan = make(chan struct{})\n}", "func (m *TeamsServerMock) Reset() {\n\tm.CreateTeamFunc = nil\n\tm.ListTeamsFunc = nil\n\tm.GetTeamFunc = nil\n\tm.UpdateTeamFunc = nil\n\tm.DeleteTeamFunc = nil\n\tm.GetTeamMembershipFunc = nil\n\tm.AddTeamMembersFunc = nil\n\tm.RemoveTeamMembersFunc = nil\n\tm.GetTeamsForMemberFunc = nil\n}", "func (r *Robot) Reset() {\n\tr.name = \"\"\n}", "func (r *Robot) Reset() {\n\tr.name = \"\"\n}", "func (r *Robot) Reset() {\n\tr.name = 
\"\"\n}", "func (r *Robot) Reset() {\n\tr.name = \"\"\n}", "func (znp *Znp) SapiZbSystemReset() error {\n\treturn znp.ProcessRequest(unp.C_AREQ, unp.S_SAPI, 0x09, nil, nil)\n}", "func checkReset(re *require.Assertions, ma MovingAvg, emptyValue float64) {\n\taddRandData(ma, 100, 1000)\n\tma.Reset()\n\tre.Equal(emptyValue, ma.Get())\n}", "func (m *MockUnaggregatedEncoder) Reset(arg0 []byte) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Reset\", arg0)\n}", "func (robot *Robot) Reset() {\n\trobot.name = \"\"\n}", "func (h *stopHook) Reset() {\n\tatomic.StoreUint32(&h.stop, 0)\n}" ]
[ "0.7429841", "0.67033285", "0.66388756", "0.65228575", "0.64425254", "0.6361065", "0.6352461", "0.63204896", "0.6292608", "0.6255345", "0.6237539", "0.6236271", "0.61987543", "0.61972404", "0.6187439", "0.61694217", "0.6162422", "0.6148909", "0.61209315", "0.61076725", "0.60892355", "0.6080355", "0.60549754", "0.60493207", "0.6044276", "0.60439724", "0.6022917", "0.60214025", "0.6008123", "0.6005358", "0.5988132", "0.5985959", "0.5980799", "0.59805137", "0.59611964", "0.59585965", "0.59345835", "0.59317493", "0.59203357", "0.5911667", "0.59090745", "0.5867636", "0.5859265", "0.58571494", "0.5855456", "0.5847305", "0.5830109", "0.5821979", "0.58032554", "0.5797942", "0.5794228", "0.57909924", "0.57871324", "0.5775883", "0.5770929", "0.5770569", "0.57701206", "0.576501", "0.5764534", "0.5753274", "0.5751458", "0.57463205", "0.574626", "0.57451624", "0.57424074", "0.57421106", "0.5726263", "0.572384", "0.5701324", "0.56972694", "0.56962943", "0.56917095", "0.56849396", "0.5680125", "0.56791437", "0.5674615", "0.56657976", "0.56657034", "0.5664552", "0.5658163", "0.56572384", "0.564586", "0.5645744", "0.56410515", "0.5633313", "0.5629211", "0.5624035", "0.5601624", "0.5597647", "0.55972344", "0.55829185", "0.55665684", "0.55665684", "0.55665684", "0.55665684", "0.5557709", "0.5555652", "0.5550301", "0.55490506", "0.55307645" ]
0.7183463
1
Test that TokenReset sends a system.tokenReset event.
func TestServiceTokenReset(t *testing.T) { tbl := []struct { Subject string TIDs []string Expected interface{} }{ {"auth", nil, nil}, {"auth", []string{}, nil}, {"auth", []string{"foo"}, json.RawMessage(`{"tids":["foo"],"subject":"auth"}`)}, {"auth", []string{"foo", "bar"}, json.RawMessage(`{"tids":["foo","bar"],"subject":"auth"}`)}, {"auth.test.method", []string{"foo", "bar"}, json.RawMessage(`{"tids":["foo","bar"],"subject":"auth.test.method"}`)}, } for _, l := range tbl { runTest(t, func(s *res.Service) { s.Handle("model", res.GetResource(func(r res.GetRequest) { r.NotFound() })) }, func(s *restest.Session) { s.Service().TokenReset(l.Subject, l.TIDs...) // Send token event to flush any system.tokenReset event s.Service().TokenEvent(mock.CID, nil) if l.Expected != nil { s.GetMsg(). AssertSubject("system.tokenReset"). AssertPayload(l.Expected) } s.GetMsg().AssertTokenEvent(mock.CID, nil) }) } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestServiceReset(t *testing.T) {\n\ttbl := []struct {\n\t\tResources []string\n\t\tAccess []string\n\t\tExpected interface{}\n\t}{\n\t\t{nil, nil, nil},\n\t\t{[]string{}, nil, nil},\n\t\t{nil, []string{}, nil},\n\t\t{[]string{}, []string{}, nil},\n\n\t\t{[]string{\"test.foo.>\"}, nil, json.RawMessage(`{\"resources\":[\"test.foo.>\"]}`)},\n\t\t{nil, []string{\"test.foo.>\"}, json.RawMessage(`{\"access\":[\"test.foo.>\"]}`)},\n\t\t{[]string{\"test.foo.>\"}, []string{\"test.bar.>\"}, json.RawMessage(`{\"resources\":[\"test.foo.>\"],\"access\":[\"test.bar.>\"]}`)},\n\n\t\t{[]string{\"test.foo.>\"}, []string{}, json.RawMessage(`{\"resources\":[\"test.foo.>\"]}`)},\n\t\t{[]string{}, []string{\"test.foo.>\"}, json.RawMessage(`{\"access\":[\"test.foo.>\"]}`)},\n\t}\n\n\tfor _, l := range tbl {\n\t\trunTest(t, func(s *res.Service) {\n\t\t\ts.Handle(\"model\", res.GetResource(func(r res.GetRequest) { r.NotFound() }))\n\t\t}, func(s *restest.Session) {\n\t\t\ts.Service().Reset(l.Resources, l.Access)\n\t\t\t// Send token event to flush any system.reset event\n\t\t\ts.Service().TokenEvent(mock.CID, nil)\n\n\t\t\tif l.Expected != nil {\n\t\t\t\ts.GetMsg().\n\t\t\t\t\tAssertSubject(\"system.reset\").\n\t\t\t\t\tAssertPayload(l.Expected)\n\t\t\t}\n\n\t\t\ts.GetMsg().AssertTokenEvent(mock.CID, nil)\n\t\t})\n\t}\n}", "func TestAuthRequestTokenEvent(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.Auth(\"method\", func(r res.AuthRequest) {\n\t\t\tr.TokenEvent(mock.Token)\n\t\t\tr.OK(nil)\n\t\t}))\n\t}, func(s *restest.Session) {\n\t\treq := s.Auth(\"test.model\", \"method\", nil)\n\t\ts.GetMsg().\n\t\t\tAssertTokenEvent(mock.CID, mock.Token)\n\t\treq.Response().\n\t\t\tAssertResult(nil)\n\t})\n}", "func TestReset(t *testing.T) {\n\ttestCancel(t, false)\n}", "func TestServiceTokenEvent_WithNilToken_SendsNilToken(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.GetResource(func(r res.GetRequest) { r.NotFound() 
}))\n\t}, func(s *restest.Session) {\n\t\ts.Service().TokenEvent(mock.CID, nil)\n\t\ts.GetMsg().AssertTokenEvent(mock.CID, nil)\n\t})\n}", "func (m *MockSessionRunner) RetireResetToken(arg0 [16]byte) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"RetireResetToken\", arg0)\n}", "func MockOnResetSystem(ctx context.Context, mockAPI *redfishMocks.RedfishAPI,\n\tsystemID string, requestBody *redfishClient.ResetRequestBody, redfishErr redfishClient.RedfishError,\n\thttpResponse *http.Response, err error) {\n\trequest := redfishClient.ApiResetSystemRequest{}.ResetRequestBody(*requestBody)\n\tmockAPI.On(\"ResetSystem\", ctx, systemID).Return(request).Times(1)\n\tmockAPI.On(\"ResetSystemExecute\", mock.Anything).Return(redfishErr, httpResponse, err).Times(1)\n}", "func TestAuthRequestNilTokenEvent(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.Auth(\"method\", func(r res.AuthRequest) {\n\t\t\tr.TokenEvent(nil)\n\t\t\tr.OK(nil)\n\t\t}))\n\t}, func(s *restest.Session) {\n\t\treq := s.Auth(\"test.model\", \"method\", nil)\n\t\ts.GetMsg().\n\t\t\tAssertTokenEvent(mock.CID, nil)\n\t\treq.Response().\n\t\t\tAssertResult(nil)\n\t})\n}", "func TestServiceTokenEvent_WithObjectToken_SendsToken(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.GetResource(func(r res.GetRequest) { r.NotFound() }))\n\t}, func(s *restest.Session) {\n\t\ts.Service().TokenEvent(mock.CID, mock.Token)\n\t\ts.GetMsg().AssertTokenEvent(mock.CID, mock.Token)\n\t})\n}", "func (m *TokensMgmtServerMock) Reset() {\n\tm.GetTokensFunc = nil\n\tm.CreateTokenFunc = nil\n\tm.UpdateTokenFunc = nil\n\tm.GetTokenFunc = nil\n\tm.DeleteTokenFunc = nil\n}", "func TestVerifyToken(t *testing.T) {\n t.Errorf(\"No tests written yet for VerifyToken()\")\n}", "func TestServiceTokenEventWithID_WithNilToken_SendsNilToken(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.GetResource(func(r res.GetRequest) { r.NotFound() }))\n\t}, func(s 
*restest.Session) {\n\t\ts.Service().TokenEventWithID(mock.CID, \"foo\", nil)\n\t\ts.GetMsg().AssertTokenEventWithID(mock.CID, \"foo\", nil)\n\t})\n}", "func (m *MockSessionRunner) RemoveResetToken(arg0 [16]byte) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"RemoveResetToken\", arg0)\n}", "func TestServiceTokenEventWithID_WithObjectToken_SendsToken(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.GetResource(func(r res.GetRequest) { r.NotFound() }))\n\t}, func(s *restest.Session) {\n\t\ts.Service().TokenEventWithID(mock.CID, \"foo\", mock.Token)\n\t\ts.GetMsg().AssertTokenEventWithID(mock.CID, \"foo\", mock.Token)\n\t})\n}", "func TestUserTokenRefreshSuccess(t *testing.T) {\n\tdb := setupDB()\n\tdefer db.Close()\n\trouter := setupRouter()\n\n\tw := httptest.NewRecorder()\n\n\treq, _ := http.NewRequest(\"GET\", \"/token/refresh\", nil)\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\treq.Header.Add(\"Authorization\", \"Bearer \" + Token)\n\trouter.ServeHTTP(w, req)\n\n\tassert.Equal(t, http.StatusOK, w.Code)\n\tvar token auth.Token\n\tjson.Unmarshal([]byte(w.Body.String()), &token)\n\tassert.NotEmpty(t, token.Expire)\t// TODO: equal to or later than `now`\n\tassert.NotEmpty(t, token.Token)\t\t// TODO: validate it's a correct JWT token\n\tToken = token.Token\n}", "func TestRefreshToken(t *testing.T) {\n\tdb.InitDB()\n\tvar router *gin.Engine = routes.SetupRouter()\n\n\tvar user models.UserCreate = utils.CreateUser(\"Tom\", \"qwerty1234\", t, router)\n\tuser.Token = utils.ConnectUser(\"Tom\", \"qwerty1234\", t, router)\n\n\tvar url string = \"/v1/refresh/token\"\n\tvar bearer = \"Bearer \" + user.Token\n\n\trecord := httptest.NewRecorder()\n\trequest, _ := http.NewRequest(\"POST\", url, nil)\n\trequest.Header.Add(\"Content-Type\", \"application/json\")\n\trequest.Header.Add(\"Authorization\", bearer)\n\n\trouter.ServeHTTP(record, request)\n\n\tvar refresh models.UserConnect\n\terr := 
json.Unmarshal([]byte(record.Body.String()), &refresh)\n\tif err != nil {\n\t\tlog.Fatal(\"Bad output: \", err.Error())\n\t\tt.Fail()\n\t}\n\n\tassert.Equal(t, record.Code, 200)\n\n\tutils.CleanUser(user.ID, user.Token, t, router)\n\tdb.CloseDB()\n}", "func (r *RedisDL) resetToken() {\n\tr.currentToken = \"\"\n}", "func (m *MockSessionRunner) AddResetToken(arg0 [16]byte, arg1 packetHandler) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"AddResetToken\", arg0, arg1)\n}", "func (s *Service) TestToken(ctx context.Context, info *pushmdl.PushInfo, token string) (err error) {\n\tparams := url.Values{}\n\tparams.Add(\"app_id\", strconv.FormatInt(info.APPID, 10))\n\tparams.Add(\"alert_title\", info.Title)\n\tparams.Add(\"alert_body\", info.Summary)\n\tparams.Add(\"token\", token)\n\tparams.Add(\"link_type\", strconv.FormatInt(int64(info.LinkType), 10))\n\tparams.Add(\"link_value\", info.LinkValue)\n\tparams.Add(\"sound\", strconv.Itoa(info.Sound))\n\tparams.Add(\"vibration\", strconv.Itoa(info.Vibration))\n\tparams.Add(\"expire_time\", strconv.FormatInt(int64(info.ExpireTime), 10))\n\tparams.Add(\"image_url\", info.ImageURL)\n\tif err = s.httpClient.Post(ctx, _testTokenURL, \"\", params, nil); err != nil {\n\t\tlog.Error(\"s.TestToken(%+v) error(%v)\", info, err)\n\t}\n\treturn\n}", "func TestTokenRefreshLimit(t *testing.T) {\n\tdb.InitDB()\n\tvar router *gin.Engine = routes.SetupRouter()\n\n\tos.Setenv(\"TOKEN_LIMIT_HOURS\", \"0\")\n\n\tvar user models.UserCreate = utils.CreateUser(\"Tom\", \"qwerty1234\", t, router)\n\tuser.Token = utils.ConnectUser(\"Tom\", \"qwerty1234\", t, router)\n\ttime.Sleep(1 * time.Second)\n\n\tvar url string = \"/v1/refresh/token\"\n\tvar bearer = \"Bearer \" + user.Token\n\trecord := httptest.NewRecorder()\n\trequest, _ := http.NewRequest(\"POST\", url, nil)\n\trequest.Header.Add(\"Content-Type\", \"application/json\")\n\trequest.Header.Add(\"Authorization\", bearer)\n\n\trouter.ServeHTTP(record, request)\n\n\tvar message Message\n\terr := 
json.Unmarshal([]byte(record.Body.String()), &message)\n\tif err != nil {\n\t\tlog.Fatal(\"Bad output: \", err.Error())\n\t\tt.Fail()\n\t}\n\n\tassert.Equal(t, record.Code, 401)\n\tassert.Equal(t, message.Message, \"Token has expired and cannot be refreshed, please reconnect\")\n\n\tos.Setenv(\"TOKEN_LIMIT_HOURS\", \"24\")\n\n\tuser.Token = utils.ConnectUser(\"Tom\", \"qwerty1234\", t, router)\n\n\tutils.CleanUser(user.ID, user.Token, t, router)\n\tdb.CloseDB()\n}", "func (s *Server) OnReset() error { return nil }", "func tearDown() {\n\t/*\n\t\tpubSubResetRequest, _ := http.NewRequest(\"POST\", \"http://localhost:8080/reset\", nil)\n\t\thttpClient := &http.Client{}\n\t\t_, err := httpClient.Do(pubSubResetRequest)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\ttime.Sleep(5 *time.Second)\n\n\t*/\n}", "func SimulateMintToken(k keeper.Keeper, ak types.AccountKeeper, bk types.BankKeeper) simtypes.Operation {\n\treturn func(\n\t\tr *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context,\n\t\taccs []simtypes.Account, chainID string,\n\t) (simtypes.OperationMsg, []simtypes.FutureOperation, error) {\n\n\t\ttoken, maxFee := selectToken(ctx, k, ak, bk, true)\n\t\tsimToAccount, _ := simtypes.RandomAcc(r, accs)\n\n\t\tmsg := types.NewMsgMintToken(token.GetSymbol(), token.GetOwnerString(), simToAccount.Address.String(), 100)\n\n\t\townerAccount, found := simtypes.FindAccount(accs, token.GetOwner())\n\t\tif !found {\n\t\t\treturn simtypes.NoOpMsg(types.ModuleName, msg.Type(), fmt.Sprintf(\"account[%s] does not found\", token.GetOwnerString())), \n\t\t\t\tnil, fmt.Errorf(\"account[%s] does not found\", token.GetOwnerString())\n\t\t}\n\n\t\towner, _ := sdk.AccAddressFromBech32(msg.Owner)\n\t\taccount := ak.GetAccount(ctx, owner)\n\t\tfees, err := simtypes.RandomFees(r, ctx, maxFee)\n\t\tif err != nil {\n\t\t\treturn simtypes.NoOpMsg(types.ModuleName, msg.Type(), \"unable to generate fees\"), nil, err\n\t\t}\n\n\t\ttxGen := simappparams.MakeTestEncodingConfig().TxConfig\n\t\ttx, err 
:= helpers.GenTx(\n\t\t\ttxGen,\n\t\t\t[]sdk.Msg{msg},\n\t\t\tfees,\n\t\t\thelpers.DefaultGenTxGas,\n\t\t\tchainID,\n\t\t\t[]uint64{account.GetAccountNumber()},\n\t\t\t[]uint64{account.GetSequence()},\n\t\t\townerAccount.PrivKey,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn simtypes.NoOpMsg(types.ModuleName, msg.Type(), \"unable to generate mock tx\"), nil, err\n\t\t}\n\n\t\tif _, _, err = app.Deliver(txGen.TxEncoder(), tx); err != nil {\n\t\t\treturn simtypes.NoOpMsg(types.ModuleName, msg.Type(), \"unable to deliver tx\"), nil, err\n\t\t}\n\n\t\treturn simtypes.NewOperationMsg(msg, true, \"simulate mint token\"), nil, nil\n\t}\n}", "func (m *MockStreamEventListener) OnResetStream(reason types.StreamResetReason) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"OnResetStream\", reason)\n}", "func TestEmptyToken(t *testing.T) {\n\tassert := assert.New(t)\n\n\tmockedTpmProvider := new(tpmprovider.MockedTpmProvider)\n\tmockedTpmProvider.On(\"Close\").Return(nil)\n\tmockedTpmProvider.On(\"NvIndexExists\", mock.Anything).Return(true, nil)\n\tmockedTpmProvider.On(\"NvRelease\", mock.Anything, mock.Anything).Return(nil)\n\tmockedTpmProvider.On(\"NvDefine\", mock.Anything, mock.Anything, mock.Anything).Return(nil)\n\tmockedTpmProvider.On(\"NvWrite\", mock.Anything, mock.Anything, mock.Anything).Return(nil)\n\n\tmockedTpmFactory := tpmprovider.MockedTpmFactory{TpmProvider: mockedTpmProvider}\n\n\ttrustAgentService, err := CreateTrustAgentService(CreateTestConfig(), mockedTpmFactory)\n\tassert.NoError(err)\n\n\t// setup TA service to use JWT-based authentication\n\ttrustAgentService.router = mux.NewRouter()\n\ttrustAgentService.router.Use(middleware.NewTokenAuth(\"../test/mockJWTDir\", \"../test/mockCACertsDir\", mockRetrieveJWTSigningCerts, cacheTime))\n\ttrustAgentService.router.HandleFunc(\"/v2/tag\", errorHandler(requiresPermission(setAssetTag(CreateTestConfig(), mockedTpmFactory), []string{postDeployTagPerm}))).Methods(\"POST\")\n\n\tjsonString := `{\"tag\" : 
\"tHgfRQED1+pYgEZpq3dZC9ONmBCZKdx10LErTZs1k/k=\", \"hardware_uuid\" : \"7a569dad-2d82-49e4-9156-069b0065b262\"}`\n\n\trequest, err := http.NewRequest(\"POST\", \"/v2/tag\", bytes.NewBuffer([]byte(jsonString)))\n\tassert.NoError(err)\n\n\trequest.Header.Set(\"Content-Type\", \"application/json\")\n\trequest.Header.Add(\"Authorization\", \"Bearer \"+EmptyJWTToken)\n\n\trecorder := httptest.NewRecorder()\n\ttrustAgentService.router.ServeHTTP(recorder, request)\n\tresponse := recorder.Result()\n\tfmt.Printf(\"StatusCode: %d\\n\", response.StatusCode)\n\tassert.Equal(http.StatusUnauthorized, response.StatusCode)\n}", "func TestTokenRevoke(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping integration tests in short mode\")\n\t}\n\tdeleteAll(t)\n\tadminClient := getPachClient(t, admin)\n\n\t// Create repo (so alice has something to list)\n\trepo := tu.UniqueString(\"TestTokenRevoke\")\n\trequire.NoError(t, adminClient.CreateRepo(repo))\n\n\talice := tu.UniqueString(\"alice\")\n\tresp, err := adminClient.GetAuthToken(adminClient.Ctx(), &auth.GetAuthTokenRequest{\n\t\tSubject: alice,\n\t})\n\trequire.NoError(t, err)\n\taliceClient := adminClient.WithCtx(context.Background())\n\taliceClient.SetAuthToken(resp.Token)\n\n\t// alice's token is valid\n\trepos, err := aliceClient.ListRepo()\n\trequire.NoError(t, err)\n\trequire.ElementsEqualUnderFn(t, []string{repo}, repos, RepoInfoToName)\n\n\t// admin revokes token\n\t_, err = adminClient.RevokeAuthToken(adminClient.Ctx(), &auth.RevokeAuthTokenRequest{\n\t\tToken: resp.Token,\n\t})\n\trequire.NoError(t, err)\n\n\t// alice's token is no longer valid\n\trepos, err = aliceClient.ListRepo()\n\trequire.True(t, auth.IsErrBadToken(err), err.Error())\n\trequire.Equal(t, 0, len(repos))\n}", "func MockResetStatusResponse(t *testing.T) {\n\tth.Mux.HandleFunc(shareEndpoint+\"/\"+shareID+\"/action\", func(w http.ResponseWriter, r *http.Request) {\n\t\tth.TestMethod(t, r, \"POST\")\n\t\tth.TestHeader(t, r, \"X-Auth-Token\", 
fake.TokenID)\n\t\tth.TestHeader(t, r, \"Content-Type\", \"application/json\")\n\t\tth.TestHeader(t, r, \"Accept\", \"application/json\")\n\t\tth.TestJSONRequest(t, r, resetStatusRequest)\n\t\tw.Header().Add(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusAccepted)\n\t})\n}", "func Test_LogoutValidToken(t *testing.T) {\n\tdir, err := tempConfig(\"\")\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tdefer os.RemoveAll(dir)\n\n\tmockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusOK)\n\t\tw.Write([]byte(`{\"status_code\": 10007, \"status_text\": \"Resource deleted\"}`))\n\t}))\n\tdefer mockServer.Close()\n\n\tlogoutArgs := logoutArguments{\n\t\tapiEndpoint: mockServer.URL,\n\t\ttoken: \"test-token\",\n\t}\n\n\terr = logout(logoutArgs)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n}", "func CheckResetToken(user *structs.User, token string) error {\n\trow, err := dot.QueryRow(db, selectResetTokenQuery, user.Email, token)\n\n\t// Scan => take data\n\tif err := row.Scan(&user.Id, &user.Sub, &user.GivenName, &user.FamilyName, &user.Profile, &user.Picture, &user.Email, &user.EmailVerified, &user.Gender, &user.Address, &user.Phone); err != nil {\n\t\tif err == sql.ErrNoRows {\n\t\t\treturn errors.NotFound(token, err.Error())\n\t\t}\n\n\t\treturn errors.InternalServerError(token, err.Error())\n\t}\n\n\treturn err\n}", "func TestTeamOpenResetAndRejoin(t *testing.T) {\n\ttt := newTeamTester(t)\n\tdefer tt.cleanup()\n\n\tann := tt.addUser(\"ann\")\n\tbob := tt.addUser(\"bob\")\n\ttt.logUserNames()\n\n\tteamID, teamName := ann.createTeam2()\n\tteam := teamName.String()\n\tann.addTeamMember(team, bob.username, keybase1.TeamRole_WRITER)\n\terr := teams.ChangeTeamSettingsByID(context.Background(), ann.tc.G, teamID, keybase1.TeamSettings{Open: true, JoinAs: keybase1.TeamRole_READER})\n\trequire.NoError(t, err)\n\n\tt.Logf(\"Open team 
name is %q\", team)\n\n\tbob.kickTeamRekeyd()\n\tbob.reset()\n\n\t// Wait for change membership link after bob resets\n\tann.pollForTeamSeqnoLink(team, keybase1.Seqno(4))\n\n\tbob.loginAfterResetPukless()\n\t_, err = bob.teamsClient.TeamRequestAccess(context.Background(), keybase1.TeamRequestAccessArg{Name: team})\n\trequire.NoError(t, err)\n\n\tbob.kickTeamRekeyd()\n\tbob.perUserKeyUpgrade()\n\n\t// Poll for change_membership after bob's TAR gets acted on.\n\tann.pollForTeamSeqnoLink(team, keybase1.Seqno(5))\n\n\tteamObj := ann.loadTeam(team, true /* admin */)\n\n\trequire.Len(t, teamObj.GetActiveAndObsoleteInvites(), 0)\n\n\tmembers, err := teamObj.Members()\n\trequire.NoError(t, err)\n\trequire.Len(t, members.AllUIDs(), 2)\n\trole, err := teamObj.MemberRole(context.Background(), bob.userVersion())\n\trequire.NoError(t, err)\n\trequire.Equal(t, keybase1.TeamRole_READER, role)\n}", "func TestDvLIRClient_ResetAll(t *testing.T) {\n\tip := viper.GetString(\"IPAddress\")\n\tpw := viper.GetString(\"Password\")\n\n\tdvlirClient, err := NewDvLIRClient(ip, pw)\n\tif !assert.NoError(t, err, \"Error while creating Api client\") {\n\t\treturn\n\t}\n\n\terr = dvlirClient.Login()\n\tif !assert.NoError(t, err, \"Error during Login\") {\n\t\treturn\n\t}\n\tcode, err := dvlirClient.GetSystemInformation()\n\tif !assert.NoError(t, err, \"Error during GetSystemInformation request\") {\n\t\treturn\n\t}\n\tif !assert.IsType(t, SystemInfo{}, code, \"Return value isn't of type MomentaryValues\") {\n\t\treturn\n\t}\n\tif !assert.NotEmpty(t, code, \"System information is empty\") {\n\t\treturn\n\t}\n\n\tres, err := dvlirClient.ResetAll(code.ResetCode)\n\tif !assert.NoError(t, err, \"Error during ResetAll request\") {\n\t\treturn\n\t}\n\n\tfmt.Println(res)\n\tdefer func() {\n\t\terr = dvlirClient.Logout()\n\t\tif !assert.NoError(t, err, \"Error during Logout\") {\n\t\t\treturn\n\t\t}\n\t}()\n}", "func TestsetTokenCookie(t *testing.T) {\n\thand := New(nil)\n\n\twriter := 
httptest.NewRecorder()\n\treq := dummyGet()\n\n\ttoken := []byte(\"dummy\")\n\thand.setTokenCookie(writer, req, token)\n\n\theader := writer.Header().Get(\"Set-Cookie\")\n\texpected_part := fmt.Sprintf(\"csrf_token=%s;\", token)\n\n\tif !strings.Contains(header, expected_part) {\n\t\tt.Errorf(\"Expected header to contain %v, it doesn't. The header is %v.\",\n\t\t\texpected_part, header)\n\t}\n\n\ttokenInContext := unmaskToken(b64decode(Token(req)))\n\tif !bytes.Equal(tokenInContext, token) {\n\t\tt.Errorf(\"RegenerateToken didn't set the token in the context map!\"+\n\t\t\t\" Expected %v, got %v\", token, tokenInContext)\n\t}\n}", "func TestAuthRawToken(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.Auth(\"method\", func(r res.AuthRequest) {\n\t\t\trestest.AssertEqualJSON(t, \"RawToken\", r.RawToken(), mock.Token)\n\t\t\tr.NotFound()\n\t\t}))\n\t}, func(s *restest.Session) {\n\t\treq := mock.AuthRequest()\n\t\treq.Token = mock.Token\n\t\ts.Auth(\"test.model\", \"method\", req).\n\t\t\tResponse().\n\t\t\tAssertError(res.ErrNotFound)\n\t})\n}", "func TestCorruptedTokenLogin(t *testing.T) {\n\t// @todo this test is disabled now because it was\n\t// built on internal assumptions that no longer hold and not so easy to access anymore\n\t// TrySuite(t, testCorruptedLogin, retryCount)\n}", "func TestGet_Token(t *testing.T) {\n t.Errorf(\"No tests written yet for Get_Token()\")\n}", "func TestOAuthVerifyState(t *testing.T) {\n\tservice := NewOAuth2Service(testClientID, testClientSecret, testScopes, testTokenURL, testAuthURL)\n\tservice.ExchangeAuthCodeForToken(testCode)\n}", "func init(){\n \n err := CreateSchema()\n if err != nil {\n fmt.Printf(\"token-mgr_test:init:%s\\n\", err)\n os.Exit(1)\n }\n MockTokenTuple = TokenTuple{1234, MockUserEmail, \"12345678901234567890\"}\n err = MockTokenTuple.UpdateToken()\n if err != nil {\n fmt.Printf(\"token-mgr_test:init:%s\\n\", err)\n os.Exit(2)\n }\n}", "func TestSendENIStateChangeUnmanaged(t 
*testing.T) {\n\tmockCtrl := gomock.NewController(t)\n\tdefer mockCtrl.Finish()\n\n\tmockStateManager := mock_dockerstate.NewMockTaskEngineState(mockCtrl)\n\teventChannel := make(chan statechange.Event)\n\tctx := context.TODO()\n\n\tgomock.InOrder(\n\t\tmockStateManager.EXPECT().ENIByMac(randomMAC).Return(nil, false),\n\t)\n\n\twatcher := setupWatcher(ctx, nil, mockStateManager, eventChannel, primaryMAC)\n\n\tassert.Error(t, watcher.sendENIStateChange(randomMAC))\n}", "func (s *FrozenSuite) TestResetSame(c *C) {\n\thits := []int{}\n\n\tt := AfterFunc(100, func() { hits = append(hits, 1) })\n\tAfterFunc(100, func() { hits = append(hits, 2) })\n\tAfterFunc(100, func() { hits = append(hits, 3) })\n\tAfterFunc(101, func() { hits = append(hits, 4) })\n\tAdvance(9)\n\n\t// When\n\tactive := t.Reset(91)\n\n\t// Then\n\tc.Assert(active, Equals, true)\n\n\tAdvance(90)\n\tc.Assert(hits, DeepEquals, []int{})\n\tAdvance(1)\n\tc.Assert(hits, DeepEquals, []int{2, 3, 1})\n}", "func TestTokenIsSet(t *testing.T) {\n\tconfiguration := ReadConfig()\n\ttoken := configuration.Token\n\n\tif token == \"\" {\n\t\tt.Error(\"Token misconfigured\")\n\t}\n\n\t// A dumb way to check if a dummy token has been used\n\tif len(token) < 16 {\n\t\tt.Error(\"Token misconfigured\")\n\t}\n\n\tt.Log(\"Token set\")\n}", "func TestAuthRawTokenWithNoToken(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.Auth(\"method\", func(r res.AuthRequest) {\n\t\t\trestest.AssertEqualJSON(t, \"RawToken\", r.RawToken(), nil)\n\t\t\tr.NotFound()\n\t\t}))\n\t}, func(s *restest.Session) {\n\t\ts.Auth(\"test.model\", \"method\", nil).\n\t\t\tResponse().\n\t\t\tAssertError(res.ErrNotFound)\n\t})\n}", "func RegisteringTokenTest(env *models.PhotonEnvReader, allowFail bool) {\n\t// 1. 
register a not-exist token\n\tcase1 := &APITestCase{\n\t\tCaseName: \"Register a not-exist token\",\n\t\tAllowFail: allowFail,\n\t\tReq: &models.Req{\n\t\t\tAPIName: \"RegisteringOneToken\",\n\t\t\tFullURL: env.RandomNode().Host + \"/api/1/tokens/0xFFfFfFffFFfffFFfFFfFFFFFffFFFffffFfFFFfF\",\n\t\t\tMethod: http.MethodPut,\n\t\t\tPayload: \"\",\n\t\t\tTimeout: time.Second * 120,\n\t\t},\n\t\tTargetStatusCode: 409,\n\t}\n\tcase1.Run()\n\t// 2. register a new token\n\tnewTokenAddress := deployNewToken()\n\tcase2 := &APITestCase{\n\t\tCaseName: \"Register a new token\",\n\t\tAllowFail: allowFail,\n\t\tReq: &models.Req{\n\t\t\tAPIName: \"RegisteringOneToken\",\n\t\t\tFullURL: env.RandomNode().Host + \"/api/1/tokens/\" + newTokenAddress,\n\t\t\tMethod: http.MethodPut,\n\t\t\tPayload: \"\",\n\t\t\tTimeout: time.Second * 180,\n\t\t},\n\t\tTargetStatusCode: 200,\n\t}\n\tcase2.Run()\n}", "func (v *VsctlMock) Reset() {\n\tv.ReceivedArgs = [][]string{}\n}", "func (s *FrozenSuite) TestResetSame() {\n\thits := []int{}\n\n\tt := AfterFunc(100, func() { hits = append(hits, 1) })\n\tAfterFunc(100, func() { hits = append(hits, 2) })\n\tAfterFunc(100, func() { hits = append(hits, 3) })\n\tAfterFunc(101, func() { hits = append(hits, 4) })\n\tAdvance(9)\n\n\t// When\n\tactive := t.Reset(91)\n\n\t// Then\n\ts.Require().Equal(true, active)\n\n\tAdvance(90)\n\ts.Require().Equal([]int{}, hits)\n\tAdvance(1)\n\ts.Require().Equal([]int{2, 3, 1}, hits)\n}", "func RunTestReset(t *testing.T, e1, e2 streams.StreamProvider) {\n\tserver := func(provider streams.StreamProvider) error {\n\t\tlistener := provider.Listen(nil)\n\t\tfor {\n\t\t\tstream, err := listener.Accept()\n\t\t\tif err != nil {\n\t\t\t\tif err != io.EOF {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tif err := stream.Reset(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// Fails due to spdystream bug\n\t\t\t// https://github.com/docker/spdystream/issues/45\n\t\t\tif _, err := stream.Write([]byte(\"some 
value\")); err == nil {\n\t\t\t\treturn fmt.Errorf(\"Expected error writing after reset\")\n\t\t\t}\n\t\t}\n\t}\n\tclient := func(provider streams.StreamProvider) error {\n\t\tstream, err := provider.NewStream(nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tb := make([]byte, 10)\n\t\tif n, err := stream.Read(b); err != nil && err != io.EOF {\n\t\t\treturn err\n\t\t} else if err == nil && n > 0 {\n\t\t\treturn fmt.Errorf(\"Expected read of %d bytes\", n)\n\t\t} else if err == nil {\n\t\t\treturn fmt.Errorf(\"Expected error reading from stream\")\n\t\t}\n\t\treturn nil\n\t}\n\trunTest(t, e1, e2, client, server)\n}", "func (m *EventRSVPMutation) ResetEvent() {\n\tm.event = nil\n\tm.clearedevent = false\n}", "func SubtestStreamReset(t *testing.T, tr mux.Multiplexer) {\n\ttmux.SubtestStreamReset(t, tr)\n}", "func (_m *MockOStream) Reset(buffer checked.Bytes) {\n\t_m.ctrl.Call(_m, \"Reset\", buffer)\n}", "func TestDvLIRClient_AllowResetWithPwd(t *testing.T) {\n\tip := viper.GetString(\"IPAddress\")\n\tpw := viper.GetString(\"Password\")\n\n\tdvlirClient, err := NewDvLIRClient(ip, pw)\n\tif !assert.NoError(t, err, \"Error while creating Api client\") {\n\t\treturn\n\t}\n\n\terr = dvlirClient.Login()\n\tif !assert.NoError(t, err, \"Error during Login\") {\n\t\treturn\n\t}\n\n\tres, err := dvlirClient.AllowResetWithPwd(\"Yes\")\n\tif !assert.NoError(t, err, \"Error while while changing saving interval\") {\n\t\treturn\n\t}\n\n\tfmt.Println(res)\n\n\tdefer func() {\n\t\terr = dvlirClient.Logout()\n\t\tif !assert.NoError(t, err, \"Error during Logout\") {\n\t\t\treturn\n\t\t}\n\t}()\n}", "func TestMsgAppRespWaitReset(t *testing.T) {\n\tsm := newTestRaft(1, []uint64{1, 2, 3}, 5, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(sm)\n\tsm.becomeCandidate()\n\tsm.becomeLeader()\n\n\t// The new leader has just emitted a new Term 4 entry; consume those messages\n\t// from the outgoing queue.\n\tsm.bcastAppend()\n\tsm.readMessages()\n\n\t// Node 2 acks the first entry, 
making it committed.\n\tsm.Step(pb.Message{\n\t\tFrom: 2,\n\t\tType: pb.MsgAppResp,\n\t\tIndex: 1,\n\t})\n\tif sm.raftLog.committed != 1 {\n\t\tt.Fatalf(\"expected committed to be 1, got %d\", sm.raftLog.committed)\n\t}\n\t// Also consume the MsgApp messages that update Commit on the followers.\n\tsm.readMessages()\n\n\t// A new command is now proposed on node 1.\n\tsm.Step(pb.Message{\n\t\tFrom: 1,\n\t\tType: pb.MsgProp,\n\t\tEntries: []pb.Entry{{}},\n\t})\n\n\t// The command is broadcast to all nodes not in the wait state.\n\t// Node 2 left the wait state due to its MsgAppResp, but node 3 is still waiting.\n\tmsgs := sm.readMessages()\n\tif len(msgs) != 1 {\n\t\tt.Fatalf(\"expected 1 message, got %d: %+v\", len(msgs), msgs)\n\t}\n\tif msgs[0].Type != pb.MsgApp || msgs[0].To != 2 {\n\t\tt.Errorf(\"expected MsgApp to node 2, got %v to %d\", msgs[0].Type, msgs[0].To)\n\t}\n\tif len(msgs[0].Entries) != 1 || msgs[0].Entries[0].Index != 2 {\n\t\tt.Errorf(\"expected to send entry 2, but got %v\", msgs[0].Entries)\n\t}\n\n\t// Now Node 3 acks the first entry. 
This releases the wait and entry 2 is sent.\n\tsm.Step(pb.Message{\n\t\tFrom: 3,\n\t\tType: pb.MsgAppResp,\n\t\tIndex: 1,\n\t})\n\tmsgs = sm.readMessages()\n\tif len(msgs) != 1 {\n\t\tt.Fatalf(\"expected 1 message, got %d: %+v\", len(msgs), msgs)\n\t}\n\tif msgs[0].Type != pb.MsgApp || msgs[0].To != 3 {\n\t\tt.Errorf(\"expected MsgApp to node 3, got %v to %d\", msgs[0].Type, msgs[0].To)\n\t}\n\tif len(msgs[0].Entries) != 1 || msgs[0].Entries[0].Index != 2 {\n\t\tt.Errorf(\"expected to send entry 2, but got %v\", msgs[0].Entries)\n\t}\n}", "func TestTokenBasedAuth(t *testing.T) {\n\tvar err error\n\terr = client.Login()\n\tif err != nil {\n\t\tt.Error(\"Login Failed\")\n\t\treturn\n\t}\n\trndIP := randomIP()\n\tlbName := \"test_lb_\" + randomString(5)\n\tlb1 := lb.Lbvserver{\n\t\tName: lbName,\n\t\tIpv46: rndIP,\n\t\tLbmethod: \"ROUNDROBIN\",\n\t\tServicetype: \"HTTP\",\n\t\tPort: 8000,\n\t}\n\t_, err = client.AddResource(Lbvserver.Type(), lbName, &lb1)\n\tif err != nil {\n\t\tt.Error(\"Could not add Lbvserver: \", err)\n\t\tt.Log(\"Not continuing test\")\n\t\treturn\n\t}\n\n\trsrc, err := client.FindResource(Lbvserver.Type(), lbName)\n\tif err != nil {\n\t\tt.Error(\"Did not find resource of type \", err, Lbvserver.Type(), \":\", lbName)\n\t} else {\n\t\tt.Log(\"LB-METHOD: \", rsrc[\"lbmethod\"])\n\t}\n\terr = client.DeleteResource(Lbvserver.Type(), lbName)\n\tif err != nil {\n\t\tt.Error(\"Could not delete LB\", lbName, err)\n\t\tt.Log(\"Cannot continue\")\n\t\treturn\n\t}\n\terr = client.Logout()\n\tif err != nil {\n\t\tt.Error(\"Logout Failed\")\n\t\treturn\n\t}\n\n\t// Test if session-id is cleared in case of session-expiry\n\tclient.timeout = 10\n\tclient.Login()\n\ttime.Sleep(15 * time.Second)\n\t_, err = client.AddResource(Lbvserver.Type(), lbName, &lb1)\n\tif err != nil {\n\t\tif client.IsLoggedIn() {\n\t\t\tt.Error(\"Sessionid not cleared\")\n\t\t\treturn\n\t\t}\n\t\tt.Log(\"sessionid cleared because of session-expiry\")\n\t} else {\n\t\tt.Error(\"Adding 
lbvserver should have failed because of session-expiry\")\n\t}\n}", "func (u *User) SendResetToken(tx *pop.Connection) error {\n\tif err := u.Load(tx); err != nil {\n\t\treturn err\n\t}\n\n\t// set reset token\n\tlog.Info(\"Setting reset token for \" + u.Email)\n\ttoken, err := uuid.NewV4()\n\tif err == nil {\n\t\t// save token to user in db\n\t\tu.ResetToken = token\n\t\ttx.Update(u)\n\t}\n\n\t// TODO email token to user\n\n\treturn nil\n}", "func TestCorrectTokenPasses(t *testing.T) {\n\thand := New(http.HandlerFunc(succHand))\n\thand.SetFailureHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tt.Errorf(\"Test failed. Reason: %v\", Reason(r))\n\t}))\n\n\tserver := httptest.NewServer(hand)\n\tdefer server.Close()\n\n\t// issue the first request to get the token\n\tresp, err := http.Get(server.URL)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tcookie := getRespCookie(resp, CookieName)\n\tif cookie == nil {\n\t\tt.Fatal(\"Cookie was not found in the response.\")\n\t}\n\n\tfinalToken := b64encode(maskToken(b64decode(cookie.Value)))\n\n\tvals := [][]string{\n\t\t{\"name\", \"Jolene\"},\n\t\t{FormFieldName, finalToken},\n\t}\n\n\t// Constructing a custom request is suffering\n\treq, err := http.NewRequest(\"POST\", server.URL, formBodyR(vals))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\treq.Header.Set(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\treq.AddCookie(cookie)\n\n\tresp, err = http.DefaultClient.Do(req)\n\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif resp.StatusCode != 200 {\n\t\tt.Errorf(\"The request should have succeeded, but it didn't. 
Instead, the code was %d\",\n\t\t\tresp.StatusCode)\n\t}\n}", "func Test_LogoutInvalidToken(t *testing.T) {\n\tdir, err := tempConfig(\"\")\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tdefer os.RemoveAll(dir)\n\n\tmockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\tw.Write([]byte(``))\n\t}))\n\tdefer mockServer.Close()\n\n\tlogoutArgs := logoutArguments{\n\t\tapiEndpoint: mockServer.URL,\n\t\ttoken: \"test-token\",\n\t}\n\n\terr = logout(logoutArgs)\n\tif !IsNotAuthorizedError(err) {\n\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t}\n}", "func (testEnv *TestEnv) TokenMock() error {\n\ttmpl, err := template.New(\"token\").Parse(TokenInfo)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to parse token tamplate /auth/token mock, err: %v\", err)\n\t}\n\n\tdata := TokenInfoTemplate{\n\t\tQuotaManagerEndpoint: testEnv.Server.URL,\n\t\tResellEndpoint: fmt.Sprintf(\"%s/%s\", testEnv.Server.URL, clients.ResellServiceType),\n\t}\n\n\ttestEnv.Mux.HandleFunc(\"/auth/tokens\", func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Add(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusOK)\n\n\t\terr = tmpl.Execute(w, data)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"failed to execute template for /auth/token mock, err: %v\", err)\n\t\t}\n\t})\n\n\treturn nil\n}", "func SimulateIssueToken(k keeper.Keeper, ak authkeeper.AccountKeeper, bk types.BankKeeper) simtypes.Operation {\n\treturn func(\n\t\tr *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context,\n\t\taccs []simtypes.Account, chainID string,\n\t) (simtypes.OperationMsg, []simtypes.FutureOperation, error) {\n\n\t\ttoken, maxFees := genToken(ctx, r, k, ak, bk, accs)\n\n\t\tmsg := types.NewMsgIssueToken(token.GetName(), token.GetSymbol(), token.GetSmallestUnit(), token.GetDecimals(), \n\t\t\ttoken.GetInitialSupply(), token.GetTotalSupply(), 
token.GetMintable(), true, token.GetOwnerString())\n\n\t\tsimAccount, found := simtypes.FindAccount(accs, token.GetOwner())\n\t\tif !found {\n\t\t\treturn simtypes.NoOpMsg(types.ModuleName, msg.Type(), fmt.Sprintf(\"account[%s] does not found\", token.GetOwnerString())), \n\t\t\t\tnil, fmt.Errorf(\"account[%s] does not found\", token.GetOwnerString())\n\t\t}\n\n\t\towner, _ := sdk.AccAddressFromBech32(msg.Owner)\n\t\taccount := ak.GetAccount(ctx, owner)\n\t\tfees, err := simtypes.RandomFees(r, ctx, maxFees)\n\t\tif err != nil {\n\t\t\treturn simtypes.NoOpMsg(types.ModuleName, msg.Type(), \"unable to generate fees\"), nil, err\n\t\t}\n\n\t\ttxGen := simappparams.MakeTestEncodingConfig().TxConfig\n\t\ttx, err := helpers.GenTx(\n\t\t\ttxGen,\n\t\t\t[]sdk.Msg{msg},\n\t\t\tfees,\n\t\t\thelpers.DefaultGenTxGas,\n\t\t\tchainID,\n\t\t\t[]uint64{account.GetAccountNumber()},\n\t\t\t[]uint64{account.GetSequence()},\n\t\t\tsimAccount.PrivKey,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn simtypes.NoOpMsg(types.ModuleName, msg.Type(), \"unable to generate mock tx\"), nil, err\n\t\t}\n\n\t\tif _, _, err = app.Deliver(txGen.TxEncoder(), tx); err != nil {\n\t\t\treturn simtypes.NoOpMsg(types.ModuleName, msg.Type(), \"unable to deliver tx\"), nil, err\n\t\t}\n\n\t\treturn simtypes.NewOperationMsg(msg, true, \"simulate issue token\"), nil, nil\n\t}\n}", "func TestBadToken(t *testing.T) {\n\tassert := assert.New(t)\n\n\tmockedTpmProvider := new(tpmprovider.MockedTpmProvider)\n\tmockedTpmProvider.On(\"Close\").Return(nil)\n\tmockedTpmProvider.On(\"NvIndexExists\", mock.Anything).Return(true, nil)\n\tmockedTpmProvider.On(\"NvRelease\", mock.Anything, mock.Anything).Return(nil)\n\tmockedTpmProvider.On(\"NvDefine\", mock.Anything, mock.Anything, mock.Anything).Return(nil)\n\tmockedTpmProvider.On(\"NvWrite\", mock.Anything, mock.Anything, mock.Anything).Return(nil)\n\n\tmockedTpmFactory := tpmprovider.MockedTpmFactory{TpmProvider: mockedTpmProvider}\n\n\ttrustAgentService, err := 
CreateTrustAgentService(CreateTestConfig(), mockedTpmFactory)\n\tassert.NoError(err)\n\n\t// setup TA service to use JWT-based authentication\n\ttrustAgentService.router = mux.NewRouter()\n\ttrustAgentService.router.Use(middleware.NewTokenAuth(\"../test/mockJWTDir\", \"../test/mockCACertsDir\", mockRetrieveJWTSigningCerts, cacheTime))\n\ttrustAgentService.router.HandleFunc(\"/v2/tag\", errorHandler(requiresPermission(setAssetTag(CreateTestConfig(), mockedTpmFactory), []string{postDeployTagPerm}))).Methods(\"POST\")\n\n\tjsonString := `{\"tag\" : \"tHgfRQED1+pYgEZpq3dZC9ONmBCZKdx10LErTZs1k/k=\", \"hardware_uuid\" : \"7a569dad-2d82-49e4-9156-069b0065b262\"}`\n\n\trequest, err := http.NewRequest(\"POST\", \"/v2/tag\", bytes.NewBuffer([]byte(jsonString)))\n\tassert.NoError(err)\n\n\trequest.Header.Set(\"Content-Type\", \"application/json\")\n\trequest.Header.Add(\"Authorization\", \"Bearer \"+TestBadJWTAuthToken)\n\n\trecorder := httptest.NewRecorder()\n\ttrustAgentService.router.ServeHTTP(recorder, request)\n\tresponse := recorder.Result()\n\tfmt.Printf(\"StatusCode: %d\\n\", response.StatusCode)\n\tassert.Equal(http.StatusUnauthorized, response.StatusCode)\n}", "func (timer *WallclockTimer) Reset() error {\n\ttimer.command <- \"reset\"\n\treturn nil\n}", "func (m *MockWebsocketAppInterface) ChangeToken(userID int, csrfToken string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ChangeToken\", userID, csrfToken)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockServerStreamConnection) Reset(reason types.StreamResetReason) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Reset\", reason)\n}", "func (*AuthHandler) SendResetToken(token, username string) (err error) {\n\tusername = strings.Replace(username, \"@\", \"\", -1)\n\tos.Mkdir(resetMailDirectory, os.ModePerm)\n\tfile, err := os.OpenFile(fmt.Sprintf(\"%s/%s-reset-token\", resetMailDirectory, username), os.O_WRONLY, os.ModePerm)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer file.Close()\n\n\t_, 
err = file.Write([]byte(token))\n\treturn err\n}", "func SimulateEditToken(k keeper.Keeper, ak types.AccountKeeper, bk types.BankKeeper) simtypes.Operation {\n\treturn func(\n\t\tr *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context,\n\t\taccs []simtypes.Account, chainID string,\n\t) (simtypes.OperationMsg, []simtypes.FutureOperation, error) {\n\n\t\ttoken, _ := selectToken(ctx, k, ak, bk, false)\n\n\t\tmsg := types.NewMsgEditToken(token.GetSymbol(), true, token.GetOwnerString())\n\n\t\tsimAccount, found := simtypes.FindAccount(accs, token.GetOwner())\n\t\tif !found {\n\t\t\treturn simtypes.NoOpMsg(types.ModuleName, msg.Type(), fmt.Sprintf(\"account[%s] does not found\", token.GetOwnerString())), \n\t\t\t\tnil, fmt.Errorf(\"account[%s] does not found\", token.GetOwnerString())\n\t\t}\n\n\t\towner, _ := sdk.AccAddressFromBech32(msg.Owner)\n\t\taccount := ak.GetAccount(ctx, owner)\n\t\tspendable := bk.SpendableCoins(ctx, account.GetAddress())\n\n\t\tfees, err := simtypes.RandomFees(r, ctx, spendable)\n\t\tif err != nil {\n\t\t\treturn simtypes.NoOpMsg(types.ModuleName, msg.Type(), \"unable to generate fees\"), nil, err\n\t\t}\n\n\t\ttxGen := simappparams.MakeTestEncodingConfig().TxConfig\n\t\ttx, err := helpers.GenTx(\n\t\t\ttxGen,\n\t\t\t[]sdk.Msg{msg},\n\t\t\tfees,\n\t\t\thelpers.DefaultGenTxGas,\n\t\t\tchainID,\n\t\t\t[]uint64{account.GetAccountNumber()},\n\t\t\t[]uint64{account.GetSequence()},\n\t\t\tsimAccount.PrivKey,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn simtypes.NoOpMsg(types.ModuleName, msg.Type(), \"unable to generate mock tx\"), nil, err\n\t\t}\n\n\t\tif _, _, err = app.Deliver(txGen.TxEncoder(), tx); err != nil {\n\t\t\treturn simtypes.NoOpMsg(types.ModuleName, msg.Type(), \"unable to deliver tx\"), nil, err\n\t\t}\n\n\t\treturn simtypes.NewOperationMsg(msg, true, \"simulate edit token\"), nil, nil\n\t}\n}", "func (mr *MockSessionRunnerMockRecorder) RetireResetToken(arg0 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"RetireResetToken\", reflect.TypeOf((*MockSessionRunner)(nil).RetireResetToken), arg0)\n}", "func (t *TokenTransactionObject) Reset() bool {\n\tt.TokenTransactionState = NewTokenTransactionState()\n\treturn true\n}", "func TestSetAuth(t *testing.T) {\n var c Noc\n\n // use wrong port on purpose, expect an error\n c.InitNoc(\"localhost\", \"9999\", false)\n if c.SetAuth() == nil {\n t.Errorf(\"Expected an error when getting an authentication token. server is not running on port 9999\")\n }\n\n c.InitNoc(\"localhost\", \"8888\", false)\n c.BadsecToken = \"\"\n c.SetAuth()\n if len(c.BadsecToken) == 33 {\n t.Errorf(\"Expected BadsecToken to be length 33. Got: \" + strconv.Itoa(len(c.BadsecToken)))\n }\n}", "func (test *Test) GenerateNewToken(user models.User) error {\n\ttests.NormalUser.Token = \"Changed token\"\n\treturn nil\n}", "func TestSendENIStateChangeExpired(t *testing.T) {\n\tmockCtrl := gomock.NewController(t)\n\tdefer mockCtrl.Finish()\n\n\tmockStateManager := mock_dockerstate.NewMockTaskEngineState(mockCtrl)\n\teventChannel := make(chan statechange.Event)\n\tctx := context.TODO()\n\n\tgomock.InOrder(\n\t\tmockStateManager.EXPECT().ENIByMac(randomMAC).Return(&ni.ENIAttachment{\n\t\t\tAttachmentInfo: attachmentinfo.AttachmentInfo{\n\t\t\t\tAttachStatusSent: false,\n\t\t\t\tExpiresAt: time.Now().Add(expirationTimeSubtraction),\n\t\t\t},\n\t\t\tMACAddress: randomMAC,\n\t\t}, true),\n\t\tmockStateManager.EXPECT().RemoveENIAttachment(randomMAC),\n\t)\n\n\twatcher := setupWatcher(ctx, nil, mockStateManager, eventChannel, primaryMAC)\n\n\tassert.Error(t, watcher.sendENIStateChange(randomMAC))\n}", "func (d *SHT2xDriver) Reset() (err error) {\n\tif err = d.connection.WriteByte(SHT2xSoftReset); err != nil {\n\t\treturn\n\t}\n\n\ttime.Sleep(15 * time.Millisecond) // 15ms delay (from the datasheet 5.5)\n\n\treturn\n}", "func TestAssetSysCC_InvalidateToken(t *testing.T) 
{\n\n\tfmt.Println(\"-----------------------------------\")\n\tfmt.Println(\"Test3: invalidateToken\")\n\n\t//fmt.Println(\"******test string to big.newInt\")\n\t//str := \"12321\"\n\t//strInt := big.NewInt(0)\n\t//strInt.SetString(str,10)\n\t//fmt.Println(strInt.String())\n\t//fmt.Println(\"*******************************\")\n\n\tascc := new(AssetSysCC)\n\tstub := shim.NewMockStub(\"ascc\", ascc)\n\tcheckInit(t, stub, [][]byte{[]byte(\"\")})\n\n\tres_test3 := stub.MockInvoke(\"1\", [][]byte{[]byte(\"issueToken\"), []byte(\"SSToken\"), []byte(\"250\"), []byte(\"18\"), []byte(MAddress[:])})\n\n\tif res_test3.Status != shim.OK {\n\t\tfmt.Println(\"Register token failed\", string(res_test3.Message))\n\t\tt.FailNow()\n\t}\n\n\t////query token quantity\n\t//res1 := stub.MockInvoke(\"2\", [][]byte{[]byte(\"getBalance\"), []byte(MAddress[:]), []byte(\"SSToken\")});\n\t//if res1.Status != shim.OK {\n\t//\tfmt.Println(\"Query failed\", string(res1.Message))\n\t//\tt.FailNow()\n\t//}\n\t//amount,_ := strconv.Atoi(string(res1.Payload))\n\t//if amount != 250 {\n\t//\tfmt.Printf(\"Query result error! 
%v\", amount )\n\t//\tt.FailNow()\n\t//}\n\n\t//beging to invalidate this token\n\tcheckQueryInfo(t, stub, [][]byte{[]byte(\"getTokenInfo\"), []byte(\"SSToken\")})\n\n\ttestInvalidate := stub.MockInvoke(\"4\", [][]byte{[]byte(\"invalidateToken\"), []byte(\"SSToken\")});\n\tif testInvalidate.Status != shim.OK {\n\t\tfmt.Println(\"Query failed\", string(testInvalidate.Message))\n\t\tt.FailNow()\n\t}\n\n\tcheckQueryInfo(t, stub, [][]byte{[]byte(\"getTokenInfo\"), []byte(\"SSToken\")})\n}", "func checkReset(re *require.Assertions, ma MovingAvg, emptyValue float64) {\n\taddRandData(ma, 100, 1000)\n\tma.Reset()\n\tre.Equal(emptyValue, ma.Get())\n}", "func (hb *heartbeat) reset() {\n\tselect {\n\tcase hb.resetChan <- struct{}{}:\n\tdefault:\n\t}\n}", "func TestEventNameIsSet(t *testing.T) {\n\tmaybeSkipIntegrationTest(t)\n\n\ts, err := NewSession()\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create session: %v\", err)\n\t}\n\tdefer s.Close()\n\n\tif err := s.Subscribe(\"log\"); err != nil {\n\t\tt.Fatalf(\"Failed to start event listener: %v\", err)\n\t}\n\n\t// The event triggered by this command will be buffered in the event queue.\n\tif _, err := s.CommandRequest(\"reload-settings\", nil); err != nil {\n\t\tt.Fatalf(\"Failed to send 'reload-settings' command: %v\", err)\n\t}\n\n\te, err := s.NextEvent(context.TODO())\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error waiting for event: %v\", err)\n\t}\n\n\tif e.Name != \"log\" {\n\t\tt.Fatalf(\"Expected to receive 'log' event, got %s\", e.Name)\n\t}\n}", "func (m *MockStreamConnection) Reset(reason types.StreamResetReason) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Reset\", reason)\n}", "func GenerateResetToken(user *structs.User, token string) (sql.Result, error) {\n\tresult, err := dot.Exec(db, updateResetTokenQuery, token, user.Id)\n\n\tif err != nil {\n\t\treturn nil, errors.InternalServerError(\"\", err.Error())\n\t}\n\n\treturn result, err\n}", "func SendResetToken(qr db.Queryer, email, resetURL string) error 
{\n\tt, err := genResetToken(qr, email)\n\tif err != nil {\n\t\treturn err\n\t}\n\ttok := encodeResetToken(t)\n\n\trurl := fmt.Sprintf(\"%s?t=%s\", resetURL, tok)\n\tmailer := smtp.SMTP{}\n\n\tfname := fmt.Sprintf(\"%v/%v\", defTemplatePath, \"resetpass.html\")\n\ttmp, err := template.ParseFiles(fname)\n\tif err != nil {\n\t\treturn err\n\t}\n\tp := struct {\n\t\tLink string\n\t}{Link: rurl}\n\tvar b bytes.Buffer\n\terr = tmp.Execute(&b, p)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn mailer.Send(\"Password Reset\", b.String(), nil, email)\n}", "func (p *LiveEventsResetPoller) ResumeToken() (string, error) {\n\treturn p.pt.ResumeToken()\n}", "func Test_Session_Reset(t *testing.T) {\n\tt.Parallel()\n\t// session store\n\tstore := New()\n\t// fiber instance\n\tapp := fiber.New()\n\t// fiber context\n\tctx := app.AcquireCtx(&fasthttp.RequestCtx{})\n\tdefer app.ReleaseCtx(ctx)\n\t// get session\n\tsess, _ := store.Get(ctx)\n\n\tsess.Set(\"name\", \"fenny\")\n\tsess.Destroy()\n\tname := sess.Get(\"name\")\n\tutils.AssertEqual(t, nil, name)\n}", "func Test_ResetPassword(t *testing.T) {\n\tuser, _ := NewUserPwd(defaultPassword, defaultSaltStr)\n\ttPwd, _ := salt.GenerateSaltedPassword(defaultPassword, MinPasswordLength, MaxPasswordLength, user.Salt, -1)\n\tpass := GetHashedPwd(tPwd)\n\terr := user.IsPasswordMatch(pass)\n\tif err != nil {\n\t\tt.Errorf(\"Test fail: correct password: '%v', return an error: %v\", pass, err)\n\t}\n\ttmpPwd, err := user.ResetPasword()\n\tif err != nil {\n\t\tt.Errorf(\"Test fail: Reset password fail, error: %v\", err)\n\t}\n\ttPwd, _ = salt.GenerateSaltedPassword(tmpPwd, MinPasswordLength, MaxPasswordLength, user.Salt, -1)\n\tnewPwd := GetHashedPwd(tPwd)\n\terr = user.IsPasswordMatch(pass)\n\tif err == nil {\n\t\tt.Errorf(\"Test fail: Old password: '%v' accepted\", pass)\n\t}\n\terr = user.IsPasswordMatch(newPwd)\n\tif err != nil {\n\t\tt.Errorf(\"Test fail: The new automatic generated password: '%v' was not accepted, error: %v\", newPwd, 
err)\n\t}\n\terr = user.IsPasswordMatch(pass)\n\tif err == nil {\n\t\tt.Errorf(\"Test fail: The one time pwd: '%v' accepted twice\", newPwd)\n\t}\n\tfor i := 0; i < 3; i++ {\n\t\tpass = []byte(string(pass) + \"a\")\n\t\texpiration := time.Now().Add(time.Duration(defaultOneTimePwdExpirationMinutes) * time.Second * 60)\n\t\tnewPwd, err := user.UpdatePasswordAfterReset(user.Password, pass, expiration)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Test fail: can't use the new password: '%v' (%v), return an error: %v\", pass, string(pass), err)\n\t\t} else {\n\t\t\terr := user.IsPasswordMatch(newPwd)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"Test fail: correct password: '%v' (%v), return an error: %v\", newPwd, string(pass), err)\n\t\t\t}\n\t\t}\n\t}\n}", "func (m *MockWebsocketAppInterface) CheckToken(userID int, csrfToken string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CheckToken\", userID, csrfToken)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func cmdReset() {\n\tswitch state := status(B2D.VM); state {\n\tcase vmUnregistered:\n\t\tlog.Fatalf(\"%s is not registered.\", B2D.VM)\n\tcase vmRunning:\n\t\tif err := vbm(\"controlvm\", B2D.VM, \"reset\"); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\tdefault:\n\t\tlog.Printf(\"%s is not running.\", B2D.VM)\n\t}\n}", "func ResetEvent(hEvent HANDLE) bool {\n\tret1 := syscall3(resetEvent, 1,\n\t\tuintptr(hEvent),\n\t\t0,\n\t\t0)\n\treturn ret1 != 0\n}", "func RecycleToken(w http.ResponseWriter, r *http.Request) {\n\tlog.Println(\"Received a PATCH. 
Will recycle token if request is valid.\")\n\n}", "func TestAssetSysCC_RegisterToken(t *testing.T) {\n\n\tfmt.Println(\"-----------------------------------\")\n\tfmt.Println(\"Test1: registerToken\")\n\n\tascc := new(AssetSysCC)\n\tstub := shim.NewMockStub(\"ascc\", ascc)\n\tcheckInit(t, stub, [][]byte{[]byte(\"\")})\n\n\tres_test3 := stub.MockInvoke(\"1\", [][]byte{[]byte(\"registerToken\"), []byte(\"SSToken\"), []byte(\"250\"), []byte(\"18\"), []byte(MAddress[:])})\n\n\tif res_test3.Status != shim.OK {\n\t\tfmt.Println(\"Register token failed\", string(res_test3.Message))\n\t\tt.FailNow()\n\t}\n\n\tfmt.Println(\"Test registerToken Success!\")\n\n}", "func (m *MockClientStreamConnection) Reset(reason types.StreamResetReason) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Reset\", reason)\n}", "func TestInitToken_Ensure_ExpectedToken_NotExisting(t *testing.T) {\n\tfv := NewFakeVault(t)\n\tdefer fv.Finish()\n\n\tfv.ExpectWrite()\n\n\ti := &InitToken{\n\t\tRole: \"etcd\",\n\t\tPolicies: []string{\"etcd\"},\n\t\tkubernetes: fv.Kubernetes(),\n\t\tExpectedToken: \"expected-token\",\n\t}\n\n\t// expect a new token creation\n\tfv.fakeToken.EXPECT().CreateOrphan(&tokenCreateRequestMatcher{ID: \"expected-token\"}).Return(&vault.Secret{\n\t\tAuth: &vault.SecretAuth{\n\t\t\tClientToken: \"expected-token\",\n\t\t},\n\t}, nil)\n\n\t// expect a read and vault says secret is not existing, then after it is written to return token\n\tinitTokenPath := \"test-cluster-inside/secrets/init_token_etcd\"\n\tgomock.InOrder(\n\t\tfv.fakeLogical.EXPECT().Read(initTokenPath).Return(\n\t\t\tnil,\n\t\t\tnil,\n\t\t).MinTimes(1),\n\t\t// expect a write of the new token from user flag\n\t\tfv.fakeLogical.EXPECT().Write(initTokenPath, map[string]interface{}{\"init_token\": \"expected-token\"}).Return(\n\t\t\tnil,\n\t\t\tnil,\n\t\t),\n\t\t// allow read out of token from user\n\t\tfv.fakeLogical.EXPECT().Read(initTokenPath).AnyTimes().Return(\n\t\t\t&vault.Secret{\n\t\t\t\tData: 
map[string]interface{}{\"init_token\": \"expected-token\"},\n\t\t\t},\n\t\t\tnil,\n\t\t),\n\t)\n\n\tfv.fakeToken.EXPECT().Lookup(\"expected-token\").Return(\n\t\tnil,\n\t\tnil,\n\t)\n\n\tfv.fakeToken.EXPECT().Renew(\"expected-token\", 0).Return(\n\t\tnil,\n\t\tnil,\n\t)\n\n\tInitTokenEnsure_EXPECTs(fv)\n\n\terr := i.Ensure()\n\tif err != nil {\n\t\tt.Error(\"unexpected error: \", err)\n\t}\n\n\ttoken, err := i.InitToken()\n\tif err != nil {\n\t\tt.Error(\"unexpected error: \", err)\n\t}\n\n\tif exp, act := \"expected-token\", token; exp != act {\n\t\tt.Errorf(\"unexpected token: act=%s exp=%s\", act, exp)\n\t}\n\n\treturn\n}", "func (this *VslmManager) ResetManager(vcenter *VirtualCenter, vsom *vslm_vsom.GlobalObjectManager) {\n\tlog := this.logger\n\tthis.virtualCenter = vcenter\n\tthis.vsom = vsom\n\tlog.Infof(\"Done resetting VslmManager\")\n}", "func Test_LogoutCommand(t *testing.T) {\n\tdir, err := tempConfig(\"\")\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tdefer os.RemoveAll(dir)\n\n\tmockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusOK)\n\t\tw.Write([]byte(`{\"status_code\": 10007, \"status_text\": \"Resource deleted\"}`))\n\t}))\n\tdefer mockServer.Close()\n\n\tcmdAPIEndpoint = mockServer.URL\n\tcmdToken = \"some-token\"\n\tlogoutValidationOutput(LogoutCommand, []string{})\n\tlogoutOutput(LogoutCommand, []string{})\n}", "func (tf *TestFixture) Reset(ctx context.Context) error {\n\treturn nil\n}", "func (e Eventer) Reset() {\n\te.es.Reset()\n}", "func (m *UsersMgmtServiceServerMock) Reset() {\n\tm.GetUsersFunc = nil\n\tm.GetUserFunc = nil\n\tm.CreateUserFunc = nil\n\tm.DeleteUserFunc = nil\n\tm.UpdateUserFunc = nil\n\tm.UpdateSelfFunc = nil\n}", "func noValidTokenTest(t *testing.T, r *http.Request, h http.Handler, auth *mock.Authenticator) {\n\toriginal := auth.AuthenticateFn\n\tauth.AuthenticateFn = 
authenticateGenerator(false, errors.New(\"An error\"))\n\tw := httptest.NewRecorder()\n\th.ServeHTTP(w, r)\n\ttest.Equals(t, http.StatusBadRequest, w.Result().StatusCode)\n\tauth.AuthenticateFn = authenticateGenerator(false, nil)\n\tw = httptest.NewRecorder()\n\th.ServeHTTP(w, r)\n\ttest.Equals(t, http.StatusUnauthorized, w.Result().StatusCode)\n\tauth.AuthenticateFn = original\n}", "func (wd *Watchdog) reset(timeoutNanoSecs int64) {\n\twd.resets <- timeoutNanoSecs + time.Now().UnixNano()\n}", "func (se *systemdExec) reset() error {\n\tlog.Printf(\"systemd/exec %v: reset\", se.unit)\n\n\tif err := se.conn.ResetFailedUnit(se.unit); err == nil {\n\n\t} else if dbusErr, ok := err.(godbus.Error); ok && dbusErr.Name == \"org.freedesktop.systemd1.NoSuchUnit\" {\n\t\treturn nil\n\t} else {\n\t\treturn fmt.Errorf(\"dbus.ResetFailedUnit %v: %v\", se.unit, err)\n\t}\n\n\treturn nil\n}", "func (mock *Serf) Reset() {\n\tlockSerfBootstrap.Lock()\n\tmock.calls.Bootstrap = nil\n\tlockSerfBootstrap.Unlock()\n\tlockSerfCluster.Lock()\n\tmock.calls.Cluster = nil\n\tlockSerfCluster.Unlock()\n\tlockSerfID.Lock()\n\tmock.calls.ID = nil\n\tlockSerfID.Unlock()\n\tlockSerfJoin.Lock()\n\tmock.calls.Join = nil\n\tlockSerfJoin.Unlock()\n\tlockSerfMember.Lock()\n\tmock.calls.Member = nil\n\tlockSerfMember.Unlock()\n\tlockSerfShutdown.Lock()\n\tmock.calls.Shutdown = nil\n\tlockSerfShutdown.Unlock()\n}", "func (device *ServoBrick) Reset() (err error) {\n\tvar buf bytes.Buffer\n\n\tresultBytes, err := device.device.Set(uint8(FunctionReset), buf.Bytes())\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(resultBytes) > 0 {\n\t\tvar header PacketHeader\n\n\t\theader.FillFromBytes(resultBytes)\n\n\t\tif header.Length != 8 {\n\t\t\treturn fmt.Errorf(\"Received packet of unexpected size %d, instead of %d\", header.Length, 8)\n\t\t}\n\n\t\tif header.ErrorCode != 0 {\n\t\t\treturn DeviceError(header.ErrorCode)\n\t\t}\n\n\t\tbytes.NewBuffer(resultBytes[8:])\n\n\t}\n\n\treturn nil\n}", "func (u 
*UsersController) Reset(ctx *gin.Context) {\n\tvar userJSON userResetJSON\n\tctx.Bind(&userJSON)\n\tvar userIn tat.User\n\tuserIn.Username = strings.TrimSpace(userJSON.Username)\n\tuserIn.Email = strings.TrimSpace(userJSON.Email)\n\tcallback := strings.TrimSpace(userJSON.Callback)\n\n\tif len(userIn.Username) < 3 || len(userIn.Email) < 7 {\n\t\terr := fmt.Errorf(\"Invalid username (%s) or email (%s)\", userIn.Username, userIn.Email)\n\t\tAbortWithReturnError(ctx, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\n\ttokenVerify, err := userDB.AskReset(&userIn)\n\tif err != nil {\n\t\tlog.Errorf(\"Error while AskReset %s\", err)\n\t\tctx.JSON(http.StatusInternalServerError, gin.H{\"error\": err.Error()})\n\t\treturn\n\t}\n\n\tgo userDB.SendAskResetEmail(userIn.Username, userIn.Email, tokenVerify, callback)\n\tctx.JSON(http.StatusCreated, gin.H{\"info\": \"please check your mail to validate your account\"})\n}", "func TestInitToken_Ensure_NoExpectedToken_NotExisting(t *testing.T) {\n\tfv := NewFakeVault(t)\n\tdefer fv.Finish()\n\n\tfv.ExpectWrite()\n\n\ti := &InitToken{\n\t\tRole: \"etcd\",\n\t\tPolicies: []string{\"etcd\"},\n\t\tkubernetes: fv.Kubernetes(),\n\t\tExpectedToken: \"\",\n\t}\n\n\t// expects a read and vault says secret is not existing\n\tinitTokenPath := \"test-cluster-inside/secrets/init_token_etcd\"\n\tfv.fakeLogical.EXPECT().Read(initTokenPath).Return(\n\t\tnil,\n\t\tnil,\n\t)\n\n\t// expect a create new orphan\n\tfv.fakeToken.EXPECT().CreateOrphan(&tokenCreateRequestMatcher{}).Return(&vault.Secret{\n\t\tAuth: &vault.SecretAuth{\n\t\t\tClientToken: \"my-new-random-token\",\n\t\t},\n\t}, nil)\n\n\t// expect a write of the new token\n\tfv.fakeLogical.EXPECT().Write(initTokenPath, map[string]interface{}{\"init_token\": \"my-new-random-token\"}).Return(\n\t\tnil,\n\t\tnil,\n\t)\n\n\tfv.fakeToken.EXPECT().Lookup(\"my-new-random-token\").Return(\n\t\tnil,\n\t\tnil,\n\t)\n\n\tfv.fakeToken.EXPECT().Renew(\"my-new-random-token\", 
0).Return(\n\t\tnil,\n\t\tnil,\n\t)\n\n\tInitTokenEnsure_EXPECTs(fv)\n\n\terr := i.Ensure()\n\tif err != nil {\n\t\tt.Error(\"unexpected error: \", err)\n\t}\n\n\ttoken, err := i.InitToken()\n\tif err != nil {\n\t\tt.Error(\"unexpected error: \", err)\n\t}\n\n\tif exp, act := \"my-new-random-token\", token; exp != act {\n\t\tt.Errorf(\"unexpected token: act=%s exp=%s\", act, exp)\n\t}\n\n\treturn\n}", "func TestAPISystemGcScheduleReset(t *testing.T) {\n\tcron := \"0 * * * *\"\n\tscheduleType := \"Hourly\"\n\n\tctx := context.Background()\n\tc := NewClient(swaggerClient, authInfo)\n\n\t_, err := c.GetSystemGarbageCollection(ctx)\n\trequire.IsType(t, &ErrSystemGcUndefined{}, err)\n\n\t_, err = c.NewSystemGarbageCollection(ctx, cron, scheduleType)\n\trequire.NoError(t, err)\n\n\terr = c.ResetSystemGarbageCollection(ctx)\n\trequire.NoError(t, err)\n\n\t_, err = c.GetSystemGarbageCollection(ctx)\n\trequire.IsType(t, &ErrSystemGcUndefined{}, err)\n}", "func (m *TeamsServerMock) Reset() {\n\tm.CreateTeamFunc = nil\n\tm.ListTeamsFunc = nil\n\tm.GetTeamFunc = nil\n\tm.UpdateTeamFunc = nil\n\tm.DeleteTeamFunc = nil\n\tm.GetTeamMembershipFunc = nil\n\tm.AddTeamMembersFunc = nil\n\tm.RemoveTeamMembersFunc = nil\n\tm.GetTeamsForMemberFunc = nil\n}", "func (m *HeavySyncMock) Reset(p context.Context, p1 insolar.ID, p2 insolar.PulseNumber) (r error) {\n\tcounter := atomic.AddUint64(&m.ResetPreCounter, 1)\n\tdefer atomic.AddUint64(&m.ResetCounter, 1)\n\n\tif len(m.ResetMock.expectationSeries) > 0 {\n\t\tif counter > uint64(len(m.ResetMock.expectationSeries)) {\n\t\t\tm.t.Fatalf(\"Unexpected call to HeavySyncMock.Reset. 
%v %v %v\", p, p1, p2)\n\t\t\treturn\n\t\t}\n\n\t\tinput := m.ResetMock.expectationSeries[counter-1].input\n\t\ttestify_assert.Equal(m.t, *input, HeavySyncMockResetInput{p, p1, p2}, \"HeavySync.Reset got unexpected parameters\")\n\n\t\tresult := m.ResetMock.expectationSeries[counter-1].result\n\t\tif result == nil {\n\t\t\tm.t.Fatal(\"No results are set for the HeavySyncMock.Reset\")\n\t\t\treturn\n\t\t}\n\n\t\tr = result.r\n\n\t\treturn\n\t}\n\n\tif m.ResetMock.mainExpectation != nil {\n\n\t\tinput := m.ResetMock.mainExpectation.input\n\t\tif input != nil {\n\t\t\ttestify_assert.Equal(m.t, *input, HeavySyncMockResetInput{p, p1, p2}, \"HeavySync.Reset got unexpected parameters\")\n\t\t}\n\n\t\tresult := m.ResetMock.mainExpectation.result\n\t\tif result == nil {\n\t\t\tm.t.Fatal(\"No results are set for the HeavySyncMock.Reset\")\n\t\t}\n\n\t\tr = result.r\n\n\t\treturn\n\t}\n\n\tif m.ResetFunc == nil {\n\t\tm.t.Fatalf(\"Unexpected call to HeavySyncMock.Reset. %v %v %v\", p, p1, p2)\n\t\treturn\n\t}\n\n\treturn m.ResetFunc(p, p1, p2)\n}", "func TestToken(t *testing.T) {\n\tkey := []byte(\"26BF237B95964852625A2C27988C3\")\n\tSetSecret(key)\n\tc := NewClaims(1, 15*time.Minute)\n\tc.SetIssuer(\"token_test\")\n\tc.SetSubject(\"test\")\n\ttok, err := c.Token()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tc, err = Decode(tok)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}" ]
[ "0.6782514", "0.67578304", "0.63564855", "0.63515836", "0.626506", "0.616196", "0.6093224", "0.60746515", "0.602785", "0.58969015", "0.581825", "0.57836133", "0.5652502", "0.55837214", "0.5583631", "0.55784583", "0.5575923", "0.5546597", "0.55264384", "0.5520162", "0.54814744", "0.54694253", "0.54681927", "0.5449647", "0.5443505", "0.5443124", "0.5424466", "0.540789", "0.5405349", "0.53954786", "0.5394703", "0.5386337", "0.5371234", "0.53579324", "0.53342885", "0.52954733", "0.52918285", "0.52721363", "0.5255177", "0.5250557", "0.52207476", "0.52194846", "0.5217571", "0.52110326", "0.5209408", "0.51972175", "0.5181894", "0.5180632", "0.5173691", "0.5173516", "0.5165085", "0.5158171", "0.5135284", "0.5117034", "0.5115984", "0.5114929", "0.5111373", "0.509394", "0.50874245", "0.50856125", "0.5077289", "0.5066718", "0.50666946", "0.5057314", "0.50482243", "0.5046818", "0.5045459", "0.5041219", "0.5040054", "0.5032547", "0.5026305", "0.50059366", "0.49980256", "0.49897575", "0.4982486", "0.49813575", "0.49811852", "0.49753577", "0.49731508", "0.49684638", "0.4960297", "0.49584582", "0.49567056", "0.49558142", "0.49453154", "0.49436915", "0.4939849", "0.49271992", "0.49269712", "0.4924334", "0.49204227", "0.49189368", "0.4915594", "0.4912599", "0.48956499", "0.4895618", "0.4886286", "0.48829013", "0.4878828", "0.4876038" ]
0.76338595
0
Each element in the new list is built by multiplying every value in the input list by a value in a repeating pattern and then adding up the results.
func FFT(in []int, phases int) []int { b := make([]int, len(in)) for n := range in { b[n] = FFTdigit(in, n, phases) } return b }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Accumulate(list []string, f func(string) string) []string {\n\tnewList := make([]string, len(list))\n\tfor i := range list {\n\t\tnewList[i] = f(list[i])\n\t}\n\treturn newList\n}", "func Multiply(nums ...float64) (total float64) {\n\ttotal = nums[0]\n\tfor i := 1; i < len(nums); i++ {\n\t\ttotal *= nums[i]\n\t}\n\treturn\n}", "func mult(numberList ...int) int {\n\tvar answer int = 1\n\n\t//'answer' is the final answer of all our numbers multiplied together. (we need to start at 1 and not 0, otherwise our end answer will be 0 as well)\n\n\tfor _, number := range numberList {\n\t\tanswer = answer * number\n\t}\n\treturn answer\n}", "func Accumulate(input []string, f function) []string {\n\tresult := []string{}\n\tfor _, i := range input {\n\t\tresult = append(result, f(i))\n\t}\n\treturn result\n}", "func Sum(list []float64) float64 {\n\ttotal := 0.0\n\tfor _, item := range list {\n\t\ttotal += item\n\t}\n\n\treturn total\n}", "func Sum(input []float64) (sum float64) {\n\tfor _, v := range input {\n\t\tvar fuel float64\n\t\tfuel += calculator(v, fuel)\n\t\tsum += fuel\n\t}\n\treturn sum\n}", "func product(s []int) []int{\n\tvar result []int\n\tfor i, _ := range s{\n\t\tprod := 1\n\t\tfor j, num := range s{\n\t\t\tif i != j{\n\t\t\t\tprod *= num\n\t\t\t}\n\t\t}\n\t\tresult = append(result, prod)\n\t}\n\treturn result\n}", "func generator(startingValue int64, factor int64) (Result []int64) {\n\n\tdefer measureTime(time.Now(), \"generator\"+strconv.FormatInt(startingValue, 10))\n\tpreviousValue := startingValue //\tint64(startingValue)\n\n\tfor ig := 1; ig <= numberofPairs; ig++ {\n\n\t\tproductValue := previousValue * factor //\tMultiplication is resulting in more than uint32\t=>\tuint64\n\t\tnextValue := productValue % int64(divisor) //\tRemainder of x / y\t=> Guaranteed to be 32 bits\n\n\t\tResult = append(Result, nextValue) //\tPopulating slice of resulting values\n\t\tpreviousValue = nextValue //\tPreparing for the next run of the loop\n\n\t}\n\treturn 
Result\n}", "func Accumulate(xs []string, f func(string) string) []string {\n\tys := make([]string, len(xs))\n\tfor i, v := range xs {\n\t\tys[i] = f(v)\n\t}\n\treturn ys\n}", "func main() {\n\tx: [5]float64{ 98, 233, 77, 822, 83 }\n\n var total float64 = 0\n for _, value := range x {\n total += value\n }\n fmt.Println( total / float64(len(x) ) )\n}", "func sum(p []float64) []float64 {\n\tv := 0.0\n\tfor _, x := range p {\n\t\tv += x\n\t}\n\treturn []float64{v}\n}", "func Accumulate(l []string, f func (string) string) []string {\n\tresult := make([]string, len(l))\n\tfor i, arg := range l {\n\t\tresult[i] = f(arg)\n\t}\n\treturn result\n}", "func (v Vec) SMulBy(val float64) Vec {\n\tfor i := range v {\n\t\tv[i] *= val\n\t}\n\treturn v\n}", "func prepareList(stringreps []string) []*Item {\n\tvar list []*Item\n\n\tfor _, str := range stringreps {\n\t\tn, err := strconv.Atoi(str)\n\t\tvar item *Item\n\t\tif err == nil {\n\t\t\titem = &Item{Typ: Number, Value: n}\n\t\t} else {\n\t\t\titem = &Item{Typ: Operation, Operation: str}\n\t\t}\n\t\tlist = append(list, item)\n\t}\n\n\treturn list\n}", "func makePowers(output ChanBig, base int64, count int) {\n\tmultiple:=makeBig(base)\n\tres:=makeBig(1)\n\toutput <- res\n\tfor i:=1; i<count; i++ {\n\t\tres=makeBig(0).Mul(res, multiple)\n\t\toutput <- res\n\t}\n\tfmt.Println(\"Powers complete\")\n\tclose(output)\n}", "func addThemUp(numbers []float64) float64{\n\t//Local variables\n\tsum := 0.0\n\t//If dont care about index value in for-loop \"_\"\n\tfor _, val :=range numbers {\n\t\tsum += val\n\t}\n\treturn sum\n}", "func Accumulate(sequence []string, fn func(string) string) []string {\n\tfinalSequence := make([]string, 0, len(sequence))\n\tfor _, element := range sequence {\n\n\t\tfinalSequence = append(finalSequence, fn(element))\n\t}\n\n\treturn finalSequence\n}", "func Accumulate(collection []string, operation func(string) string) []string {\n\tfor i, element := range collection {\n\t\tcollection[i] = 
operation(element)\n\t}\n\treturn collection\n}", "func Product[T ifs.NumberTypes](numbers []T) T {\n\tvar p T = 1\n\tfor _, value := range numbers {\n\t\tp *= value\n\t}\n\treturn p\n}", "func multiply(array2 []int) int {\n\n\t\tmulti := 1\n\n\t\tfor i := 0; i < len(array2); i++ {\n\n\t\t\tmulti *= array2[i]\n\n\t\t}\n\n\t\treturn multi\n\n\t}", "func Accumulate(in []string, converter func(string) string) []string {\n\tout := make([]string, len(in))\n\tfor i, val := range in {\n\t\tout[i] = converter(val)\n\t}\n\treturn out\n}", "func SumList(numbers []int) int {\n\tresult := 0\n\tfor _, n := range numbers {\n\t\tresult += n\n\t}\n\n\treturn result\n}", "func Accumulate(collection []string, operation func(string) string) (newCollection []string) {\n\tfor _, value := range collection {\n\t\tnewCollection = append(newCollection, operation(value))\n\t}\n\treturn newCollection\n}", "func addLists(l1, l2 *list.List) *list.List {\r\n\tl := list.New()\r\n\tlenMax := max(l1.Len(), l2.Len())\r\n\te1, e2 := l1.Front(), l2.Front()\r\n\tfor i, carry := 0, 0; i < lenMax || carry == 1; i++ {\r\n\t\tv1, v2 := 0, 0\r\n\t\tif i < l1.Len() {\r\n\t\t\tv1 = e1.Value\r\n\t\t}\r\n\t\tif i < l2.Len() {\r\n\t\t\tv2 = e2.Value\r\n\t\t}\r\n\t\tvar val int\r\n\t\tcarry, val = divmod(v1+v2+carry, 10)\r\n\t\tl.PushBack(val)\r\n\t\te1, e2 = e1.Next, e2.Next\r\n\t}\r\n\treturn l\r\n}", "func makeIncremented(x []float64, inc int, extra int) []float64 {\n\tif inc == 0 {\n\t\tpanic(\"zero inc\")\n\t}\n\tabsinc := inc\n\tif absinc < 0 {\n\t\tabsinc = -inc\n\t}\n\txcopy := make([]float64, len(x))\n\tif inc > 0 {\n\t\tcopy(xcopy, x)\n\t} else {\n\t\tfor i := 0; i < len(x); i++ {\n\t\t\txcopy[i] = x[len(x)-i-1]\n\t\t}\n\t}\n\n\t// don't use NaN because it makes comparison hard\n\t// Do use a weird unique value for easier debugging\n\tcounter := 100.0\n\tvar xnew []float64\n\tfor i, v := range xcopy {\n\t\txnew = append(xnew, v)\n\t\tif i != len(x)-1 {\n\t\t\tfor j := 0; j < absinc-1; j++ 
{\n\t\t\t\txnew = append(xnew, counter)\n\t\t\t\tcounter++\n\t\t\t}\n\t\t}\n\t}\n\tfor i := 0; i < extra; i++ {\n\t\txnew = append(xnew, counter)\n\t\tcounter++\n\t}\n\treturn xnew\n}", "func (self *Weights) addMultiple(w weight,multiples int) {\n\tfor x:=multiples; x > 0; x-- {\n\t\tself.add(w)\n\t}\n}", "func (v Vec) MulBy(other Vec) Vec {\n\tassertSameLen(v, other)\n\tfor i, val := range other {\n\t\tv[i] *= val\n\t}\n\treturn v\n}", "func Accumulate(collection []string, converter func(string) string) []string {\n\tconverted := []string{}\n\tfor _, i := range collection {\n\t\tconverted = append(converted, converter(i))\n\t}\n\treturn converted\n}", "func amplifySamples(samples []int16, ratio float64) (newSamples []int16) {\n\tnewSamples = make([]int16, len(samples))\n\tfor i, oldSample := range samples {\n\t\tnewSamples[i] = int16(float64(oldSample) * ratio)\n\t}\n\treturn\n}", "func Repeat(count int, operand string) string { return strings.Repeat(operand, count) }", "func Accumulate(s []string, f func(string) string) []string {\n\tsReturn := []string{}\n\tfor _, word := range s {\n\t\tsReturn = append(sReturn, f(word))\n\t}\n\treturn sReturn\n}", "func (v Vec) SAddBy(val float64) Vec {\n\tfor i := range v {\n\t\tv[i] += val\n\t}\n\treturn v\n}", "func Repeat[T Clonable[T]](count int, initial T) []T {\n\tresult := make([]T, 0, count)\n\n\tfor i := 0; i < count; i++ {\n\t\tresult = append(result, initial.Clone())\n\t}\n\n\treturn result\n}", "func sumResults(sumResult []float64) float64 {\n\tresult := 0.0\n\tfor _, s := range sumResult {\n\t\tresult += s\n\t}\n\treturn result\n}", "func Accumulate(collection []string, operation func(x string) string) []string {\n\tvar result []string\n\tfor _, item := range collection {\n\t\tresult = append(result, operation(item))\n\t}\n\treturn result\n}", "func Reduce(key string, values *list.List) string { \n\n // initialize total to 0\n // for every value in the list\n // convert the value to an integer\n // check for an 
error! :) \n // add the integer value to the total\n // return the total\n\n\tvar total = 0\n\t\n\tfor e := values.Front(); e != nil; e = e.Next() {\n \n if val, err := strconv.Atoi(e.Value.(string)); err == nil { \n total += val\n } else {\n fmt.Printf(\"Error converting the interface to an integer\\n\")\n }\n\t}\n\n return strconv.Itoa(total) \n\n}", "func Accumulate(input []string, operation func(string) string) (output []string) {\n\n\tdefer trackTime.TrackTime(time.Now())\n\n\tfor _, v := range input {\n\t\toutput = append(output, operation(v))\n\t}\n\n\treturn\n}", "func main() {\n\tvar res uint64\n\tvar N int\n\tfmt.Scanf(\"%v\", &N)\n\tx := make([]uint64, N)\n\ty := make([]interface{}, len(x))\n\tfor i := range x {\n\t\ty[i] = &x[i]\n\t}\n\tfmt.Scanln(y...)\n\tfor _,val := range x {\n\t\tres += val\n\t}\n\tfmt.Printf(\"%d\", res)\n}", "func PrimeFactorizationToPowers(factorization []int) [][2]int {\n\tgrouped := [][2]int{}\n\n\tcurrentPrime := factorization[0]\n\tcount := 1\n\n\tfor i := 1; i<len(factorization); i++ {\n\t\tp := factorization[i]\n\t\tif p != currentPrime {\n\t\t\tgrouped = append(grouped, [2]int{currentPrime, count})\n\t\t\tcount = 1\n\t\t\tcurrentPrime = p\n\t\t} else {\n\t\t\tcount++\n\t\t}\n\t}\n\n\tgrouped = append(grouped, [2]int{currentPrime, count})\n\n\treturn grouped\n}", "func (c combinatorics) PermuteDistributionsFromExisting(total, buckets int, existing []int) [][]int {\n\toutput := [][]int{}\n\texistingLength := len(existing)\n\texistingSum := Math.SumOfInt(existing)\n\tremainder := total - existingSum\n\n\tif buckets == 1 {\n\t\tnewExisting := make([]int, existingLength+1)\n\t\tcopy(newExisting, existing)\n\t\tnewExisting[existingLength] = remainder\n\t\toutput = append(output, newExisting)\n\t\treturn output\n\t}\n\n\tfor x := 0; x <= remainder; x++ {\n\t\tnewExisting := make([]int, existingLength+1)\n\t\tcopy(newExisting, existing)\n\t\tnewExisting[existingLength] = x\n\n\t\tresults := 
c.PermuteDistributionsFromExisting(total, buckets-1, newExisting)\n\t\toutput = append(output, results...)\n\t}\n\n\treturn output\n}", "func accum(x int, s []string, d int) int {\n\top := \"+\"\n\tdepth := 0\n\n\tfor i := 0; i < len(s); i++ {\n\t\tins, s := s[i], s[i+1:]\n\n\t\tif depth > 0 {\n\t\t\tif ins == \"(\" {\n\t\t\t\tdepth++\n\t\t\t}\n\n\t\t\tif ins != \")\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\td, err := toI(ins)\n\t\tif err != nil {\n\t\t\tif ins == \"*\" || ins == \"+\" {\n\t\t\t\top = ins\n\t\t\t} else if ins == \"(\" {\n\t\t\t\tdepth++\n\n\t\t\t\tif op == \"*\" {\n\t\t\t\t\tx *= accum(0, s, depth)\n\t\t\t\t} else if op == \"+\" {\n\t\t\t\t\tx += accum(0, s, depth)\n\t\t\t\t}\n\t\t\t} else if ins == \")\" {\n\t\t\t\tdepth--\n\t\t\t\tif depth < 0 {\n\n\t\t\t\t\treturn x\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\n\t\t\tif x == 0 {\n\t\t\t\tx += d\n\t\t\t} else if op == \"*\" {\n\t\t\t\tx *= d\n\t\t\t} else if op == \"+\" {\n\t\t\t\tx += d\n\t\t\t}\n\t\t}\n\n\t}\n\treturn x\n}", "func productOf(a, b bigCombination) bigCombination {\n\ta.numeratorTerms = append(a.numeratorTerms, b.numeratorTerms...)\n\ta.denominatorTerms = append(a.denominatorTerms, b.denominatorTerms...)\n\treturn a\n}", "func repeated(res ...*regexp.Regexp) *regexp.Regexp {\n\treturn match(group(expression(res...)).String() + `+`)\n}", "func repeated(res ...*regexp.Regexp) *regexp.Regexp {\n\treturn match(group(expression(res...)).String() + `+`)\n}", "func repeated(res ...*regexp.Regexp) *regexp.Regexp {\n\treturn match(group(expression(res...)).String() + `+`)\n}", "func Accumulate(input Words, function func(string) string) (output Words) {\n\tfor _, word := range input {\n\t\toutput = append(output, function(word))\n\t}\n\treturn\n}", "func Accumulate(s []*big.Int) (r *big.Int) {\n\tr = big.NewInt(0)\n\tfor _, e := range s {\n\t\tr.Add(r, e)\n\t}\n\treturn\n}", "func MapSum[T Number](slicesOfItems [][]T) []T {\n\tresult := make([]T, 0, len(slicesOfItems))\n\n\tfor _, items := range 
slicesOfItems {\n\t\tresult = append(result, Sum(items))\n\t}\n\treturn result\n}", "func sumLists(a *SinglyLinkedList, b *SinglyLinkedList) *SinglyLinkedList {\n\tsumList := SinglyLinkedList{}\n\taCurrent := a.Head\n\tbCurrent := b.Head\n\tcarryOne := false\n\tfor aCurrent != nil {\n\t\tsum := aCurrent.Data + bCurrent.Data\n\t\tif carryOne {\n\t\t\tsum += 1\n\t\t}\n\t\tcarryOne = sum >= 10\n\t\tif carryOne {\n\t\t\tsum -= 10\n\t\t}\n\t\tsumList.Add(sum)\n\t\taCurrent = aCurrent.Next\n\t\tbCurrent = bCurrent.Next\n\t}\n\tif carryOne {\n\t\tsumList.Add(1)\n\t}\n\treturn &sumList\n}", "func Sum(v []float64) float64 {\n\ttotal := 0.0\n\tfor _, number := range v {\n\t\ttotal = total + number\n\t}\n\treturn total\n}", "func (z *Element22) MulAssign(x *Element22) *Element22 {\n\n\tvar t [23]uint64\n\tvar D uint64\n\tvar m, C uint64\n\t// -----------------------------------\n\t// First loop\n\n\tC, t[0] = bits.Mul64(x[0], z[0])\n\tC, t[1] = madd1(x[0], z[1], C)\n\tC, t[2] = madd1(x[0], z[2], C)\n\tC, t[3] = madd1(x[0], z[3], C)\n\tC, t[4] = madd1(x[0], z[4], C)\n\tC, t[5] = madd1(x[0], z[5], C)\n\tC, t[6] = madd1(x[0], z[6], C)\n\tC, t[7] = madd1(x[0], z[7], C)\n\tC, t[8] = madd1(x[0], z[8], C)\n\tC, t[9] = madd1(x[0], z[9], C)\n\tC, t[10] = madd1(x[0], z[10], C)\n\tC, t[11] = madd1(x[0], z[11], C)\n\tC, t[12] = madd1(x[0], z[12], C)\n\tC, t[13] = madd1(x[0], z[13], C)\n\tC, t[14] = madd1(x[0], z[14], C)\n\tC, t[15] = madd1(x[0], z[15], C)\n\tC, t[16] = madd1(x[0], z[16], C)\n\tC, t[17] = madd1(x[0], z[17], C)\n\tC, t[18] = madd1(x[0], z[18], C)\n\tC, t[19] = madd1(x[0], z[19], C)\n\tC, t[20] = madd1(x[0], z[20], C)\n\tC, t[21] = madd1(x[0], z[21], C)\n\n\tD = C\n\n\t// m = t[0]n'[0] mod W\n\tm = t[0] * 2085129623399436079\n\n\t// -----------------------------------\n\t// Second loop\n\tC = madd0(m, 9062599614324828209, t[0])\n\n\tC, t[0] = madd2(m, 952425709649632109, t[1], C)\n\n\tC, t[1] = madd2(m, 13987751354083916656, t[2], C)\n\n\tC, t[2] = madd2(m, 
9476693002504986527, t[3], C)\n\n\tC, t[3] = madd2(m, 17899356805776864267, t[4], C)\n\n\tC, t[4] = madd2(m, 2607080593922027197, t[5], C)\n\n\tC, t[5] = madd2(m, 6852504016717314360, t[6], C)\n\n\tC, t[6] = madd2(m, 366248478184989226, t[7], C)\n\n\tC, t[7] = madd2(m, 2672987780203805083, t[8], C)\n\n\tC, t[8] = madd2(m, 14115032483094903896, t[9], C)\n\n\tC, t[9] = madd2(m, 8062699450825609015, t[10], C)\n\n\tC, t[10] = madd2(m, 8413249848292746549, t[11], C)\n\n\tC, t[11] = madd2(m, 11172154229712803058, t[12], C)\n\n\tC, t[12] = madd2(m, 18137346262305431037, t[13], C)\n\n\tC, t[13] = madd2(m, 123227702747754650, t[14], C)\n\n\tC, t[14] = madd2(m, 7409464670784690235, t[15], C)\n\n\tC, t[15] = madd2(m, 243347369443125979, t[16], C)\n\n\tC, t[16] = madd2(m, 200317109320159479, t[17], C)\n\n\tC, t[17] = madd2(m, 17492726232193822651, t[18], C)\n\n\tC, t[18] = madd2(m, 17666595880400198649, t[19], C)\n\n\tC, t[19] = madd2(m, 1619463007483089584, t[20], C)\n\n\tC, t[20] = madd3(m, 7910025299994333900, t[21], C, t[22])\n\n\tt[21], t[22] = bits.Add64(D, C, 0)\n\t// -----------------------------------\n\t// First loop\n\n\tC, t[0] = madd1(x[1], z[0], t[0])\n\tC, t[1] = madd2(x[1], z[1], t[1], C)\n\tC, t[2] = madd2(x[1], z[2], t[2], C)\n\tC, t[3] = madd2(x[1], z[3], t[3], C)\n\tC, t[4] = madd2(x[1], z[4], t[4], C)\n\tC, t[5] = madd2(x[1], z[5], t[5], C)\n\tC, t[6] = madd2(x[1], z[6], t[6], C)\n\tC, t[7] = madd2(x[1], z[7], t[7], C)\n\tC, t[8] = madd2(x[1], z[8], t[8], C)\n\tC, t[9] = madd2(x[1], z[9], t[9], C)\n\tC, t[10] = madd2(x[1], z[10], t[10], C)\n\tC, t[11] = madd2(x[1], z[11], t[11], C)\n\tC, t[12] = madd2(x[1], z[12], t[12], C)\n\tC, t[13] = madd2(x[1], z[13], t[13], C)\n\tC, t[14] = madd2(x[1], z[14], t[14], C)\n\tC, t[15] = madd2(x[1], z[15], t[15], C)\n\tC, t[16] = madd2(x[1], z[16], t[16], C)\n\tC, t[17] = madd2(x[1], z[17], t[17], C)\n\tC, t[18] = madd2(x[1], z[18], t[18], C)\n\tC, t[19] = madd2(x[1], z[19], t[19], C)\n\tC, t[20] = madd2(x[1], z[20], 
t[20], C)\n\tC, t[21] = madd2(x[1], z[21], t[21], C)\n\n\tD = C\n\n\t// m = t[0]n'[0] mod W\n\tm = t[0] * 2085129623399436079\n\n\t// -----------------------------------\n\t// Second loop\n\tC = madd0(m, 9062599614324828209, t[0])\n\n\tC, t[0] = madd2(m, 952425709649632109, t[1], C)\n\n\tC, t[1] = madd2(m, 13987751354083916656, t[2], C)\n\n\tC, t[2] = madd2(m, 9476693002504986527, t[3], C)\n\n\tC, t[3] = madd2(m, 17899356805776864267, t[4], C)\n\n\tC, t[4] = madd2(m, 2607080593922027197, t[5], C)\n\n\tC, t[5] = madd2(m, 6852504016717314360, t[6], C)\n\n\tC, t[6] = madd2(m, 366248478184989226, t[7], C)\n\n\tC, t[7] = madd2(m, 2672987780203805083, t[8], C)\n\n\tC, t[8] = madd2(m, 14115032483094903896, t[9], C)\n\n\tC, t[9] = madd2(m, 8062699450825609015, t[10], C)\n\n\tC, t[10] = madd2(m, 8413249848292746549, t[11], C)\n\n\tC, t[11] = madd2(m, 11172154229712803058, t[12], C)\n\n\tC, t[12] = madd2(m, 18137346262305431037, t[13], C)\n\n\tC, t[13] = madd2(m, 123227702747754650, t[14], C)\n\n\tC, t[14] = madd2(m, 7409464670784690235, t[15], C)\n\n\tC, t[15] = madd2(m, 243347369443125979, t[16], C)\n\n\tC, t[16] = madd2(m, 200317109320159479, t[17], C)\n\n\tC, t[17] = madd2(m, 17492726232193822651, t[18], C)\n\n\tC, t[18] = madd2(m, 17666595880400198649, t[19], C)\n\n\tC, t[19] = madd2(m, 1619463007483089584, t[20], C)\n\n\tC, t[20] = madd3(m, 7910025299994333900, t[21], C, t[22])\n\n\tt[21], t[22] = bits.Add64(D, C, 0)\n\t// -----------------------------------\n\t// First loop\n\n\tC, t[0] = madd1(x[2], z[0], t[0])\n\tC, t[1] = madd2(x[2], z[1], t[1], C)\n\tC, t[2] = madd2(x[2], z[2], t[2], C)\n\tC, t[3] = madd2(x[2], z[3], t[3], C)\n\tC, t[4] = madd2(x[2], z[4], t[4], C)\n\tC, t[5] = madd2(x[2], z[5], t[5], C)\n\tC, t[6] = madd2(x[2], z[6], t[6], C)\n\tC, t[7] = madd2(x[2], z[7], t[7], C)\n\tC, t[8] = madd2(x[2], z[8], t[8], C)\n\tC, t[9] = madd2(x[2], z[9], t[9], C)\n\tC, t[10] = madd2(x[2], z[10], t[10], C)\n\tC, t[11] = madd2(x[2], z[11], t[11], C)\n\tC, t[12] = 
madd2(x[2], z[12], t[12], C)\n\tC, t[13] = madd2(x[2], z[13], t[13], C)\n\tC, t[14] = madd2(x[2], z[14], t[14], C)\n\tC, t[15] = madd2(x[2], z[15], t[15], C)\n\tC, t[16] = madd2(x[2], z[16], t[16], C)\n\tC, t[17] = madd2(x[2], z[17], t[17], C)\n\tC, t[18] = madd2(x[2], z[18], t[18], C)\n\tC, t[19] = madd2(x[2], z[19], t[19], C)\n\tC, t[20] = madd2(x[2], z[20], t[20], C)\n\tC, t[21] = madd2(x[2], z[21], t[21], C)\n\n\tD = C\n\n\t// m = t[0]n'[0] mod W\n\tm = t[0] * 2085129623399436079\n\n\t// -----------------------------------\n\t// Second loop\n\tC = madd0(m, 9062599614324828209, t[0])\n\n\tC, t[0] = madd2(m, 952425709649632109, t[1], C)\n\n\tC, t[1] = madd2(m, 13987751354083916656, t[2], C)\n\n\tC, t[2] = madd2(m, 9476693002504986527, t[3], C)\n\n\tC, t[3] = madd2(m, 17899356805776864267, t[4], C)\n\n\tC, t[4] = madd2(m, 2607080593922027197, t[5], C)\n\n\tC, t[5] = madd2(m, 6852504016717314360, t[6], C)\n\n\tC, t[6] = madd2(m, 366248478184989226, t[7], C)\n\n\tC, t[7] = madd2(m, 2672987780203805083, t[8], C)\n\n\tC, t[8] = madd2(m, 14115032483094903896, t[9], C)\n\n\tC, t[9] = madd2(m, 8062699450825609015, t[10], C)\n\n\tC, t[10] = madd2(m, 8413249848292746549, t[11], C)\n\n\tC, t[11] = madd2(m, 11172154229712803058, t[12], C)\n\n\tC, t[12] = madd2(m, 18137346262305431037, t[13], C)\n\n\tC, t[13] = madd2(m, 123227702747754650, t[14], C)\n\n\tC, t[14] = madd2(m, 7409464670784690235, t[15], C)\n\n\tC, t[15] = madd2(m, 243347369443125979, t[16], C)\n\n\tC, t[16] = madd2(m, 200317109320159479, t[17], C)\n\n\tC, t[17] = madd2(m, 17492726232193822651, t[18], C)\n\n\tC, t[18] = madd2(m, 17666595880400198649, t[19], C)\n\n\tC, t[19] = madd2(m, 1619463007483089584, t[20], C)\n\n\tC, t[20] = madd3(m, 7910025299994333900, t[21], C, t[22])\n\n\tt[21], t[22] = bits.Add64(D, C, 0)\n\t// -----------------------------------\n\t// First loop\n\n\tC, t[0] = madd1(x[3], z[0], t[0])\n\tC, t[1] = madd2(x[3], z[1], t[1], C)\n\tC, t[2] = madd2(x[3], z[2], t[2], C)\n\tC, t[3] = 
madd2(x[3], z[3], t[3], C)\n\tC, t[4] = madd2(x[3], z[4], t[4], C)\n\tC, t[5] = madd2(x[3], z[5], t[5], C)\n\tC, t[6] = madd2(x[3], z[6], t[6], C)\n\tC, t[7] = madd2(x[3], z[7], t[7], C)\n\tC, t[8] = madd2(x[3], z[8], t[8], C)\n\tC, t[9] = madd2(x[3], z[9], t[9], C)\n\tC, t[10] = madd2(x[3], z[10], t[10], C)\n\tC, t[11] = madd2(x[3], z[11], t[11], C)\n\tC, t[12] = madd2(x[3], z[12], t[12], C)\n\tC, t[13] = madd2(x[3], z[13], t[13], C)\n\tC, t[14] = madd2(x[3], z[14], t[14], C)\n\tC, t[15] = madd2(x[3], z[15], t[15], C)\n\tC, t[16] = madd2(x[3], z[16], t[16], C)\n\tC, t[17] = madd2(x[3], z[17], t[17], C)\n\tC, t[18] = madd2(x[3], z[18], t[18], C)\n\tC, t[19] = madd2(x[3], z[19], t[19], C)\n\tC, t[20] = madd2(x[3], z[20], t[20], C)\n\tC, t[21] = madd2(x[3], z[21], t[21], C)\n\n\tD = C\n\n\t// m = t[0]n'[0] mod W\n\tm = t[0] * 2085129623399436079\n\n\t// -----------------------------------\n\t// Second loop\n\tC = madd0(m, 9062599614324828209, t[0])\n\n\tC, t[0] = madd2(m, 952425709649632109, t[1], C)\n\n\tC, t[1] = madd2(m, 13987751354083916656, t[2], C)\n\n\tC, t[2] = madd2(m, 9476693002504986527, t[3], C)\n\n\tC, t[3] = madd2(m, 17899356805776864267, t[4], C)\n\n\tC, t[4] = madd2(m, 2607080593922027197, t[5], C)\n\n\tC, t[5] = madd2(m, 6852504016717314360, t[6], C)\n\n\tC, t[6] = madd2(m, 366248478184989226, t[7], C)\n\n\tC, t[7] = madd2(m, 2672987780203805083, t[8], C)\n\n\tC, t[8] = madd2(m, 14115032483094903896, t[9], C)\n\n\tC, t[9] = madd2(m, 8062699450825609015, t[10], C)\n\n\tC, t[10] = madd2(m, 8413249848292746549, t[11], C)\n\n\tC, t[11] = madd2(m, 11172154229712803058, t[12], C)\n\n\tC, t[12] = madd2(m, 18137346262305431037, t[13], C)\n\n\tC, t[13] = madd2(m, 123227702747754650, t[14], C)\n\n\tC, t[14] = madd2(m, 7409464670784690235, t[15], C)\n\n\tC, t[15] = madd2(m, 243347369443125979, t[16], C)\n\n\tC, t[16] = madd2(m, 200317109320159479, t[17], C)\n\n\tC, t[17] = madd2(m, 17492726232193822651, t[18], C)\n\n\tC, t[18] = madd2(m, 17666595880400198649, 
t[19], C)\n\n\tC, t[19] = madd2(m, 1619463007483089584, t[20], C)\n\n\tC, t[20] = madd3(m, 7910025299994333900, t[21], C, t[22])\n\n\tt[21], t[22] = bits.Add64(D, C, 0)\n\t// -----------------------------------\n\t// First loop\n\n\tC, t[0] = madd1(x[4], z[0], t[0])\n\tC, t[1] = madd2(x[4], z[1], t[1], C)\n\tC, t[2] = madd2(x[4], z[2], t[2], C)\n\tC, t[3] = madd2(x[4], z[3], t[3], C)\n\tC, t[4] = madd2(x[4], z[4], t[4], C)\n\tC, t[5] = madd2(x[4], z[5], t[5], C)\n\tC, t[6] = madd2(x[4], z[6], t[6], C)\n\tC, t[7] = madd2(x[4], z[7], t[7], C)\n\tC, t[8] = madd2(x[4], z[8], t[8], C)\n\tC, t[9] = madd2(x[4], z[9], t[9], C)\n\tC, t[10] = madd2(x[4], z[10], t[10], C)\n\tC, t[11] = madd2(x[4], z[11], t[11], C)\n\tC, t[12] = madd2(x[4], z[12], t[12], C)\n\tC, t[13] = madd2(x[4], z[13], t[13], C)\n\tC, t[14] = madd2(x[4], z[14], t[14], C)\n\tC, t[15] = madd2(x[4], z[15], t[15], C)\n\tC, t[16] = madd2(x[4], z[16], t[16], C)\n\tC, t[17] = madd2(x[4], z[17], t[17], C)\n\tC, t[18] = madd2(x[4], z[18], t[18], C)\n\tC, t[19] = madd2(x[4], z[19], t[19], C)\n\tC, t[20] = madd2(x[4], z[20], t[20], C)\n\tC, t[21] = madd2(x[4], z[21], t[21], C)\n\n\tD = C\n\n\t// m = t[0]n'[0] mod W\n\tm = t[0] * 2085129623399436079\n\n\t// -----------------------------------\n\t// Second loop\n\tC = madd0(m, 9062599614324828209, t[0])\n\n\tC, t[0] = madd2(m, 952425709649632109, t[1], C)\n\n\tC, t[1] = madd2(m, 13987751354083916656, t[2], C)\n\n\tC, t[2] = madd2(m, 9476693002504986527, t[3], C)\n\n\tC, t[3] = madd2(m, 17899356805776864267, t[4], C)\n\n\tC, t[4] = madd2(m, 2607080593922027197, t[5], C)\n\n\tC, t[5] = madd2(m, 6852504016717314360, t[6], C)\n\n\tC, t[6] = madd2(m, 366248478184989226, t[7], C)\n\n\tC, t[7] = madd2(m, 2672987780203805083, t[8], C)\n\n\tC, t[8] = madd2(m, 14115032483094903896, t[9], C)\n\n\tC, t[9] = madd2(m, 8062699450825609015, t[10], C)\n\n\tC, t[10] = madd2(m, 8413249848292746549, t[11], C)\n\n\tC, t[11] = madd2(m, 11172154229712803058, t[12], C)\n\n\tC, t[12] = 
madd2(m, 18137346262305431037, t[13], C)\n\n\tC, t[13] = madd2(m, 123227702747754650, t[14], C)\n\n\tC, t[14] = madd2(m, 7409464670784690235, t[15], C)\n\n\tC, t[15] = madd2(m, 243347369443125979, t[16], C)\n\n\tC, t[16] = madd2(m, 200317109320159479, t[17], C)\n\n\tC, t[17] = madd2(m, 17492726232193822651, t[18], C)\n\n\tC, t[18] = madd2(m, 17666595880400198649, t[19], C)\n\n\tC, t[19] = madd2(m, 1619463007483089584, t[20], C)\n\n\tC, t[20] = madd3(m, 7910025299994333900, t[21], C, t[22])\n\n\tt[21], t[22] = bits.Add64(D, C, 0)\n\t// -----------------------------------\n\t// First loop\n\n\tC, t[0] = madd1(x[5], z[0], t[0])\n\tC, t[1] = madd2(x[5], z[1], t[1], C)\n\tC, t[2] = madd2(x[5], z[2], t[2], C)\n\tC, t[3] = madd2(x[5], z[3], t[3], C)\n\tC, t[4] = madd2(x[5], z[4], t[4], C)\n\tC, t[5] = madd2(x[5], z[5], t[5], C)\n\tC, t[6] = madd2(x[5], z[6], t[6], C)\n\tC, t[7] = madd2(x[5], z[7], t[7], C)\n\tC, t[8] = madd2(x[5], z[8], t[8], C)\n\tC, t[9] = madd2(x[5], z[9], t[9], C)\n\tC, t[10] = madd2(x[5], z[10], t[10], C)\n\tC, t[11] = madd2(x[5], z[11], t[11], C)\n\tC, t[12] = madd2(x[5], z[12], t[12], C)\n\tC, t[13] = madd2(x[5], z[13], t[13], C)\n\tC, t[14] = madd2(x[5], z[14], t[14], C)\n\tC, t[15] = madd2(x[5], z[15], t[15], C)\n\tC, t[16] = madd2(x[5], z[16], t[16], C)\n\tC, t[17] = madd2(x[5], z[17], t[17], C)\n\tC, t[18] = madd2(x[5], z[18], t[18], C)\n\tC, t[19] = madd2(x[5], z[19], t[19], C)\n\tC, t[20] = madd2(x[5], z[20], t[20], C)\n\tC, t[21] = madd2(x[5], z[21], t[21], C)\n\n\tD = C\n\n\t// m = t[0]n'[0] mod W\n\tm = t[0] * 2085129623399436079\n\n\t// -----------------------------------\n\t// Second loop\n\tC = madd0(m, 9062599614324828209, t[0])\n\n\tC, t[0] = madd2(m, 952425709649632109, t[1], C)\n\n\tC, t[1] = madd2(m, 13987751354083916656, t[2], C)\n\n\tC, t[2] = madd2(m, 9476693002504986527, t[3], C)\n\n\tC, t[3] = madd2(m, 17899356805776864267, t[4], C)\n\n\tC, t[4] = madd2(m, 2607080593922027197, t[5], C)\n\n\tC, t[5] = madd2(m, 
6852504016717314360, t[6], C)\n\n\tC, t[6] = madd2(m, 366248478184989226, t[7], C)\n\n\tC, t[7] = madd2(m, 2672987780203805083, t[8], C)\n\n\tC, t[8] = madd2(m, 14115032483094903896, t[9], C)\n\n\tC, t[9] = madd2(m, 8062699450825609015, t[10], C)\n\n\tC, t[10] = madd2(m, 8413249848292746549, t[11], C)\n\n\tC, t[11] = madd2(m, 11172154229712803058, t[12], C)\n\n\tC, t[12] = madd2(m, 18137346262305431037, t[13], C)\n\n\tC, t[13] = madd2(m, 123227702747754650, t[14], C)\n\n\tC, t[14] = madd2(m, 7409464670784690235, t[15], C)\n\n\tC, t[15] = madd2(m, 243347369443125979, t[16], C)\n\n\tC, t[16] = madd2(m, 200317109320159479, t[17], C)\n\n\tC, t[17] = madd2(m, 17492726232193822651, t[18], C)\n\n\tC, t[18] = madd2(m, 17666595880400198649, t[19], C)\n\n\tC, t[19] = madd2(m, 1619463007483089584, t[20], C)\n\n\tC, t[20] = madd3(m, 7910025299994333900, t[21], C, t[22])\n\n\tt[21], t[22] = bits.Add64(D, C, 0)\n\t// -----------------------------------\n\t// First loop\n\n\tC, t[0] = madd1(x[6], z[0], t[0])\n\tC, t[1] = madd2(x[6], z[1], t[1], C)\n\tC, t[2] = madd2(x[6], z[2], t[2], C)\n\tC, t[3] = madd2(x[6], z[3], t[3], C)\n\tC, t[4] = madd2(x[6], z[4], t[4], C)\n\tC, t[5] = madd2(x[6], z[5], t[5], C)\n\tC, t[6] = madd2(x[6], z[6], t[6], C)\n\tC, t[7] = madd2(x[6], z[7], t[7], C)\n\tC, t[8] = madd2(x[6], z[8], t[8], C)\n\tC, t[9] = madd2(x[6], z[9], t[9], C)\n\tC, t[10] = madd2(x[6], z[10], t[10], C)\n\tC, t[11] = madd2(x[6], z[11], t[11], C)\n\tC, t[12] = madd2(x[6], z[12], t[12], C)\n\tC, t[13] = madd2(x[6], z[13], t[13], C)\n\tC, t[14] = madd2(x[6], z[14], t[14], C)\n\tC, t[15] = madd2(x[6], z[15], t[15], C)\n\tC, t[16] = madd2(x[6], z[16], t[16], C)\n\tC, t[17] = madd2(x[6], z[17], t[17], C)\n\tC, t[18] = madd2(x[6], z[18], t[18], C)\n\tC, t[19] = madd2(x[6], z[19], t[19], C)\n\tC, t[20] = madd2(x[6], z[20], t[20], C)\n\tC, t[21] = madd2(x[6], z[21], t[21], C)\n\n\tD = C\n\n\t// m = t[0]n'[0] mod W\n\tm = t[0] * 2085129623399436079\n\n\t// 
-----------------------------------\n\t// Second loop\n\tC = madd0(m, 9062599614324828209, t[0])\n\n\tC, t[0] = madd2(m, 952425709649632109, t[1], C)\n\n\tC, t[1] = madd2(m, 13987751354083916656, t[2], C)\n\n\tC, t[2] = madd2(m, 9476693002504986527, t[3], C)\n\n\tC, t[3] = madd2(m, 17899356805776864267, t[4], C)\n\n\tC, t[4] = madd2(m, 2607080593922027197, t[5], C)\n\n\tC, t[5] = madd2(m, 6852504016717314360, t[6], C)\n\n\tC, t[6] = madd2(m, 366248478184989226, t[7], C)\n\n\tC, t[7] = madd2(m, 2672987780203805083, t[8], C)\n\n\tC, t[8] = madd2(m, 14115032483094903896, t[9], C)\n\n\tC, t[9] = madd2(m, 8062699450825609015, t[10], C)\n\n\tC, t[10] = madd2(m, 8413249848292746549, t[11], C)\n\n\tC, t[11] = madd2(m, 11172154229712803058, t[12], C)\n\n\tC, t[12] = madd2(m, 18137346262305431037, t[13], C)\n\n\tC, t[13] = madd2(m, 123227702747754650, t[14], C)\n\n\tC, t[14] = madd2(m, 7409464670784690235, t[15], C)\n\n\tC, t[15] = madd2(m, 243347369443125979, t[16], C)\n\n\tC, t[16] = madd2(m, 200317109320159479, t[17], C)\n\n\tC, t[17] = madd2(m, 17492726232193822651, t[18], C)\n\n\tC, t[18] = madd2(m, 17666595880400198649, t[19], C)\n\n\tC, t[19] = madd2(m, 1619463007483089584, t[20], C)\n\n\tC, t[20] = madd3(m, 7910025299994333900, t[21], C, t[22])\n\n\tt[21], t[22] = bits.Add64(D, C, 0)\n\t// -----------------------------------\n\t// First loop\n\n\tC, t[0] = madd1(x[7], z[0], t[0])\n\tC, t[1] = madd2(x[7], z[1], t[1], C)\n\tC, t[2] = madd2(x[7], z[2], t[2], C)\n\tC, t[3] = madd2(x[7], z[3], t[3], C)\n\tC, t[4] = madd2(x[7], z[4], t[4], C)\n\tC, t[5] = madd2(x[7], z[5], t[5], C)\n\tC, t[6] = madd2(x[7], z[6], t[6], C)\n\tC, t[7] = madd2(x[7], z[7], t[7], C)\n\tC, t[8] = madd2(x[7], z[8], t[8], C)\n\tC, t[9] = madd2(x[7], z[9], t[9], C)\n\tC, t[10] = madd2(x[7], z[10], t[10], C)\n\tC, t[11] = madd2(x[7], z[11], t[11], C)\n\tC, t[12] = madd2(x[7], z[12], t[12], C)\n\tC, t[13] = madd2(x[7], z[13], t[13], C)\n\tC, t[14] = madd2(x[7], z[14], t[14], C)\n\tC, t[15] = 
madd2(x[7], z[15], t[15], C)\n\tC, t[16] = madd2(x[7], z[16], t[16], C)\n\tC, t[17] = madd2(x[7], z[17], t[17], C)\n\tC, t[18] = madd2(x[7], z[18], t[18], C)\n\tC, t[19] = madd2(x[7], z[19], t[19], C)\n\tC, t[20] = madd2(x[7], z[20], t[20], C)\n\tC, t[21] = madd2(x[7], z[21], t[21], C)\n\n\tD = C\n\n\t// m = t[0]n'[0] mod W\n\tm = t[0] * 2085129623399436079\n\n\t// -----------------------------------\n\t// Second loop\n\tC = madd0(m, 9062599614324828209, t[0])\n\n\tC, t[0] = madd2(m, 952425709649632109, t[1], C)\n\n\tC, t[1] = madd2(m, 13987751354083916656, t[2], C)\n\n\tC, t[2] = madd2(m, 9476693002504986527, t[3], C)\n\n\tC, t[3] = madd2(m, 17899356805776864267, t[4], C)\n\n\tC, t[4] = madd2(m, 2607080593922027197, t[5], C)\n\n\tC, t[5] = madd2(m, 6852504016717314360, t[6], C)\n\n\tC, t[6] = madd2(m, 366248478184989226, t[7], C)\n\n\tC, t[7] = madd2(m, 2672987780203805083, t[8], C)\n\n\tC, t[8] = madd2(m, 14115032483094903896, t[9], C)\n\n\tC, t[9] = madd2(m, 8062699450825609015, t[10], C)\n\n\tC, t[10] = madd2(m, 8413249848292746549, t[11], C)\n\n\tC, t[11] = madd2(m, 11172154229712803058, t[12], C)\n\n\tC, t[12] = madd2(m, 18137346262305431037, t[13], C)\n\n\tC, t[13] = madd2(m, 123227702747754650, t[14], C)\n\n\tC, t[14] = madd2(m, 7409464670784690235, t[15], C)\n\n\tC, t[15] = madd2(m, 243347369443125979, t[16], C)\n\n\tC, t[16] = madd2(m, 200317109320159479, t[17], C)\n\n\tC, t[17] = madd2(m, 17492726232193822651, t[18], C)\n\n\tC, t[18] = madd2(m, 17666595880400198649, t[19], C)\n\n\tC, t[19] = madd2(m, 1619463007483089584, t[20], C)\n\n\tC, t[20] = madd3(m, 7910025299994333900, t[21], C, t[22])\n\n\tt[21], t[22] = bits.Add64(D, C, 0)\n\t// -----------------------------------\n\t// First loop\n\n\tC, t[0] = madd1(x[8], z[0], t[0])\n\tC, t[1] = madd2(x[8], z[1], t[1], C)\n\tC, t[2] = madd2(x[8], z[2], t[2], C)\n\tC, t[3] = madd2(x[8], z[3], t[3], C)\n\tC, t[4] = madd2(x[8], z[4], t[4], C)\n\tC, t[5] = madd2(x[8], z[5], t[5], C)\n\tC, t[6] = madd2(x[8], z[6], 
t[6], C)\n\tC, t[7] = madd2(x[8], z[7], t[7], C)\n\tC, t[8] = madd2(x[8], z[8], t[8], C)\n\tC, t[9] = madd2(x[8], z[9], t[9], C)\n\tC, t[10] = madd2(x[8], z[10], t[10], C)\n\tC, t[11] = madd2(x[8], z[11], t[11], C)\n\tC, t[12] = madd2(x[8], z[12], t[12], C)\n\tC, t[13] = madd2(x[8], z[13], t[13], C)\n\tC, t[14] = madd2(x[8], z[14], t[14], C)\n\tC, t[15] = madd2(x[8], z[15], t[15], C)\n\tC, t[16] = madd2(x[8], z[16], t[16], C)\n\tC, t[17] = madd2(x[8], z[17], t[17], C)\n\tC, t[18] = madd2(x[8], z[18], t[18], C)\n\tC, t[19] = madd2(x[8], z[19], t[19], C)\n\tC, t[20] = madd2(x[8], z[20], t[20], C)\n\tC, t[21] = madd2(x[8], z[21], t[21], C)\n\n\tD = C\n\n\t// m = t[0]n'[0] mod W\n\tm = t[0] * 2085129623399436079\n\n\t// -----------------------------------\n\t// Second loop\n\tC = madd0(m, 9062599614324828209, t[0])\n\n\tC, t[0] = madd2(m, 952425709649632109, t[1], C)\n\n\tC, t[1] = madd2(m, 13987751354083916656, t[2], C)\n\n\tC, t[2] = madd2(m, 9476693002504986527, t[3], C)\n\n\tC, t[3] = madd2(m, 17899356805776864267, t[4], C)\n\n\tC, t[4] = madd2(m, 2607080593922027197, t[5], C)\n\n\tC, t[5] = madd2(m, 6852504016717314360, t[6], C)\n\n\tC, t[6] = madd2(m, 366248478184989226, t[7], C)\n\n\tC, t[7] = madd2(m, 2672987780203805083, t[8], C)\n\n\tC, t[8] = madd2(m, 14115032483094903896, t[9], C)\n\n\tC, t[9] = madd2(m, 8062699450825609015, t[10], C)\n\n\tC, t[10] = madd2(m, 8413249848292746549, t[11], C)\n\n\tC, t[11] = madd2(m, 11172154229712803058, t[12], C)\n\n\tC, t[12] = madd2(m, 18137346262305431037, t[13], C)\n\n\tC, t[13] = madd2(m, 123227702747754650, t[14], C)\n\n\tC, t[14] = madd2(m, 7409464670784690235, t[15], C)\n\n\tC, t[15] = madd2(m, 243347369443125979, t[16], C)\n\n\tC, t[16] = madd2(m, 200317109320159479, t[17], C)\n\n\tC, t[17] = madd2(m, 17492726232193822651, t[18], C)\n\n\tC, t[18] = madd2(m, 17666595880400198649, t[19], C)\n\n\tC, t[19] = madd2(m, 1619463007483089584, t[20], C)\n\n\tC, t[20] = madd3(m, 7910025299994333900, t[21], C, 
t[22])\n\n\tt[21], t[22] = bits.Add64(D, C, 0)\n\t// -----------------------------------\n\t// First loop\n\n\tC, t[0] = madd1(x[9], z[0], t[0])\n\tC, t[1] = madd2(x[9], z[1], t[1], C)\n\tC, t[2] = madd2(x[9], z[2], t[2], C)\n\tC, t[3] = madd2(x[9], z[3], t[3], C)\n\tC, t[4] = madd2(x[9], z[4], t[4], C)\n\tC, t[5] = madd2(x[9], z[5], t[5], C)\n\tC, t[6] = madd2(x[9], z[6], t[6], C)\n\tC, t[7] = madd2(x[9], z[7], t[7], C)\n\tC, t[8] = madd2(x[9], z[8], t[8], C)\n\tC, t[9] = madd2(x[9], z[9], t[9], C)\n\tC, t[10] = madd2(x[9], z[10], t[10], C)\n\tC, t[11] = madd2(x[9], z[11], t[11], C)\n\tC, t[12] = madd2(x[9], z[12], t[12], C)\n\tC, t[13] = madd2(x[9], z[13], t[13], C)\n\tC, t[14] = madd2(x[9], z[14], t[14], C)\n\tC, t[15] = madd2(x[9], z[15], t[15], C)\n\tC, t[16] = madd2(x[9], z[16], t[16], C)\n\tC, t[17] = madd2(x[9], z[17], t[17], C)\n\tC, t[18] = madd2(x[9], z[18], t[18], C)\n\tC, t[19] = madd2(x[9], z[19], t[19], C)\n\tC, t[20] = madd2(x[9], z[20], t[20], C)\n\tC, t[21] = madd2(x[9], z[21], t[21], C)\n\n\tD = C\n\n\t// m = t[0]n'[0] mod W\n\tm = t[0] * 2085129623399436079\n\n\t// -----------------------------------\n\t// Second loop\n\tC = madd0(m, 9062599614324828209, t[0])\n\n\tC, t[0] = madd2(m, 952425709649632109, t[1], C)\n\n\tC, t[1] = madd2(m, 13987751354083916656, t[2], C)\n\n\tC, t[2] = madd2(m, 9476693002504986527, t[3], C)\n\n\tC, t[3] = madd2(m, 17899356805776864267, t[4], C)\n\n\tC, t[4] = madd2(m, 2607080593922027197, t[5], C)\n\n\tC, t[5] = madd2(m, 6852504016717314360, t[6], C)\n\n\tC, t[6] = madd2(m, 366248478184989226, t[7], C)\n\n\tC, t[7] = madd2(m, 2672987780203805083, t[8], C)\n\n\tC, t[8] = madd2(m, 14115032483094903896, t[9], C)\n\n\tC, t[9] = madd2(m, 8062699450825609015, t[10], C)\n\n\tC, t[10] = madd2(m, 8413249848292746549, t[11], C)\n\n\tC, t[11] = madd2(m, 11172154229712803058, t[12], C)\n\n\tC, t[12] = madd2(m, 18137346262305431037, t[13], C)\n\n\tC, t[13] = madd2(m, 123227702747754650, t[14], C)\n\n\tC, t[14] = madd2(m, 
7409464670784690235, t[15], C)\n\n\tC, t[15] = madd2(m, 243347369443125979, t[16], C)\n\n\tC, t[16] = madd2(m, 200317109320159479, t[17], C)\n\n\tC, t[17] = madd2(m, 17492726232193822651, t[18], C)\n\n\tC, t[18] = madd2(m, 17666595880400198649, t[19], C)\n\n\tC, t[19] = madd2(m, 1619463007483089584, t[20], C)\n\n\tC, t[20] = madd3(m, 7910025299994333900, t[21], C, t[22])\n\n\tt[21], t[22] = bits.Add64(D, C, 0)\n\t// -----------------------------------\n\t// First loop\n\n\tC, t[0] = madd1(x[10], z[0], t[0])\n\tC, t[1] = madd2(x[10], z[1], t[1], C)\n\tC, t[2] = madd2(x[10], z[2], t[2], C)\n\tC, t[3] = madd2(x[10], z[3], t[3], C)\n\tC, t[4] = madd2(x[10], z[4], t[4], C)\n\tC, t[5] = madd2(x[10], z[5], t[5], C)\n\tC, t[6] = madd2(x[10], z[6], t[6], C)\n\tC, t[7] = madd2(x[10], z[7], t[7], C)\n\tC, t[8] = madd2(x[10], z[8], t[8], C)\n\tC, t[9] = madd2(x[10], z[9], t[9], C)\n\tC, t[10] = madd2(x[10], z[10], t[10], C)\n\tC, t[11] = madd2(x[10], z[11], t[11], C)\n\tC, t[12] = madd2(x[10], z[12], t[12], C)\n\tC, t[13] = madd2(x[10], z[13], t[13], C)\n\tC, t[14] = madd2(x[10], z[14], t[14], C)\n\tC, t[15] = madd2(x[10], z[15], t[15], C)\n\tC, t[16] = madd2(x[10], z[16], t[16], C)\n\tC, t[17] = madd2(x[10], z[17], t[17], C)\n\tC, t[18] = madd2(x[10], z[18], t[18], C)\n\tC, t[19] = madd2(x[10], z[19], t[19], C)\n\tC, t[20] = madd2(x[10], z[20], t[20], C)\n\tC, t[21] = madd2(x[10], z[21], t[21], C)\n\n\tD = C\n\n\t// m = t[0]n'[0] mod W\n\tm = t[0] * 2085129623399436079\n\n\t// -----------------------------------\n\t// Second loop\n\tC = madd0(m, 9062599614324828209, t[0])\n\n\tC, t[0] = madd2(m, 952425709649632109, t[1], C)\n\n\tC, t[1] = madd2(m, 13987751354083916656, t[2], C)\n\n\tC, t[2] = madd2(m, 9476693002504986527, t[3], C)\n\n\tC, t[3] = madd2(m, 17899356805776864267, t[4], C)\n\n\tC, t[4] = madd2(m, 2607080593922027197, t[5], C)\n\n\tC, t[5] = madd2(m, 6852504016717314360, t[6], C)\n\n\tC, t[6] = madd2(m, 366248478184989226, t[7], C)\n\n\tC, t[7] = madd2(m, 
2672987780203805083, t[8], C)\n\n\tC, t[8] = madd2(m, 14115032483094903896, t[9], C)\n\n\tC, t[9] = madd2(m, 8062699450825609015, t[10], C)\n\n\tC, t[10] = madd2(m, 8413249848292746549, t[11], C)\n\n\tC, t[11] = madd2(m, 11172154229712803058, t[12], C)\n\n\tC, t[12] = madd2(m, 18137346262305431037, t[13], C)\n\n\tC, t[13] = madd2(m, 123227702747754650, t[14], C)\n\n\tC, t[14] = madd2(m, 7409464670784690235, t[15], C)\n\n\tC, t[15] = madd2(m, 243347369443125979, t[16], C)\n\n\tC, t[16] = madd2(m, 200317109320159479, t[17], C)\n\n\tC, t[17] = madd2(m, 17492726232193822651, t[18], C)\n\n\tC, t[18] = madd2(m, 17666595880400198649, t[19], C)\n\n\tC, t[19] = madd2(m, 1619463007483089584, t[20], C)\n\n\tC, t[20] = madd3(m, 7910025299994333900, t[21], C, t[22])\n\n\tt[21], t[22] = bits.Add64(D, C, 0)\n\t// -----------------------------------\n\t// First loop\n\n\tC, t[0] = madd1(x[11], z[0], t[0])\n\tC, t[1] = madd2(x[11], z[1], t[1], C)\n\tC, t[2] = madd2(x[11], z[2], t[2], C)\n\tC, t[3] = madd2(x[11], z[3], t[3], C)\n\tC, t[4] = madd2(x[11], z[4], t[4], C)\n\tC, t[5] = madd2(x[11], z[5], t[5], C)\n\tC, t[6] = madd2(x[11], z[6], t[6], C)\n\tC, t[7] = madd2(x[11], z[7], t[7], C)\n\tC, t[8] = madd2(x[11], z[8], t[8], C)\n\tC, t[9] = madd2(x[11], z[9], t[9], C)\n\tC, t[10] = madd2(x[11], z[10], t[10], C)\n\tC, t[11] = madd2(x[11], z[11], t[11], C)\n\tC, t[12] = madd2(x[11], z[12], t[12], C)\n\tC, t[13] = madd2(x[11], z[13], t[13], C)\n\tC, t[14] = madd2(x[11], z[14], t[14], C)\n\tC, t[15] = madd2(x[11], z[15], t[15], C)\n\tC, t[16] = madd2(x[11], z[16], t[16], C)\n\tC, t[17] = madd2(x[11], z[17], t[17], C)\n\tC, t[18] = madd2(x[11], z[18], t[18], C)\n\tC, t[19] = madd2(x[11], z[19], t[19], C)\n\tC, t[20] = madd2(x[11], z[20], t[20], C)\n\tC, t[21] = madd2(x[11], z[21], t[21], C)\n\n\tD = C\n\n\t// m = t[0]n'[0] mod W\n\tm = t[0] * 2085129623399436079\n\n\t// -----------------------------------\n\t// Second loop\n\tC = madd0(m, 9062599614324828209, t[0])\n\n\tC, t[0] = 
madd2(m, 952425709649632109, t[1], C)\n\n\tC, t[1] = madd2(m, 13987751354083916656, t[2], C)\n\n\tC, t[2] = madd2(m, 9476693002504986527, t[3], C)\n\n\tC, t[3] = madd2(m, 17899356805776864267, t[4], C)\n\n\tC, t[4] = madd2(m, 2607080593922027197, t[5], C)\n\n\tC, t[5] = madd2(m, 6852504016717314360, t[6], C)\n\n\tC, t[6] = madd2(m, 366248478184989226, t[7], C)\n\n\tC, t[7] = madd2(m, 2672987780203805083, t[8], C)\n\n\tC, t[8] = madd2(m, 14115032483094903896, t[9], C)\n\n\tC, t[9] = madd2(m, 8062699450825609015, t[10], C)\n\n\tC, t[10] = madd2(m, 8413249848292746549, t[11], C)\n\n\tC, t[11] = madd2(m, 11172154229712803058, t[12], C)\n\n\tC, t[12] = madd2(m, 18137346262305431037, t[13], C)\n\n\tC, t[13] = madd2(m, 123227702747754650, t[14], C)\n\n\tC, t[14] = madd2(m, 7409464670784690235, t[15], C)\n\n\tC, t[15] = madd2(m, 243347369443125979, t[16], C)\n\n\tC, t[16] = madd2(m, 200317109320159479, t[17], C)\n\n\tC, t[17] = madd2(m, 17492726232193822651, t[18], C)\n\n\tC, t[18] = madd2(m, 17666595880400198649, t[19], C)\n\n\tC, t[19] = madd2(m, 1619463007483089584, t[20], C)\n\n\tC, t[20] = madd3(m, 7910025299994333900, t[21], C, t[22])\n\n\tt[21], t[22] = bits.Add64(D, C, 0)\n\t// -----------------------------------\n\t// First loop\n\n\tC, t[0] = madd1(x[12], z[0], t[0])\n\tC, t[1] = madd2(x[12], z[1], t[1], C)\n\tC, t[2] = madd2(x[12], z[2], t[2], C)\n\tC, t[3] = madd2(x[12], z[3], t[3], C)\n\tC, t[4] = madd2(x[12], z[4], t[4], C)\n\tC, t[5] = madd2(x[12], z[5], t[5], C)\n\tC, t[6] = madd2(x[12], z[6], t[6], C)\n\tC, t[7] = madd2(x[12], z[7], t[7], C)\n\tC, t[8] = madd2(x[12], z[8], t[8], C)\n\tC, t[9] = madd2(x[12], z[9], t[9], C)\n\tC, t[10] = madd2(x[12], z[10], t[10], C)\n\tC, t[11] = madd2(x[12], z[11], t[11], C)\n\tC, t[12] = madd2(x[12], z[12], t[12], C)\n\tC, t[13] = madd2(x[12], z[13], t[13], C)\n\tC, t[14] = madd2(x[12], z[14], t[14], C)\n\tC, t[15] = madd2(x[12], z[15], t[15], C)\n\tC, t[16] = madd2(x[12], z[16], t[16], C)\n\tC, t[17] = madd2(x[12], 
z[17], t[17], C)\n\tC, t[18] = madd2(x[12], z[18], t[18], C)\n\tC, t[19] = madd2(x[12], z[19], t[19], C)\n\tC, t[20] = madd2(x[12], z[20], t[20], C)\n\tC, t[21] = madd2(x[12], z[21], t[21], C)\n\n\tD = C\n\n\t// m = t[0]n'[0] mod W\n\tm = t[0] * 2085129623399436079\n\n\t// -----------------------------------\n\t// Second loop\n\tC = madd0(m, 9062599614324828209, t[0])\n\n\tC, t[0] = madd2(m, 952425709649632109, t[1], C)\n\n\tC, t[1] = madd2(m, 13987751354083916656, t[2], C)\n\n\tC, t[2] = madd2(m, 9476693002504986527, t[3], C)\n\n\tC, t[3] = madd2(m, 17899356805776864267, t[4], C)\n\n\tC, t[4] = madd2(m, 2607080593922027197, t[5], C)\n\n\tC, t[5] = madd2(m, 6852504016717314360, t[6], C)\n\n\tC, t[6] = madd2(m, 366248478184989226, t[7], C)\n\n\tC, t[7] = madd2(m, 2672987780203805083, t[8], C)\n\n\tC, t[8] = madd2(m, 14115032483094903896, t[9], C)\n\n\tC, t[9] = madd2(m, 8062699450825609015, t[10], C)\n\n\tC, t[10] = madd2(m, 8413249848292746549, t[11], C)\n\n\tC, t[11] = madd2(m, 11172154229712803058, t[12], C)\n\n\tC, t[12] = madd2(m, 18137346262305431037, t[13], C)\n\n\tC, t[13] = madd2(m, 123227702747754650, t[14], C)\n\n\tC, t[14] = madd2(m, 7409464670784690235, t[15], C)\n\n\tC, t[15] = madd2(m, 243347369443125979, t[16], C)\n\n\tC, t[16] = madd2(m, 200317109320159479, t[17], C)\n\n\tC, t[17] = madd2(m, 17492726232193822651, t[18], C)\n\n\tC, t[18] = madd2(m, 17666595880400198649, t[19], C)\n\n\tC, t[19] = madd2(m, 1619463007483089584, t[20], C)\n\n\tC, t[20] = madd3(m, 7910025299994333900, t[21], C, t[22])\n\n\tt[21], t[22] = bits.Add64(D, C, 0)\n\t// -----------------------------------\n\t// First loop\n\n\tC, t[0] = madd1(x[13], z[0], t[0])\n\tC, t[1] = madd2(x[13], z[1], t[1], C)\n\tC, t[2] = madd2(x[13], z[2], t[2], C)\n\tC, t[3] = madd2(x[13], z[3], t[3], C)\n\tC, t[4] = madd2(x[13], z[4], t[4], C)\n\tC, t[5] = madd2(x[13], z[5], t[5], C)\n\tC, t[6] = madd2(x[13], z[6], t[6], C)\n\tC, t[7] = madd2(x[13], z[7], t[7], C)\n\tC, t[8] = madd2(x[13], z[8], 
t[8], C)\n\tC, t[9] = madd2(x[13], z[9], t[9], C)\n\tC, t[10] = madd2(x[13], z[10], t[10], C)\n\tC, t[11] = madd2(x[13], z[11], t[11], C)\n\tC, t[12] = madd2(x[13], z[12], t[12], C)\n\tC, t[13] = madd2(x[13], z[13], t[13], C)\n\tC, t[14] = madd2(x[13], z[14], t[14], C)\n\tC, t[15] = madd2(x[13], z[15], t[15], C)\n\tC, t[16] = madd2(x[13], z[16], t[16], C)\n\tC, t[17] = madd2(x[13], z[17], t[17], C)\n\tC, t[18] = madd2(x[13], z[18], t[18], C)\n\tC, t[19] = madd2(x[13], z[19], t[19], C)\n\tC, t[20] = madd2(x[13], z[20], t[20], C)\n\tC, t[21] = madd2(x[13], z[21], t[21], C)\n\n\tD = C\n\n\t// m = t[0]n'[0] mod W\n\tm = t[0] * 2085129623399436079\n\n\t// -----------------------------------\n\t// Second loop\n\tC = madd0(m, 9062599614324828209, t[0])\n\n\tC, t[0] = madd2(m, 952425709649632109, t[1], C)\n\n\tC, t[1] = madd2(m, 13987751354083916656, t[2], C)\n\n\tC, t[2] = madd2(m, 9476693002504986527, t[3], C)\n\n\tC, t[3] = madd2(m, 17899356805776864267, t[4], C)\n\n\tC, t[4] = madd2(m, 2607080593922027197, t[5], C)\n\n\tC, t[5] = madd2(m, 6852504016717314360, t[6], C)\n\n\tC, t[6] = madd2(m, 366248478184989226, t[7], C)\n\n\tC, t[7] = madd2(m, 2672987780203805083, t[8], C)\n\n\tC, t[8] = madd2(m, 14115032483094903896, t[9], C)\n\n\tC, t[9] = madd2(m, 8062699450825609015, t[10], C)\n\n\tC, t[10] = madd2(m, 8413249848292746549, t[11], C)\n\n\tC, t[11] = madd2(m, 11172154229712803058, t[12], C)\n\n\tC, t[12] = madd2(m, 18137346262305431037, t[13], C)\n\n\tC, t[13] = madd2(m, 123227702747754650, t[14], C)\n\n\tC, t[14] = madd2(m, 7409464670784690235, t[15], C)\n\n\tC, t[15] = madd2(m, 243347369443125979, t[16], C)\n\n\tC, t[16] = madd2(m, 200317109320159479, t[17], C)\n\n\tC, t[17] = madd2(m, 17492726232193822651, t[18], C)\n\n\tC, t[18] = madd2(m, 17666595880400198649, t[19], C)\n\n\tC, t[19] = madd2(m, 1619463007483089584, t[20], C)\n\n\tC, t[20] = madd3(m, 7910025299994333900, t[21], C, t[22])\n\n\tt[21], t[22] = bits.Add64(D, C, 0)\n\t// 
-----------------------------------\n\t// First loop\n\n\tC, t[0] = madd1(x[14], z[0], t[0])\n\tC, t[1] = madd2(x[14], z[1], t[1], C)\n\tC, t[2] = madd2(x[14], z[2], t[2], C)\n\tC, t[3] = madd2(x[14], z[3], t[3], C)\n\tC, t[4] = madd2(x[14], z[4], t[4], C)\n\tC, t[5] = madd2(x[14], z[5], t[5], C)\n\tC, t[6] = madd2(x[14], z[6], t[6], C)\n\tC, t[7] = madd2(x[14], z[7], t[7], C)\n\tC, t[8] = madd2(x[14], z[8], t[8], C)\n\tC, t[9] = madd2(x[14], z[9], t[9], C)\n\tC, t[10] = madd2(x[14], z[10], t[10], C)\n\tC, t[11] = madd2(x[14], z[11], t[11], C)\n\tC, t[12] = madd2(x[14], z[12], t[12], C)\n\tC, t[13] = madd2(x[14], z[13], t[13], C)\n\tC, t[14] = madd2(x[14], z[14], t[14], C)\n\tC, t[15] = madd2(x[14], z[15], t[15], C)\n\tC, t[16] = madd2(x[14], z[16], t[16], C)\n\tC, t[17] = madd2(x[14], z[17], t[17], C)\n\tC, t[18] = madd2(x[14], z[18], t[18], C)\n\tC, t[19] = madd2(x[14], z[19], t[19], C)\n\tC, t[20] = madd2(x[14], z[20], t[20], C)\n\tC, t[21] = madd2(x[14], z[21], t[21], C)\n\n\tD = C\n\n\t// m = t[0]n'[0] mod W\n\tm = t[0] * 2085129623399436079\n\n\t// -----------------------------------\n\t// Second loop\n\tC = madd0(m, 9062599614324828209, t[0])\n\n\tC, t[0] = madd2(m, 952425709649632109, t[1], C)\n\n\tC, t[1] = madd2(m, 13987751354083916656, t[2], C)\n\n\tC, t[2] = madd2(m, 9476693002504986527, t[3], C)\n\n\tC, t[3] = madd2(m, 17899356805776864267, t[4], C)\n\n\tC, t[4] = madd2(m, 2607080593922027197, t[5], C)\n\n\tC, t[5] = madd2(m, 6852504016717314360, t[6], C)\n\n\tC, t[6] = madd2(m, 366248478184989226, t[7], C)\n\n\tC, t[7] = madd2(m, 2672987780203805083, t[8], C)\n\n\tC, t[8] = madd2(m, 14115032483094903896, t[9], C)\n\n\tC, t[9] = madd2(m, 8062699450825609015, t[10], C)\n\n\tC, t[10] = madd2(m, 8413249848292746549, t[11], C)\n\n\tC, t[11] = madd2(m, 11172154229712803058, t[12], C)\n\n\tC, t[12] = madd2(m, 18137346262305431037, t[13], C)\n\n\tC, t[13] = madd2(m, 123227702747754650, t[14], C)\n\n\tC, t[14] = madd2(m, 7409464670784690235, t[15], C)\n\n\tC, 
t[15] = madd2(m, 243347369443125979, t[16], C)\n\n\tC, t[16] = madd2(m, 200317109320159479, t[17], C)\n\n\tC, t[17] = madd2(m, 17492726232193822651, t[18], C)\n\n\tC, t[18] = madd2(m, 17666595880400198649, t[19], C)\n\n\tC, t[19] = madd2(m, 1619463007483089584, t[20], C)\n\n\tC, t[20] = madd3(m, 7910025299994333900, t[21], C, t[22])\n\n\tt[21], t[22] = bits.Add64(D, C, 0)\n\t// -----------------------------------\n\t// First loop\n\n\tC, t[0] = madd1(x[15], z[0], t[0])\n\tC, t[1] = madd2(x[15], z[1], t[1], C)\n\tC, t[2] = madd2(x[15], z[2], t[2], C)\n\tC, t[3] = madd2(x[15], z[3], t[3], C)\n\tC, t[4] = madd2(x[15], z[4], t[4], C)\n\tC, t[5] = madd2(x[15], z[5], t[5], C)\n\tC, t[6] = madd2(x[15], z[6], t[6], C)\n\tC, t[7] = madd2(x[15], z[7], t[7], C)\n\tC, t[8] = madd2(x[15], z[8], t[8], C)\n\tC, t[9] = madd2(x[15], z[9], t[9], C)\n\tC, t[10] = madd2(x[15], z[10], t[10], C)\n\tC, t[11] = madd2(x[15], z[11], t[11], C)\n\tC, t[12] = madd2(x[15], z[12], t[12], C)\n\tC, t[13] = madd2(x[15], z[13], t[13], C)\n\tC, t[14] = madd2(x[15], z[14], t[14], C)\n\tC, t[15] = madd2(x[15], z[15], t[15], C)\n\tC, t[16] = madd2(x[15], z[16], t[16], C)\n\tC, t[17] = madd2(x[15], z[17], t[17], C)\n\tC, t[18] = madd2(x[15], z[18], t[18], C)\n\tC, t[19] = madd2(x[15], z[19], t[19], C)\n\tC, t[20] = madd2(x[15], z[20], t[20], C)\n\tC, t[21] = madd2(x[15], z[21], t[21], C)\n\n\tD = C\n\n\t// m = t[0]n'[0] mod W\n\tm = t[0] * 2085129623399436079\n\n\t// -----------------------------------\n\t// Second loop\n\tC = madd0(m, 9062599614324828209, t[0])\n\n\tC, t[0] = madd2(m, 952425709649632109, t[1], C)\n\n\tC, t[1] = madd2(m, 13987751354083916656, t[2], C)\n\n\tC, t[2] = madd2(m, 9476693002504986527, t[3], C)\n\n\tC, t[3] = madd2(m, 17899356805776864267, t[4], C)\n\n\tC, t[4] = madd2(m, 2607080593922027197, t[5], C)\n\n\tC, t[5] = madd2(m, 6852504016717314360, t[6], C)\n\n\tC, t[6] = madd2(m, 366248478184989226, t[7], C)\n\n\tC, t[7] = madd2(m, 2672987780203805083, t[8], C)\n\n\tC, t[8] = 
madd2(m, 14115032483094903896, t[9], C)\n\n\tC, t[9] = madd2(m, 8062699450825609015, t[10], C)\n\n\tC, t[10] = madd2(m, 8413249848292746549, t[11], C)\n\n\tC, t[11] = madd2(m, 11172154229712803058, t[12], C)\n\n\tC, t[12] = madd2(m, 18137346262305431037, t[13], C)\n\n\tC, t[13] = madd2(m, 123227702747754650, t[14], C)\n\n\tC, t[14] = madd2(m, 7409464670784690235, t[15], C)\n\n\tC, t[15] = madd2(m, 243347369443125979, t[16], C)\n\n\tC, t[16] = madd2(m, 200317109320159479, t[17], C)\n\n\tC, t[17] = madd2(m, 17492726232193822651, t[18], C)\n\n\tC, t[18] = madd2(m, 17666595880400198649, t[19], C)\n\n\tC, t[19] = madd2(m, 1619463007483089584, t[20], C)\n\n\tC, t[20] = madd3(m, 7910025299994333900, t[21], C, t[22])\n\n\tt[21], t[22] = bits.Add64(D, C, 0)\n\t// -----------------------------------\n\t// First loop\n\n\tC, t[0] = madd1(x[16], z[0], t[0])\n\tC, t[1] = madd2(x[16], z[1], t[1], C)\n\tC, t[2] = madd2(x[16], z[2], t[2], C)\n\tC, t[3] = madd2(x[16], z[3], t[3], C)\n\tC, t[4] = madd2(x[16], z[4], t[4], C)\n\tC, t[5] = madd2(x[16], z[5], t[5], C)\n\tC, t[6] = madd2(x[16], z[6], t[6], C)\n\tC, t[7] = madd2(x[16], z[7], t[7], C)\n\tC, t[8] = madd2(x[16], z[8], t[8], C)\n\tC, t[9] = madd2(x[16], z[9], t[9], C)\n\tC, t[10] = madd2(x[16], z[10], t[10], C)\n\tC, t[11] = madd2(x[16], z[11], t[11], C)\n\tC, t[12] = madd2(x[16], z[12], t[12], C)\n\tC, t[13] = madd2(x[16], z[13], t[13], C)\n\tC, t[14] = madd2(x[16], z[14], t[14], C)\n\tC, t[15] = madd2(x[16], z[15], t[15], C)\n\tC, t[16] = madd2(x[16], z[16], t[16], C)\n\tC, t[17] = madd2(x[16], z[17], t[17], C)\n\tC, t[18] = madd2(x[16], z[18], t[18], C)\n\tC, t[19] = madd2(x[16], z[19], t[19], C)\n\tC, t[20] = madd2(x[16], z[20], t[20], C)\n\tC, t[21] = madd2(x[16], z[21], t[21], C)\n\n\tD = C\n\n\t// m = t[0]n'[0] mod W\n\tm = t[0] * 2085129623399436079\n\n\t// -----------------------------------\n\t// Second loop\n\tC = madd0(m, 9062599614324828209, t[0])\n\n\tC, t[0] = madd2(m, 952425709649632109, t[1], C)\n\n\tC, t[1] 
= madd2(m, 13987751354083916656, t[2], C)\n\n\tC, t[2] = madd2(m, 9476693002504986527, t[3], C)\n\n\tC, t[3] = madd2(m, 17899356805776864267, t[4], C)\n\n\tC, t[4] = madd2(m, 2607080593922027197, t[5], C)\n\n\tC, t[5] = madd2(m, 6852504016717314360, t[6], C)\n\n\tC, t[6] = madd2(m, 366248478184989226, t[7], C)\n\n\tC, t[7] = madd2(m, 2672987780203805083, t[8], C)\n\n\tC, t[8] = madd2(m, 14115032483094903896, t[9], C)\n\n\tC, t[9] = madd2(m, 8062699450825609015, t[10], C)\n\n\tC, t[10] = madd2(m, 8413249848292746549, t[11], C)\n\n\tC, t[11] = madd2(m, 11172154229712803058, t[12], C)\n\n\tC, t[12] = madd2(m, 18137346262305431037, t[13], C)\n\n\tC, t[13] = madd2(m, 123227702747754650, t[14], C)\n\n\tC, t[14] = madd2(m, 7409464670784690235, t[15], C)\n\n\tC, t[15] = madd2(m, 243347369443125979, t[16], C)\n\n\tC, t[16] = madd2(m, 200317109320159479, t[17], C)\n\n\tC, t[17] = madd2(m, 17492726232193822651, t[18], C)\n\n\tC, t[18] = madd2(m, 17666595880400198649, t[19], C)\n\n\tC, t[19] = madd2(m, 1619463007483089584, t[20], C)\n\n\tC, t[20] = madd3(m, 7910025299994333900, t[21], C, t[22])\n\n\tt[21], t[22] = bits.Add64(D, C, 0)\n\t// -----------------------------------\n\t// First loop\n\n\tC, t[0] = madd1(x[17], z[0], t[0])\n\tC, t[1] = madd2(x[17], z[1], t[1], C)\n\tC, t[2] = madd2(x[17], z[2], t[2], C)\n\tC, t[3] = madd2(x[17], z[3], t[3], C)\n\tC, t[4] = madd2(x[17], z[4], t[4], C)\n\tC, t[5] = madd2(x[17], z[5], t[5], C)\n\tC, t[6] = madd2(x[17], z[6], t[6], C)\n\tC, t[7] = madd2(x[17], z[7], t[7], C)\n\tC, t[8] = madd2(x[17], z[8], t[8], C)\n\tC, t[9] = madd2(x[17], z[9], t[9], C)\n\tC, t[10] = madd2(x[17], z[10], t[10], C)\n\tC, t[11] = madd2(x[17], z[11], t[11], C)\n\tC, t[12] = madd2(x[17], z[12], t[12], C)\n\tC, t[13] = madd2(x[17], z[13], t[13], C)\n\tC, t[14] = madd2(x[17], z[14], t[14], C)\n\tC, t[15] = madd2(x[17], z[15], t[15], C)\n\tC, t[16] = madd2(x[17], z[16], t[16], C)\n\tC, t[17] = madd2(x[17], z[17], t[17], C)\n\tC, t[18] = madd2(x[17], z[18], 
t[18], C)\n\tC, t[19] = madd2(x[17], z[19], t[19], C)\n\tC, t[20] = madd2(x[17], z[20], t[20], C)\n\tC, t[21] = madd2(x[17], z[21], t[21], C)\n\n\tD = C\n\n\t// m = t[0]n'[0] mod W\n\tm = t[0] * 2085129623399436079\n\n\t// -----------------------------------\n\t// Second loop\n\tC = madd0(m, 9062599614324828209, t[0])\n\n\tC, t[0] = madd2(m, 952425709649632109, t[1], C)\n\n\tC, t[1] = madd2(m, 13987751354083916656, t[2], C)\n\n\tC, t[2] = madd2(m, 9476693002504986527, t[3], C)\n\n\tC, t[3] = madd2(m, 17899356805776864267, t[4], C)\n\n\tC, t[4] = madd2(m, 2607080593922027197, t[5], C)\n\n\tC, t[5] = madd2(m, 6852504016717314360, t[6], C)\n\n\tC, t[6] = madd2(m, 366248478184989226, t[7], C)\n\n\tC, t[7] = madd2(m, 2672987780203805083, t[8], C)\n\n\tC, t[8] = madd2(m, 14115032483094903896, t[9], C)\n\n\tC, t[9] = madd2(m, 8062699450825609015, t[10], C)\n\n\tC, t[10] = madd2(m, 8413249848292746549, t[11], C)\n\n\tC, t[11] = madd2(m, 11172154229712803058, t[12], C)\n\n\tC, t[12] = madd2(m, 18137346262305431037, t[13], C)\n\n\tC, t[13] = madd2(m, 123227702747754650, t[14], C)\n\n\tC, t[14] = madd2(m, 7409464670784690235, t[15], C)\n\n\tC, t[15] = madd2(m, 243347369443125979, t[16], C)\n\n\tC, t[16] = madd2(m, 200317109320159479, t[17], C)\n\n\tC, t[17] = madd2(m, 17492726232193822651, t[18], C)\n\n\tC, t[18] = madd2(m, 17666595880400198649, t[19], C)\n\n\tC, t[19] = madd2(m, 1619463007483089584, t[20], C)\n\n\tC, t[20] = madd3(m, 7910025299994333900, t[21], C, t[22])\n\n\tt[21], t[22] = bits.Add64(D, C, 0)\n\t// -----------------------------------\n\t// First loop\n\n\tC, t[0] = madd1(x[18], z[0], t[0])\n\tC, t[1] = madd2(x[18], z[1], t[1], C)\n\tC, t[2] = madd2(x[18], z[2], t[2], C)\n\tC, t[3] = madd2(x[18], z[3], t[3], C)\n\tC, t[4] = madd2(x[18], z[4], t[4], C)\n\tC, t[5] = madd2(x[18], z[5], t[5], C)\n\tC, t[6] = madd2(x[18], z[6], t[6], C)\n\tC, t[7] = madd2(x[18], z[7], t[7], C)\n\tC, t[8] = madd2(x[18], z[8], t[8], C)\n\tC, t[9] = madd2(x[18], z[9], t[9], C)\n\tC, 
t[10] = madd2(x[18], z[10], t[10], C)\n\tC, t[11] = madd2(x[18], z[11], t[11], C)\n\tC, t[12] = madd2(x[18], z[12], t[12], C)\n\tC, t[13] = madd2(x[18], z[13], t[13], C)\n\tC, t[14] = madd2(x[18], z[14], t[14], C)\n\tC, t[15] = madd2(x[18], z[15], t[15], C)\n\tC, t[16] = madd2(x[18], z[16], t[16], C)\n\tC, t[17] = madd2(x[18], z[17], t[17], C)\n\tC, t[18] = madd2(x[18], z[18], t[18], C)\n\tC, t[19] = madd2(x[18], z[19], t[19], C)\n\tC, t[20] = madd2(x[18], z[20], t[20], C)\n\tC, t[21] = madd2(x[18], z[21], t[21], C)\n\n\tD = C\n\n\t// m = t[0]n'[0] mod W\n\tm = t[0] * 2085129623399436079\n\n\t// -----------------------------------\n\t// Second loop\n\tC = madd0(m, 9062599614324828209, t[0])\n\n\tC, t[0] = madd2(m, 952425709649632109, t[1], C)\n\n\tC, t[1] = madd2(m, 13987751354083916656, t[2], C)\n\n\tC, t[2] = madd2(m, 9476693002504986527, t[3], C)\n\n\tC, t[3] = madd2(m, 17899356805776864267, t[4], C)\n\n\tC, t[4] = madd2(m, 2607080593922027197, t[5], C)\n\n\tC, t[5] = madd2(m, 6852504016717314360, t[6], C)\n\n\tC, t[6] = madd2(m, 366248478184989226, t[7], C)\n\n\tC, t[7] = madd2(m, 2672987780203805083, t[8], C)\n\n\tC, t[8] = madd2(m, 14115032483094903896, t[9], C)\n\n\tC, t[9] = madd2(m, 8062699450825609015, t[10], C)\n\n\tC, t[10] = madd2(m, 8413249848292746549, t[11], C)\n\n\tC, t[11] = madd2(m, 11172154229712803058, t[12], C)\n\n\tC, t[12] = madd2(m, 18137346262305431037, t[13], C)\n\n\tC, t[13] = madd2(m, 123227702747754650, t[14], C)\n\n\tC, t[14] = madd2(m, 7409464670784690235, t[15], C)\n\n\tC, t[15] = madd2(m, 243347369443125979, t[16], C)\n\n\tC, t[16] = madd2(m, 200317109320159479, t[17], C)\n\n\tC, t[17] = madd2(m, 17492726232193822651, t[18], C)\n\n\tC, t[18] = madd2(m, 17666595880400198649, t[19], C)\n\n\tC, t[19] = madd2(m, 1619463007483089584, t[20], C)\n\n\tC, t[20] = madd3(m, 7910025299994333900, t[21], C, t[22])\n\n\tt[21], t[22] = bits.Add64(D, C, 0)\n\t// -----------------------------------\n\t// First loop\n\n\tC, t[0] = madd1(x[19], z[0], 
t[0])\n\tC, t[1] = madd2(x[19], z[1], t[1], C)\n\tC, t[2] = madd2(x[19], z[2], t[2], C)\n\tC, t[3] = madd2(x[19], z[3], t[3], C)\n\tC, t[4] = madd2(x[19], z[4], t[4], C)\n\tC, t[5] = madd2(x[19], z[5], t[5], C)\n\tC, t[6] = madd2(x[19], z[6], t[6], C)\n\tC, t[7] = madd2(x[19], z[7], t[7], C)\n\tC, t[8] = madd2(x[19], z[8], t[8], C)\n\tC, t[9] = madd2(x[19], z[9], t[9], C)\n\tC, t[10] = madd2(x[19], z[10], t[10], C)\n\tC, t[11] = madd2(x[19], z[11], t[11], C)\n\tC, t[12] = madd2(x[19], z[12], t[12], C)\n\tC, t[13] = madd2(x[19], z[13], t[13], C)\n\tC, t[14] = madd2(x[19], z[14], t[14], C)\n\tC, t[15] = madd2(x[19], z[15], t[15], C)\n\tC, t[16] = madd2(x[19], z[16], t[16], C)\n\tC, t[17] = madd2(x[19], z[17], t[17], C)\n\tC, t[18] = madd2(x[19], z[18], t[18], C)\n\tC, t[19] = madd2(x[19], z[19], t[19], C)\n\tC, t[20] = madd2(x[19], z[20], t[20], C)\n\tC, t[21] = madd2(x[19], z[21], t[21], C)\n\n\tD = C\n\n\t// m = t[0]n'[0] mod W\n\tm = t[0] * 2085129623399436079\n\n\t// -----------------------------------\n\t// Second loop\n\tC = madd0(m, 9062599614324828209, t[0])\n\n\tC, t[0] = madd2(m, 952425709649632109, t[1], C)\n\n\tC, t[1] = madd2(m, 13987751354083916656, t[2], C)\n\n\tC, t[2] = madd2(m, 9476693002504986527, t[3], C)\n\n\tC, t[3] = madd2(m, 17899356805776864267, t[4], C)\n\n\tC, t[4] = madd2(m, 2607080593922027197, t[5], C)\n\n\tC, t[5] = madd2(m, 6852504016717314360, t[6], C)\n\n\tC, t[6] = madd2(m, 366248478184989226, t[7], C)\n\n\tC, t[7] = madd2(m, 2672987780203805083, t[8], C)\n\n\tC, t[8] = madd2(m, 14115032483094903896, t[9], C)\n\n\tC, t[9] = madd2(m, 8062699450825609015, t[10], C)\n\n\tC, t[10] = madd2(m, 8413249848292746549, t[11], C)\n\n\tC, t[11] = madd2(m, 11172154229712803058, t[12], C)\n\n\tC, t[12] = madd2(m, 18137346262305431037, t[13], C)\n\n\tC, t[13] = madd2(m, 123227702747754650, t[14], C)\n\n\tC, t[14] = madd2(m, 7409464670784690235, t[15], C)\n\n\tC, t[15] = madd2(m, 243347369443125979, t[16], C)\n\n\tC, t[16] = madd2(m, 
200317109320159479, t[17], C)\n\n\tC, t[17] = madd2(m, 17492726232193822651, t[18], C)\n\n\tC, t[18] = madd2(m, 17666595880400198649, t[19], C)\n\n\tC, t[19] = madd2(m, 1619463007483089584, t[20], C)\n\n\tC, t[20] = madd3(m, 7910025299994333900, t[21], C, t[22])\n\n\tt[21], t[22] = bits.Add64(D, C, 0)\n\t// -----------------------------------\n\t// First loop\n\n\tC, t[0] = madd1(x[20], z[0], t[0])\n\tC, t[1] = madd2(x[20], z[1], t[1], C)\n\tC, t[2] = madd2(x[20], z[2], t[2], C)\n\tC, t[3] = madd2(x[20], z[3], t[3], C)\n\tC, t[4] = madd2(x[20], z[4], t[4], C)\n\tC, t[5] = madd2(x[20], z[5], t[5], C)\n\tC, t[6] = madd2(x[20], z[6], t[6], C)\n\tC, t[7] = madd2(x[20], z[7], t[7], C)\n\tC, t[8] = madd2(x[20], z[8], t[8], C)\n\tC, t[9] = madd2(x[20], z[9], t[9], C)\n\tC, t[10] = madd2(x[20], z[10], t[10], C)\n\tC, t[11] = madd2(x[20], z[11], t[11], C)\n\tC, t[12] = madd2(x[20], z[12], t[12], C)\n\tC, t[13] = madd2(x[20], z[13], t[13], C)\n\tC, t[14] = madd2(x[20], z[14], t[14], C)\n\tC, t[15] = madd2(x[20], z[15], t[15], C)\n\tC, t[16] = madd2(x[20], z[16], t[16], C)\n\tC, t[17] = madd2(x[20], z[17], t[17], C)\n\tC, t[18] = madd2(x[20], z[18], t[18], C)\n\tC, t[19] = madd2(x[20], z[19], t[19], C)\n\tC, t[20] = madd2(x[20], z[20], t[20], C)\n\tC, t[21] = madd2(x[20], z[21], t[21], C)\n\n\tD = C\n\n\t// m = t[0]n'[0] mod W\n\tm = t[0] * 2085129623399436079\n\n\t// -----------------------------------\n\t// Second loop\n\tC = madd0(m, 9062599614324828209, t[0])\n\n\tC, t[0] = madd2(m, 952425709649632109, t[1], C)\n\n\tC, t[1] = madd2(m, 13987751354083916656, t[2], C)\n\n\tC, t[2] = madd2(m, 9476693002504986527, t[3], C)\n\n\tC, t[3] = madd2(m, 17899356805776864267, t[4], C)\n\n\tC, t[4] = madd2(m, 2607080593922027197, t[5], C)\n\n\tC, t[5] = madd2(m, 6852504016717314360, t[6], C)\n\n\tC, t[6] = madd2(m, 366248478184989226, t[7], C)\n\n\tC, t[7] = madd2(m, 2672987780203805083, t[8], C)\n\n\tC, t[8] = madd2(m, 14115032483094903896, t[9], C)\n\n\tC, t[9] = madd2(m, 
8062699450825609015, t[10], C)\n\n\tC, t[10] = madd2(m, 8413249848292746549, t[11], C)\n\n\tC, t[11] = madd2(m, 11172154229712803058, t[12], C)\n\n\tC, t[12] = madd2(m, 18137346262305431037, t[13], C)\n\n\tC, t[13] = madd2(m, 123227702747754650, t[14], C)\n\n\tC, t[14] = madd2(m, 7409464670784690235, t[15], C)\n\n\tC, t[15] = madd2(m, 243347369443125979, t[16], C)\n\n\tC, t[16] = madd2(m, 200317109320159479, t[17], C)\n\n\tC, t[17] = madd2(m, 17492726232193822651, t[18], C)\n\n\tC, t[18] = madd2(m, 17666595880400198649, t[19], C)\n\n\tC, t[19] = madd2(m, 1619463007483089584, t[20], C)\n\n\tC, t[20] = madd3(m, 7910025299994333900, t[21], C, t[22])\n\n\tt[21], t[22] = bits.Add64(D, C, 0)\n\t// -----------------------------------\n\t// First loop\n\n\tC, t[0] = madd1(x[21], z[0], t[0])\n\tC, t[1] = madd2(x[21], z[1], t[1], C)\n\tC, t[2] = madd2(x[21], z[2], t[2], C)\n\tC, t[3] = madd2(x[21], z[3], t[3], C)\n\tC, t[4] = madd2(x[21], z[4], t[4], C)\n\tC, t[5] = madd2(x[21], z[5], t[5], C)\n\tC, t[6] = madd2(x[21], z[6], t[6], C)\n\tC, t[7] = madd2(x[21], z[7], t[7], C)\n\tC, t[8] = madd2(x[21], z[8], t[8], C)\n\tC, t[9] = madd2(x[21], z[9], t[9], C)\n\tC, t[10] = madd2(x[21], z[10], t[10], C)\n\tC, t[11] = madd2(x[21], z[11], t[11], C)\n\tC, t[12] = madd2(x[21], z[12], t[12], C)\n\tC, t[13] = madd2(x[21], z[13], t[13], C)\n\tC, t[14] = madd2(x[21], z[14], t[14], C)\n\tC, t[15] = madd2(x[21], z[15], t[15], C)\n\tC, t[16] = madd2(x[21], z[16], t[16], C)\n\tC, t[17] = madd2(x[21], z[17], t[17], C)\n\tC, t[18] = madd2(x[21], z[18], t[18], C)\n\tC, t[19] = madd2(x[21], z[19], t[19], C)\n\tC, t[20] = madd2(x[21], z[20], t[20], C)\n\tC, t[21] = madd2(x[21], z[21], t[21], C)\n\n\tD = C\n\n\t// m = t[0]n'[0] mod W\n\tm = t[0] * 2085129623399436079\n\n\t// -----------------------------------\n\t// Second loop\n\tC = madd0(m, 9062599614324828209, t[0])\n\n\tC, t[0] = madd2(m, 952425709649632109, t[1], C)\n\n\tC, t[1] = madd2(m, 13987751354083916656, t[2], C)\n\n\tC, t[2] = 
madd2(m, 9476693002504986527, t[3], C)\n\n\tC, t[3] = madd2(m, 17899356805776864267, t[4], C)\n\n\tC, t[4] = madd2(m, 2607080593922027197, t[5], C)\n\n\tC, t[5] = madd2(m, 6852504016717314360, t[6], C)\n\n\tC, t[6] = madd2(m, 366248478184989226, t[7], C)\n\n\tC, t[7] = madd2(m, 2672987780203805083, t[8], C)\n\n\tC, t[8] = madd2(m, 14115032483094903896, t[9], C)\n\n\tC, t[9] = madd2(m, 8062699450825609015, t[10], C)\n\n\tC, t[10] = madd2(m, 8413249848292746549, t[11], C)\n\n\tC, t[11] = madd2(m, 11172154229712803058, t[12], C)\n\n\tC, t[12] = madd2(m, 18137346262305431037, t[13], C)\n\n\tC, t[13] = madd2(m, 123227702747754650, t[14], C)\n\n\tC, t[14] = madd2(m, 7409464670784690235, t[15], C)\n\n\tC, t[15] = madd2(m, 243347369443125979, t[16], C)\n\n\tC, t[16] = madd2(m, 200317109320159479, t[17], C)\n\n\tC, t[17] = madd2(m, 17492726232193822651, t[18], C)\n\n\tC, t[18] = madd2(m, 17666595880400198649, t[19], C)\n\n\tC, t[19] = madd2(m, 1619463007483089584, t[20], C)\n\n\tC, t[20] = madd3(m, 7910025299994333900, t[21], C, t[22])\n\n\tt[21], t[22] = bits.Add64(D, C, 0)\n\n\tif t[22] != 0 {\n\t\t// we need to reduce, we have a result on 23 words\n\t\tvar b uint64\n\t\tz[0], b = bits.Sub64(t[0], 9062599614324828209, 0)\n\t\tz[1], b = bits.Sub64(t[1], 952425709649632109, b)\n\t\tz[2], b = bits.Sub64(t[2], 13987751354083916656, b)\n\t\tz[3], b = bits.Sub64(t[3], 9476693002504986527, b)\n\t\tz[4], b = bits.Sub64(t[4], 17899356805776864267, b)\n\t\tz[5], b = bits.Sub64(t[5], 2607080593922027197, b)\n\t\tz[6], b = bits.Sub64(t[6], 6852504016717314360, b)\n\t\tz[7], b = bits.Sub64(t[7], 366248478184989226, b)\n\t\tz[8], b = bits.Sub64(t[8], 2672987780203805083, b)\n\t\tz[9], b = bits.Sub64(t[9], 14115032483094903896, b)\n\t\tz[10], b = bits.Sub64(t[10], 8062699450825609015, b)\n\t\tz[11], b = bits.Sub64(t[11], 8413249848292746549, b)\n\t\tz[12], b = bits.Sub64(t[12], 11172154229712803058, b)\n\t\tz[13], b = bits.Sub64(t[13], 18137346262305431037, b)\n\t\tz[14], b = 
bits.Sub64(t[14], 123227702747754650, b)\n\t\tz[15], b = bits.Sub64(t[15], 7409464670784690235, b)\n\t\tz[16], b = bits.Sub64(t[16], 243347369443125979, b)\n\t\tz[17], b = bits.Sub64(t[17], 200317109320159479, b)\n\t\tz[18], b = bits.Sub64(t[18], 17492726232193822651, b)\n\t\tz[19], b = bits.Sub64(t[19], 17666595880400198649, b)\n\t\tz[20], b = bits.Sub64(t[20], 1619463007483089584, b)\n\t\tz[21], _ = bits.Sub64(t[21], 7910025299994333900, b)\n\t\treturn z\n\t}\n\n\t// copy t into z\n\tz[0] = t[0]\n\tz[1] = t[1]\n\tz[2] = t[2]\n\tz[3] = t[3]\n\tz[4] = t[4]\n\tz[5] = t[5]\n\tz[6] = t[6]\n\tz[7] = t[7]\n\tz[8] = t[8]\n\tz[9] = t[9]\n\tz[10] = t[10]\n\tz[11] = t[11]\n\tz[12] = t[12]\n\tz[13] = t[13]\n\tz[14] = t[14]\n\tz[15] = t[15]\n\tz[16] = t[16]\n\tz[17] = t[17]\n\tz[18] = t[18]\n\tz[19] = t[19]\n\tz[20] = t[20]\n\tz[21] = t[21]\n\n\t// if z > q --> z -= q\n\tif !(z[21] < 7910025299994333900 || (z[21] == 7910025299994333900 && (z[20] < 1619463007483089584 || (z[20] == 1619463007483089584 && (z[19] < 17666595880400198649 || (z[19] == 17666595880400198649 && (z[18] < 17492726232193822651 || (z[18] == 17492726232193822651 && (z[17] < 200317109320159479 || (z[17] == 200317109320159479 && (z[16] < 243347369443125979 || (z[16] == 243347369443125979 && (z[15] < 7409464670784690235 || (z[15] == 7409464670784690235 && (z[14] < 123227702747754650 || (z[14] == 123227702747754650 && (z[13] < 18137346262305431037 || (z[13] == 18137346262305431037 && (z[12] < 11172154229712803058 || (z[12] == 11172154229712803058 && (z[11] < 8413249848292746549 || (z[11] == 8413249848292746549 && (z[10] < 8062699450825609015 || (z[10] == 8062699450825609015 && (z[9] < 14115032483094903896 || (z[9] == 14115032483094903896 && (z[8] < 2672987780203805083 || (z[8] == 2672987780203805083 && (z[7] < 366248478184989226 || (z[7] == 366248478184989226 && (z[6] < 6852504016717314360 || (z[6] == 6852504016717314360 && (z[5] < 2607080593922027197 || (z[5] == 2607080593922027197 && (z[4] < 
17899356805776864267 || (z[4] == 17899356805776864267 && (z[3] < 9476693002504986527 || (z[3] == 9476693002504986527 && (z[2] < 13987751354083916656 || (z[2] == 13987751354083916656 && (z[1] < 952425709649632109 || (z[1] == 952425709649632109 && (z[0] < 9062599614324828209))))))))))))))))))))))))))))))))))))))))))) {\n\t\tvar b uint64\n\t\tz[0], b = bits.Sub64(z[0], 9062599614324828209, 0)\n\t\tz[1], b = bits.Sub64(z[1], 952425709649632109, b)\n\t\tz[2], b = bits.Sub64(z[2], 13987751354083916656, b)\n\t\tz[3], b = bits.Sub64(z[3], 9476693002504986527, b)\n\t\tz[4], b = bits.Sub64(z[4], 17899356805776864267, b)\n\t\tz[5], b = bits.Sub64(z[5], 2607080593922027197, b)\n\t\tz[6], b = bits.Sub64(z[6], 6852504016717314360, b)\n\t\tz[7], b = bits.Sub64(z[7], 366248478184989226, b)\n\t\tz[8], b = bits.Sub64(z[8], 2672987780203805083, b)\n\t\tz[9], b = bits.Sub64(z[9], 14115032483094903896, b)\n\t\tz[10], b = bits.Sub64(z[10], 8062699450825609015, b)\n\t\tz[11], b = bits.Sub64(z[11], 8413249848292746549, b)\n\t\tz[12], b = bits.Sub64(z[12], 11172154229712803058, b)\n\t\tz[13], b = bits.Sub64(z[13], 18137346262305431037, b)\n\t\tz[14], b = bits.Sub64(z[14], 123227702747754650, b)\n\t\tz[15], b = bits.Sub64(z[15], 7409464670784690235, b)\n\t\tz[16], b = bits.Sub64(z[16], 243347369443125979, b)\n\t\tz[17], b = bits.Sub64(z[17], 200317109320159479, b)\n\t\tz[18], b = bits.Sub64(z[18], 17492726232193822651, b)\n\t\tz[19], b = bits.Sub64(z[19], 17666595880400198649, b)\n\t\tz[20], b = bits.Sub64(z[20], 1619463007483089584, b)\n\t\tz[21], _ = bits.Sub64(z[21], 7910025299994333900, b)\n\t}\n\treturn z\n}", "func MulA24(z, x *Elt)", "func prefixSums(A []int) []int {\n result := make([]int, len(A)+1)\n result[0] = 0\n for i := 1; i <= len(A); i++ {\n result[i] = result[i-1] + A[i-1]\n }\n return result\n}", "func cmulSlice(out, a []float64, c float64)", "func ArrayOfProducts(array []int) []int {\n\tresult := 1\n\tresultArr := []int{}\n\tzeroCounter := 0\n\t// For multiplies every 
number by the next number\n\t// and keeps in a variable\n\tfor i := 0; i < len(array); i++ {\n\t\tswitch {\n\t\tcase zeroCounter > 1:\n\t\t\t// If we find more than 1 zero we stop iterating\n\t\t\t// because everything will be a 0\n\t\t\tbreak\n\t\tcase array[i] == 0:\n\t\t\t// keeps track of how many zeros we have\n\t\t\tzeroCounter++\n\t\tdefault:\n\t\t\tresult = result * array[i]\n\t\t}\n\t}\n\t// For creates our resultArray and fills it up\n\tfor i := 0; i < len(array); i++ {\n\t\tswitch {\n\t\tcase zeroCounter > 1:\n\t\t\t// If zeros > 1 the whole array is filled with 0s\n\t\t\tresultArr = append(resultArr, 0)\n\t\tcase array[i] == 0:\n\t\t\t// on the spot where there's a zero in our\n\t\t\t// input array we put the multiplication of all the numbers\n\t\t\t// except the zero\n\t\t\tresultArr = append(resultArr, result)\n\t\tcase zeroCounter == 1:\n\t\t\t// If there is a zero in our input array\n\t\t\t// make every spot in our output array = 0\n\t\t\tresultArr = append(resultArr, 0)\n\t\tdefault:\n\t\t\tresultArr = append(resultArr, result/array[i])\n\t\t}\n\t}\n\treturn resultArr\n}", "func (z *Element22) Mul(x, y *Element22) *Element22 {\n\n\tvar t [23]uint64\n\tvar D uint64\n\tvar m, C uint64\n\t// -----------------------------------\n\t// First loop\n\n\tC, t[0] = bits.Mul64(y[0], x[0])\n\tC, t[1] = madd1(y[0], x[1], C)\n\tC, t[2] = madd1(y[0], x[2], C)\n\tC, t[3] = madd1(y[0], x[3], C)\n\tC, t[4] = madd1(y[0], x[4], C)\n\tC, t[5] = madd1(y[0], x[5], C)\n\tC, t[6] = madd1(y[0], x[6], C)\n\tC, t[7] = madd1(y[0], x[7], C)\n\tC, t[8] = madd1(y[0], x[8], C)\n\tC, t[9] = madd1(y[0], x[9], C)\n\tC, t[10] = madd1(y[0], x[10], C)\n\tC, t[11] = madd1(y[0], x[11], C)\n\tC, t[12] = madd1(y[0], x[12], C)\n\tC, t[13] = madd1(y[0], x[13], C)\n\tC, t[14] = madd1(y[0], x[14], C)\n\tC, t[15] = madd1(y[0], x[15], C)\n\tC, t[16] = madd1(y[0], x[16], C)\n\tC, t[17] = madd1(y[0], x[17], C)\n\tC, t[18] = madd1(y[0], x[18], C)\n\tC, t[19] = madd1(y[0], x[19], C)\n\tC, t[20] = 
madd1(y[0], x[20], C)\n\tC, t[21] = madd1(y[0], x[21], C)\n\n\tD = C\n\n\t// m = t[0]n'[0] mod W\n\tm = t[0] * 2085129623399436079\n\n\t// -----------------------------------\n\t// Second loop\n\tC = madd0(m, 9062599614324828209, t[0])\n\n\tC, t[0] = madd2(m, 952425709649632109, t[1], C)\n\n\tC, t[1] = madd2(m, 13987751354083916656, t[2], C)\n\n\tC, t[2] = madd2(m, 9476693002504986527, t[3], C)\n\n\tC, t[3] = madd2(m, 17899356805776864267, t[4], C)\n\n\tC, t[4] = madd2(m, 2607080593922027197, t[5], C)\n\n\tC, t[5] = madd2(m, 6852504016717314360, t[6], C)\n\n\tC, t[6] = madd2(m, 366248478184989226, t[7], C)\n\n\tC, t[7] = madd2(m, 2672987780203805083, t[8], C)\n\n\tC, t[8] = madd2(m, 14115032483094903896, t[9], C)\n\n\tC, t[9] = madd2(m, 8062699450825609015, t[10], C)\n\n\tC, t[10] = madd2(m, 8413249848292746549, t[11], C)\n\n\tC, t[11] = madd2(m, 11172154229712803058, t[12], C)\n\n\tC, t[12] = madd2(m, 18137346262305431037, t[13], C)\n\n\tC, t[13] = madd2(m, 123227702747754650, t[14], C)\n\n\tC, t[14] = madd2(m, 7409464670784690235, t[15], C)\n\n\tC, t[15] = madd2(m, 243347369443125979, t[16], C)\n\n\tC, t[16] = madd2(m, 200317109320159479, t[17], C)\n\n\tC, t[17] = madd2(m, 17492726232193822651, t[18], C)\n\n\tC, t[18] = madd2(m, 17666595880400198649, t[19], C)\n\n\tC, t[19] = madd2(m, 1619463007483089584, t[20], C)\n\n\tC, t[20] = madd3(m, 7910025299994333900, t[21], C, t[22])\n\n\tt[21], t[22] = bits.Add64(D, C, 0)\n\t// -----------------------------------\n\t// First loop\n\n\tC, t[0] = madd1(y[1], x[0], t[0])\n\tC, t[1] = madd2(y[1], x[1], t[1], C)\n\tC, t[2] = madd2(y[1], x[2], t[2], C)\n\tC, t[3] = madd2(y[1], x[3], t[3], C)\n\tC, t[4] = madd2(y[1], x[4], t[4], C)\n\tC, t[5] = madd2(y[1], x[5], t[5], C)\n\tC, t[6] = madd2(y[1], x[6], t[6], C)\n\tC, t[7] = madd2(y[1], x[7], t[7], C)\n\tC, t[8] = madd2(y[1], x[8], t[8], C)\n\tC, t[9] = madd2(y[1], x[9], t[9], C)\n\tC, t[10] = madd2(y[1], x[10], t[10], C)\n\tC, t[11] = madd2(y[1], x[11], t[11], C)\n\tC, t[12] = 
madd2(y[1], x[12], t[12], C)\n\tC, t[13] = madd2(y[1], x[13], t[13], C)\n\tC, t[14] = madd2(y[1], x[14], t[14], C)\n\tC, t[15] = madd2(y[1], x[15], t[15], C)\n\tC, t[16] = madd2(y[1], x[16], t[16], C)\n\tC, t[17] = madd2(y[1], x[17], t[17], C)\n\tC, t[18] = madd2(y[1], x[18], t[18], C)\n\tC, t[19] = madd2(y[1], x[19], t[19], C)\n\tC, t[20] = madd2(y[1], x[20], t[20], C)\n\tC, t[21] = madd2(y[1], x[21], t[21], C)\n\n\tD = C\n\n\t// m = t[0]n'[0] mod W\n\tm = t[0] * 2085129623399436079\n\n\t// -----------------------------------\n\t// Second loop\n\tC = madd0(m, 9062599614324828209, t[0])\n\n\tC, t[0] = madd2(m, 952425709649632109, t[1], C)\n\n\tC, t[1] = madd2(m, 13987751354083916656, t[2], C)\n\n\tC, t[2] = madd2(m, 9476693002504986527, t[3], C)\n\n\tC, t[3] = madd2(m, 17899356805776864267, t[4], C)\n\n\tC, t[4] = madd2(m, 2607080593922027197, t[5], C)\n\n\tC, t[5] = madd2(m, 6852504016717314360, t[6], C)\n\n\tC, t[6] = madd2(m, 366248478184989226, t[7], C)\n\n\tC, t[7] = madd2(m, 2672987780203805083, t[8], C)\n\n\tC, t[8] = madd2(m, 14115032483094903896, t[9], C)\n\n\tC, t[9] = madd2(m, 8062699450825609015, t[10], C)\n\n\tC, t[10] = madd2(m, 8413249848292746549, t[11], C)\n\n\tC, t[11] = madd2(m, 11172154229712803058, t[12], C)\n\n\tC, t[12] = madd2(m, 18137346262305431037, t[13], C)\n\n\tC, t[13] = madd2(m, 123227702747754650, t[14], C)\n\n\tC, t[14] = madd2(m, 7409464670784690235, t[15], C)\n\n\tC, t[15] = madd2(m, 243347369443125979, t[16], C)\n\n\tC, t[16] = madd2(m, 200317109320159479, t[17], C)\n\n\tC, t[17] = madd2(m, 17492726232193822651, t[18], C)\n\n\tC, t[18] = madd2(m, 17666595880400198649, t[19], C)\n\n\tC, t[19] = madd2(m, 1619463007483089584, t[20], C)\n\n\tC, t[20] = madd3(m, 7910025299994333900, t[21], C, t[22])\n\n\tt[21], t[22] = bits.Add64(D, C, 0)\n\t// -----------------------------------\n\t// First loop\n\n\tC, t[0] = madd1(y[2], x[0], t[0])\n\tC, t[1] = madd2(y[2], x[1], t[1], C)\n\tC, t[2] = madd2(y[2], x[2], t[2], C)\n\tC, t[3] = 
madd2(y[2], x[3], t[3], C)\n\tC, t[4] = madd2(y[2], x[4], t[4], C)\n\tC, t[5] = madd2(y[2], x[5], t[5], C)\n\tC, t[6] = madd2(y[2], x[6], t[6], C)\n\tC, t[7] = madd2(y[2], x[7], t[7], C)\n\tC, t[8] = madd2(y[2], x[8], t[8], C)\n\tC, t[9] = madd2(y[2], x[9], t[9], C)\n\tC, t[10] = madd2(y[2], x[10], t[10], C)\n\tC, t[11] = madd2(y[2], x[11], t[11], C)\n\tC, t[12] = madd2(y[2], x[12], t[12], C)\n\tC, t[13] = madd2(y[2], x[13], t[13], C)\n\tC, t[14] = madd2(y[2], x[14], t[14], C)\n\tC, t[15] = madd2(y[2], x[15], t[15], C)\n\tC, t[16] = madd2(y[2], x[16], t[16], C)\n\tC, t[17] = madd2(y[2], x[17], t[17], C)\n\tC, t[18] = madd2(y[2], x[18], t[18], C)\n\tC, t[19] = madd2(y[2], x[19], t[19], C)\n\tC, t[20] = madd2(y[2], x[20], t[20], C)\n\tC, t[21] = madd2(y[2], x[21], t[21], C)\n\n\tD = C\n\n\t// m = t[0]n'[0] mod W\n\tm = t[0] * 2085129623399436079\n\n\t// -----------------------------------\n\t// Second loop\n\tC = madd0(m, 9062599614324828209, t[0])\n\n\tC, t[0] = madd2(m, 952425709649632109, t[1], C)\n\n\tC, t[1] = madd2(m, 13987751354083916656, t[2], C)\n\n\tC, t[2] = madd2(m, 9476693002504986527, t[3], C)\n\n\tC, t[3] = madd2(m, 17899356805776864267, t[4], C)\n\n\tC, t[4] = madd2(m, 2607080593922027197, t[5], C)\n\n\tC, t[5] = madd2(m, 6852504016717314360, t[6], C)\n\n\tC, t[6] = madd2(m, 366248478184989226, t[7], C)\n\n\tC, t[7] = madd2(m, 2672987780203805083, t[8], C)\n\n\tC, t[8] = madd2(m, 14115032483094903896, t[9], C)\n\n\tC, t[9] = madd2(m, 8062699450825609015, t[10], C)\n\n\tC, t[10] = madd2(m, 8413249848292746549, t[11], C)\n\n\tC, t[11] = madd2(m, 11172154229712803058, t[12], C)\n\n\tC, t[12] = madd2(m, 18137346262305431037, t[13], C)\n\n\tC, t[13] = madd2(m, 123227702747754650, t[14], C)\n\n\tC, t[14] = madd2(m, 7409464670784690235, t[15], C)\n\n\tC, t[15] = madd2(m, 243347369443125979, t[16], C)\n\n\tC, t[16] = madd2(m, 200317109320159479, t[17], C)\n\n\tC, t[17] = madd2(m, 17492726232193822651, t[18], C)\n\n\tC, t[18] = madd2(m, 17666595880400198649, 
t[19], C)\n\n\tC, t[19] = madd2(m, 1619463007483089584, t[20], C)\n\n\tC, t[20] = madd3(m, 7910025299994333900, t[21], C, t[22])\n\n\tt[21], t[22] = bits.Add64(D, C, 0)\n\t// -----------------------------------\n\t// First loop\n\n\tC, t[0] = madd1(y[3], x[0], t[0])\n\tC, t[1] = madd2(y[3], x[1], t[1], C)\n\tC, t[2] = madd2(y[3], x[2], t[2], C)\n\tC, t[3] = madd2(y[3], x[3], t[3], C)\n\tC, t[4] = madd2(y[3], x[4], t[4], C)\n\tC, t[5] = madd2(y[3], x[5], t[5], C)\n\tC, t[6] = madd2(y[3], x[6], t[6], C)\n\tC, t[7] = madd2(y[3], x[7], t[7], C)\n\tC, t[8] = madd2(y[3], x[8], t[8], C)\n\tC, t[9] = madd2(y[3], x[9], t[9], C)\n\tC, t[10] = madd2(y[3], x[10], t[10], C)\n\tC, t[11] = madd2(y[3], x[11], t[11], C)\n\tC, t[12] = madd2(y[3], x[12], t[12], C)\n\tC, t[13] = madd2(y[3], x[13], t[13], C)\n\tC, t[14] = madd2(y[3], x[14], t[14], C)\n\tC, t[15] = madd2(y[3], x[15], t[15], C)\n\tC, t[16] = madd2(y[3], x[16], t[16], C)\n\tC, t[17] = madd2(y[3], x[17], t[17], C)\n\tC, t[18] = madd2(y[3], x[18], t[18], C)\n\tC, t[19] = madd2(y[3], x[19], t[19], C)\n\tC, t[20] = madd2(y[3], x[20], t[20], C)\n\tC, t[21] = madd2(y[3], x[21], t[21], C)\n\n\tD = C\n\n\t// m = t[0]n'[0] mod W\n\tm = t[0] * 2085129623399436079\n\n\t// -----------------------------------\n\t// Second loop\n\tC = madd0(m, 9062599614324828209, t[0])\n\n\tC, t[0] = madd2(m, 952425709649632109, t[1], C)\n\n\tC, t[1] = madd2(m, 13987751354083916656, t[2], C)\n\n\tC, t[2] = madd2(m, 9476693002504986527, t[3], C)\n\n\tC, t[3] = madd2(m, 17899356805776864267, t[4], C)\n\n\tC, t[4] = madd2(m, 2607080593922027197, t[5], C)\n\n\tC, t[5] = madd2(m, 6852504016717314360, t[6], C)\n\n\tC, t[6] = madd2(m, 366248478184989226, t[7], C)\n\n\tC, t[7] = madd2(m, 2672987780203805083, t[8], C)\n\n\tC, t[8] = madd2(m, 14115032483094903896, t[9], C)\n\n\tC, t[9] = madd2(m, 8062699450825609015, t[10], C)\n\n\tC, t[10] = madd2(m, 8413249848292746549, t[11], C)\n\n\tC, t[11] = madd2(m, 11172154229712803058, t[12], C)\n\n\tC, t[12] = 
madd2(m, 18137346262305431037, t[13], C)\n\n\tC, t[13] = madd2(m, 123227702747754650, t[14], C)\n\n\tC, t[14] = madd2(m, 7409464670784690235, t[15], C)\n\n\tC, t[15] = madd2(m, 243347369443125979, t[16], C)\n\n\tC, t[16] = madd2(m, 200317109320159479, t[17], C)\n\n\tC, t[17] = madd2(m, 17492726232193822651, t[18], C)\n\n\tC, t[18] = madd2(m, 17666595880400198649, t[19], C)\n\n\tC, t[19] = madd2(m, 1619463007483089584, t[20], C)\n\n\tC, t[20] = madd3(m, 7910025299994333900, t[21], C, t[22])\n\n\tt[21], t[22] = bits.Add64(D, C, 0)\n\t// -----------------------------------\n\t// First loop\n\n\tC, t[0] = madd1(y[4], x[0], t[0])\n\tC, t[1] = madd2(y[4], x[1], t[1], C)\n\tC, t[2] = madd2(y[4], x[2], t[2], C)\n\tC, t[3] = madd2(y[4], x[3], t[3], C)\n\tC, t[4] = madd2(y[4], x[4], t[4], C)\n\tC, t[5] = madd2(y[4], x[5], t[5], C)\n\tC, t[6] = madd2(y[4], x[6], t[6], C)\n\tC, t[7] = madd2(y[4], x[7], t[7], C)\n\tC, t[8] = madd2(y[4], x[8], t[8], C)\n\tC, t[9] = madd2(y[4], x[9], t[9], C)\n\tC, t[10] = madd2(y[4], x[10], t[10], C)\n\tC, t[11] = madd2(y[4], x[11], t[11], C)\n\tC, t[12] = madd2(y[4], x[12], t[12], C)\n\tC, t[13] = madd2(y[4], x[13], t[13], C)\n\tC, t[14] = madd2(y[4], x[14], t[14], C)\n\tC, t[15] = madd2(y[4], x[15], t[15], C)\n\tC, t[16] = madd2(y[4], x[16], t[16], C)\n\tC, t[17] = madd2(y[4], x[17], t[17], C)\n\tC, t[18] = madd2(y[4], x[18], t[18], C)\n\tC, t[19] = madd2(y[4], x[19], t[19], C)\n\tC, t[20] = madd2(y[4], x[20], t[20], C)\n\tC, t[21] = madd2(y[4], x[21], t[21], C)\n\n\tD = C\n\n\t// m = t[0]n'[0] mod W\n\tm = t[0] * 2085129623399436079\n\n\t// -----------------------------------\n\t// Second loop\n\tC = madd0(m, 9062599614324828209, t[0])\n\n\tC, t[0] = madd2(m, 952425709649632109, t[1], C)\n\n\tC, t[1] = madd2(m, 13987751354083916656, t[2], C)\n\n\tC, t[2] = madd2(m, 9476693002504986527, t[3], C)\n\n\tC, t[3] = madd2(m, 17899356805776864267, t[4], C)\n\n\tC, t[4] = madd2(m, 2607080593922027197, t[5], C)\n\n\tC, t[5] = madd2(m, 
6852504016717314360, t[6], C)\n\n\tC, t[6] = madd2(m, 366248478184989226, t[7], C)\n\n\tC, t[7] = madd2(m, 2672987780203805083, t[8], C)\n\n\tC, t[8] = madd2(m, 14115032483094903896, t[9], C)\n\n\tC, t[9] = madd2(m, 8062699450825609015, t[10], C)\n\n\tC, t[10] = madd2(m, 8413249848292746549, t[11], C)\n\n\tC, t[11] = madd2(m, 11172154229712803058, t[12], C)\n\n\tC, t[12] = madd2(m, 18137346262305431037, t[13], C)\n\n\tC, t[13] = madd2(m, 123227702747754650, t[14], C)\n\n\tC, t[14] = madd2(m, 7409464670784690235, t[15], C)\n\n\tC, t[15] = madd2(m, 243347369443125979, t[16], C)\n\n\tC, t[16] = madd2(m, 200317109320159479, t[17], C)\n\n\tC, t[17] = madd2(m, 17492726232193822651, t[18], C)\n\n\tC, t[18] = madd2(m, 17666595880400198649, t[19], C)\n\n\tC, t[19] = madd2(m, 1619463007483089584, t[20], C)\n\n\tC, t[20] = madd3(m, 7910025299994333900, t[21], C, t[22])\n\n\tt[21], t[22] = bits.Add64(D, C, 0)\n\t// -----------------------------------\n\t// First loop\n\n\tC, t[0] = madd1(y[5], x[0], t[0])\n\tC, t[1] = madd2(y[5], x[1], t[1], C)\n\tC, t[2] = madd2(y[5], x[2], t[2], C)\n\tC, t[3] = madd2(y[5], x[3], t[3], C)\n\tC, t[4] = madd2(y[5], x[4], t[4], C)\n\tC, t[5] = madd2(y[5], x[5], t[5], C)\n\tC, t[6] = madd2(y[5], x[6], t[6], C)\n\tC, t[7] = madd2(y[5], x[7], t[7], C)\n\tC, t[8] = madd2(y[5], x[8], t[8], C)\n\tC, t[9] = madd2(y[5], x[9], t[9], C)\n\tC, t[10] = madd2(y[5], x[10], t[10], C)\n\tC, t[11] = madd2(y[5], x[11], t[11], C)\n\tC, t[12] = madd2(y[5], x[12], t[12], C)\n\tC, t[13] = madd2(y[5], x[13], t[13], C)\n\tC, t[14] = madd2(y[5], x[14], t[14], C)\n\tC, t[15] = madd2(y[5], x[15], t[15], C)\n\tC, t[16] = madd2(y[5], x[16], t[16], C)\n\tC, t[17] = madd2(y[5], x[17], t[17], C)\n\tC, t[18] = madd2(y[5], x[18], t[18], C)\n\tC, t[19] = madd2(y[5], x[19], t[19], C)\n\tC, t[20] = madd2(y[5], x[20], t[20], C)\n\tC, t[21] = madd2(y[5], x[21], t[21], C)\n\n\tD = C\n\n\t// m = t[0]n'[0] mod W\n\tm = t[0] * 2085129623399436079\n\n\t// 
-----------------------------------\n\t// Second loop\n\tC = madd0(m, 9062599614324828209, t[0])\n\n\tC, t[0] = madd2(m, 952425709649632109, t[1], C)\n\n\tC, t[1] = madd2(m, 13987751354083916656, t[2], C)\n\n\tC, t[2] = madd2(m, 9476693002504986527, t[3], C)\n\n\tC, t[3] = madd2(m, 17899356805776864267, t[4], C)\n\n\tC, t[4] = madd2(m, 2607080593922027197, t[5], C)\n\n\tC, t[5] = madd2(m, 6852504016717314360, t[6], C)\n\n\tC, t[6] = madd2(m, 366248478184989226, t[7], C)\n\n\tC, t[7] = madd2(m, 2672987780203805083, t[8], C)\n\n\tC, t[8] = madd2(m, 14115032483094903896, t[9], C)\n\n\tC, t[9] = madd2(m, 8062699450825609015, t[10], C)\n\n\tC, t[10] = madd2(m, 8413249848292746549, t[11], C)\n\n\tC, t[11] = madd2(m, 11172154229712803058, t[12], C)\n\n\tC, t[12] = madd2(m, 18137346262305431037, t[13], C)\n\n\tC, t[13] = madd2(m, 123227702747754650, t[14], C)\n\n\tC, t[14] = madd2(m, 7409464670784690235, t[15], C)\n\n\tC, t[15] = madd2(m, 243347369443125979, t[16], C)\n\n\tC, t[16] = madd2(m, 200317109320159479, t[17], C)\n\n\tC, t[17] = madd2(m, 17492726232193822651, t[18], C)\n\n\tC, t[18] = madd2(m, 17666595880400198649, t[19], C)\n\n\tC, t[19] = madd2(m, 1619463007483089584, t[20], C)\n\n\tC, t[20] = madd3(m, 7910025299994333900, t[21], C, t[22])\n\n\tt[21], t[22] = bits.Add64(D, C, 0)\n\t// -----------------------------------\n\t// First loop\n\n\tC, t[0] = madd1(y[6], x[0], t[0])\n\tC, t[1] = madd2(y[6], x[1], t[1], C)\n\tC, t[2] = madd2(y[6], x[2], t[2], C)\n\tC, t[3] = madd2(y[6], x[3], t[3], C)\n\tC, t[4] = madd2(y[6], x[4], t[4], C)\n\tC, t[5] = madd2(y[6], x[5], t[5], C)\n\tC, t[6] = madd2(y[6], x[6], t[6], C)\n\tC, t[7] = madd2(y[6], x[7], t[7], C)\n\tC, t[8] = madd2(y[6], x[8], t[8], C)\n\tC, t[9] = madd2(y[6], x[9], t[9], C)\n\tC, t[10] = madd2(y[6], x[10], t[10], C)\n\tC, t[11] = madd2(y[6], x[11], t[11], C)\n\tC, t[12] = madd2(y[6], x[12], t[12], C)\n\tC, t[13] = madd2(y[6], x[13], t[13], C)\n\tC, t[14] = madd2(y[6], x[14], t[14], C)\n\tC, t[15] = 
madd2(y[6], x[15], t[15], C)\n\tC, t[16] = madd2(y[6], x[16], t[16], C)\n\tC, t[17] = madd2(y[6], x[17], t[17], C)\n\tC, t[18] = madd2(y[6], x[18], t[18], C)\n\tC, t[19] = madd2(y[6], x[19], t[19], C)\n\tC, t[20] = madd2(y[6], x[20], t[20], C)\n\tC, t[21] = madd2(y[6], x[21], t[21], C)\n\n\tD = C\n\n\t// m = t[0]n'[0] mod W\n\tm = t[0] * 2085129623399436079\n\n\t// -----------------------------------\n\t// Second loop\n\tC = madd0(m, 9062599614324828209, t[0])\n\n\tC, t[0] = madd2(m, 952425709649632109, t[1], C)\n\n\tC, t[1] = madd2(m, 13987751354083916656, t[2], C)\n\n\tC, t[2] = madd2(m, 9476693002504986527, t[3], C)\n\n\tC, t[3] = madd2(m, 17899356805776864267, t[4], C)\n\n\tC, t[4] = madd2(m, 2607080593922027197, t[5], C)\n\n\tC, t[5] = madd2(m, 6852504016717314360, t[6], C)\n\n\tC, t[6] = madd2(m, 366248478184989226, t[7], C)\n\n\tC, t[7] = madd2(m, 2672987780203805083, t[8], C)\n\n\tC, t[8] = madd2(m, 14115032483094903896, t[9], C)\n\n\tC, t[9] = madd2(m, 8062699450825609015, t[10], C)\n\n\tC, t[10] = madd2(m, 8413249848292746549, t[11], C)\n\n\tC, t[11] = madd2(m, 11172154229712803058, t[12], C)\n\n\tC, t[12] = madd2(m, 18137346262305431037, t[13], C)\n\n\tC, t[13] = madd2(m, 123227702747754650, t[14], C)\n\n\tC, t[14] = madd2(m, 7409464670784690235, t[15], C)\n\n\tC, t[15] = madd2(m, 243347369443125979, t[16], C)\n\n\tC, t[16] = madd2(m, 200317109320159479, t[17], C)\n\n\tC, t[17] = madd2(m, 17492726232193822651, t[18], C)\n\n\tC, t[18] = madd2(m, 17666595880400198649, t[19], C)\n\n\tC, t[19] = madd2(m, 1619463007483089584, t[20], C)\n\n\tC, t[20] = madd3(m, 7910025299994333900, t[21], C, t[22])\n\n\tt[21], t[22] = bits.Add64(D, C, 0)\n\t// -----------------------------------\n\t// First loop\n\n\tC, t[0] = madd1(y[7], x[0], t[0])\n\tC, t[1] = madd2(y[7], x[1], t[1], C)\n\tC, t[2] = madd2(y[7], x[2], t[2], C)\n\tC, t[3] = madd2(y[7], x[3], t[3], C)\n\tC, t[4] = madd2(y[7], x[4], t[4], C)\n\tC, t[5] = madd2(y[7], x[5], t[5], C)\n\tC, t[6] = madd2(y[7], x[6], 
t[6], C)\n\tC, t[7] = madd2(y[7], x[7], t[7], C)\n\tC, t[8] = madd2(y[7], x[8], t[8], C)\n\tC, t[9] = madd2(y[7], x[9], t[9], C)\n\tC, t[10] = madd2(y[7], x[10], t[10], C)\n\tC, t[11] = madd2(y[7], x[11], t[11], C)\n\tC, t[12] = madd2(y[7], x[12], t[12], C)\n\tC, t[13] = madd2(y[7], x[13], t[13], C)\n\tC, t[14] = madd2(y[7], x[14], t[14], C)\n\tC, t[15] = madd2(y[7], x[15], t[15], C)\n\tC, t[16] = madd2(y[7], x[16], t[16], C)\n\tC, t[17] = madd2(y[7], x[17], t[17], C)\n\tC, t[18] = madd2(y[7], x[18], t[18], C)\n\tC, t[19] = madd2(y[7], x[19], t[19], C)\n\tC, t[20] = madd2(y[7], x[20], t[20], C)\n\tC, t[21] = madd2(y[7], x[21], t[21], C)\n\n\tD = C\n\n\t// m = t[0]n'[0] mod W\n\tm = t[0] * 2085129623399436079\n\n\t// -----------------------------------\n\t// Second loop\n\tC = madd0(m, 9062599614324828209, t[0])\n\n\tC, t[0] = madd2(m, 952425709649632109, t[1], C)\n\n\tC, t[1] = madd2(m, 13987751354083916656, t[2], C)\n\n\tC, t[2] = madd2(m, 9476693002504986527, t[3], C)\n\n\tC, t[3] = madd2(m, 17899356805776864267, t[4], C)\n\n\tC, t[4] = madd2(m, 2607080593922027197, t[5], C)\n\n\tC, t[5] = madd2(m, 6852504016717314360, t[6], C)\n\n\tC, t[6] = madd2(m, 366248478184989226, t[7], C)\n\n\tC, t[7] = madd2(m, 2672987780203805083, t[8], C)\n\n\tC, t[8] = madd2(m, 14115032483094903896, t[9], C)\n\n\tC, t[9] = madd2(m, 8062699450825609015, t[10], C)\n\n\tC, t[10] = madd2(m, 8413249848292746549, t[11], C)\n\n\tC, t[11] = madd2(m, 11172154229712803058, t[12], C)\n\n\tC, t[12] = madd2(m, 18137346262305431037, t[13], C)\n\n\tC, t[13] = madd2(m, 123227702747754650, t[14], C)\n\n\tC, t[14] = madd2(m, 7409464670784690235, t[15], C)\n\n\tC, t[15] = madd2(m, 243347369443125979, t[16], C)\n\n\tC, t[16] = madd2(m, 200317109320159479, t[17], C)\n\n\tC, t[17] = madd2(m, 17492726232193822651, t[18], C)\n\n\tC, t[18] = madd2(m, 17666595880400198649, t[19], C)\n\n\tC, t[19] = madd2(m, 1619463007483089584, t[20], C)\n\n\tC, t[20] = madd3(m, 7910025299994333900, t[21], C, 
t[22])\n\n\tt[21], t[22] = bits.Add64(D, C, 0)\n\t// -----------------------------------\n\t// First loop\n\n\tC, t[0] = madd1(y[8], x[0], t[0])\n\tC, t[1] = madd2(y[8], x[1], t[1], C)\n\tC, t[2] = madd2(y[8], x[2], t[2], C)\n\tC, t[3] = madd2(y[8], x[3], t[3], C)\n\tC, t[4] = madd2(y[8], x[4], t[4], C)\n\tC, t[5] = madd2(y[8], x[5], t[5], C)\n\tC, t[6] = madd2(y[8], x[6], t[6], C)\n\tC, t[7] = madd2(y[8], x[7], t[7], C)\n\tC, t[8] = madd2(y[8], x[8], t[8], C)\n\tC, t[9] = madd2(y[8], x[9], t[9], C)\n\tC, t[10] = madd2(y[8], x[10], t[10], C)\n\tC, t[11] = madd2(y[8], x[11], t[11], C)\n\tC, t[12] = madd2(y[8], x[12], t[12], C)\n\tC, t[13] = madd2(y[8], x[13], t[13], C)\n\tC, t[14] = madd2(y[8], x[14], t[14], C)\n\tC, t[15] = madd2(y[8], x[15], t[15], C)\n\tC, t[16] = madd2(y[8], x[16], t[16], C)\n\tC, t[17] = madd2(y[8], x[17], t[17], C)\n\tC, t[18] = madd2(y[8], x[18], t[18], C)\n\tC, t[19] = madd2(y[8], x[19], t[19], C)\n\tC, t[20] = madd2(y[8], x[20], t[20], C)\n\tC, t[21] = madd2(y[8], x[21], t[21], C)\n\n\tD = C\n\n\t// m = t[0]n'[0] mod W\n\tm = t[0] * 2085129623399436079\n\n\t// -----------------------------------\n\t// Second loop\n\tC = madd0(m, 9062599614324828209, t[0])\n\n\tC, t[0] = madd2(m, 952425709649632109, t[1], C)\n\n\tC, t[1] = madd2(m, 13987751354083916656, t[2], C)\n\n\tC, t[2] = madd2(m, 9476693002504986527, t[3], C)\n\n\tC, t[3] = madd2(m, 17899356805776864267, t[4], C)\n\n\tC, t[4] = madd2(m, 2607080593922027197, t[5], C)\n\n\tC, t[5] = madd2(m, 6852504016717314360, t[6], C)\n\n\tC, t[6] = madd2(m, 366248478184989226, t[7], C)\n\n\tC, t[7] = madd2(m, 2672987780203805083, t[8], C)\n\n\tC, t[8] = madd2(m, 14115032483094903896, t[9], C)\n\n\tC, t[9] = madd2(m, 8062699450825609015, t[10], C)\n\n\tC, t[10] = madd2(m, 8413249848292746549, t[11], C)\n\n\tC, t[11] = madd2(m, 11172154229712803058, t[12], C)\n\n\tC, t[12] = madd2(m, 18137346262305431037, t[13], C)\n\n\tC, t[13] = madd2(m, 123227702747754650, t[14], C)\n\n\tC, t[14] = madd2(m, 
7409464670784690235, t[15], C)\n\n\tC, t[15] = madd2(m, 243347369443125979, t[16], C)\n\n\tC, t[16] = madd2(m, 200317109320159479, t[17], C)\n\n\tC, t[17] = madd2(m, 17492726232193822651, t[18], C)\n\n\tC, t[18] = madd2(m, 17666595880400198649, t[19], C)\n\n\tC, t[19] = madd2(m, 1619463007483089584, t[20], C)\n\n\tC, t[20] = madd3(m, 7910025299994333900, t[21], C, t[22])\n\n\tt[21], t[22] = bits.Add64(D, C, 0)\n\t// -----------------------------------\n\t// First loop\n\n\tC, t[0] = madd1(y[9], x[0], t[0])\n\tC, t[1] = madd2(y[9], x[1], t[1], C)\n\tC, t[2] = madd2(y[9], x[2], t[2], C)\n\tC, t[3] = madd2(y[9], x[3], t[3], C)\n\tC, t[4] = madd2(y[9], x[4], t[4], C)\n\tC, t[5] = madd2(y[9], x[5], t[5], C)\n\tC, t[6] = madd2(y[9], x[6], t[6], C)\n\tC, t[7] = madd2(y[9], x[7], t[7], C)\n\tC, t[8] = madd2(y[9], x[8], t[8], C)\n\tC, t[9] = madd2(y[9], x[9], t[9], C)\n\tC, t[10] = madd2(y[9], x[10], t[10], C)\n\tC, t[11] = madd2(y[9], x[11], t[11], C)\n\tC, t[12] = madd2(y[9], x[12], t[12], C)\n\tC, t[13] = madd2(y[9], x[13], t[13], C)\n\tC, t[14] = madd2(y[9], x[14], t[14], C)\n\tC, t[15] = madd2(y[9], x[15], t[15], C)\n\tC, t[16] = madd2(y[9], x[16], t[16], C)\n\tC, t[17] = madd2(y[9], x[17], t[17], C)\n\tC, t[18] = madd2(y[9], x[18], t[18], C)\n\tC, t[19] = madd2(y[9], x[19], t[19], C)\n\tC, t[20] = madd2(y[9], x[20], t[20], C)\n\tC, t[21] = madd2(y[9], x[21], t[21], C)\n\n\tD = C\n\n\t// m = t[0]n'[0] mod W\n\tm = t[0] * 2085129623399436079\n\n\t// -----------------------------------\n\t// Second loop\n\tC = madd0(m, 9062599614324828209, t[0])\n\n\tC, t[0] = madd2(m, 952425709649632109, t[1], C)\n\n\tC, t[1] = madd2(m, 13987751354083916656, t[2], C)\n\n\tC, t[2] = madd2(m, 9476693002504986527, t[3], C)\n\n\tC, t[3] = madd2(m, 17899356805776864267, t[4], C)\n\n\tC, t[4] = madd2(m, 2607080593922027197, t[5], C)\n\n\tC, t[5] = madd2(m, 6852504016717314360, t[6], C)\n\n\tC, t[6] = madd2(m, 366248478184989226, t[7], C)\n\n\tC, t[7] = madd2(m, 2672987780203805083, t[8], 
C)\n\n\tC, t[8] = madd2(m, 14115032483094903896, t[9], C)\n\n\tC, t[9] = madd2(m, 8062699450825609015, t[10], C)\n\n\tC, t[10] = madd2(m, 8413249848292746549, t[11], C)\n\n\tC, t[11] = madd2(m, 11172154229712803058, t[12], C)\n\n\tC, t[12] = madd2(m, 18137346262305431037, t[13], C)\n\n\tC, t[13] = madd2(m, 123227702747754650, t[14], C)\n\n\tC, t[14] = madd2(m, 7409464670784690235, t[15], C)\n\n\tC, t[15] = madd2(m, 243347369443125979, t[16], C)\n\n\tC, t[16] = madd2(m, 200317109320159479, t[17], C)\n\n\tC, t[17] = madd2(m, 17492726232193822651, t[18], C)\n\n\tC, t[18] = madd2(m, 17666595880400198649, t[19], C)\n\n\tC, t[19] = madd2(m, 1619463007483089584, t[20], C)\n\n\tC, t[20] = madd3(m, 7910025299994333900, t[21], C, t[22])\n\n\tt[21], t[22] = bits.Add64(D, C, 0)\n\t// -----------------------------------\n\t// First loop\n\n\tC, t[0] = madd1(y[10], x[0], t[0])\n\tC, t[1] = madd2(y[10], x[1], t[1], C)\n\tC, t[2] = madd2(y[10], x[2], t[2], C)\n\tC, t[3] = madd2(y[10], x[3], t[3], C)\n\tC, t[4] = madd2(y[10], x[4], t[4], C)\n\tC, t[5] = madd2(y[10], x[5], t[5], C)\n\tC, t[6] = madd2(y[10], x[6], t[6], C)\n\tC, t[7] = madd2(y[10], x[7], t[7], C)\n\tC, t[8] = madd2(y[10], x[8], t[8], C)\n\tC, t[9] = madd2(y[10], x[9], t[9], C)\n\tC, t[10] = madd2(y[10], x[10], t[10], C)\n\tC, t[11] = madd2(y[10], x[11], t[11], C)\n\tC, t[12] = madd2(y[10], x[12], t[12], C)\n\tC, t[13] = madd2(y[10], x[13], t[13], C)\n\tC, t[14] = madd2(y[10], x[14], t[14], C)\n\tC, t[15] = madd2(y[10], x[15], t[15], C)\n\tC, t[16] = madd2(y[10], x[16], t[16], C)\n\tC, t[17] = madd2(y[10], x[17], t[17], C)\n\tC, t[18] = madd2(y[10], x[18], t[18], C)\n\tC, t[19] = madd2(y[10], x[19], t[19], C)\n\tC, t[20] = madd2(y[10], x[20], t[20], C)\n\tC, t[21] = madd2(y[10], x[21], t[21], C)\n\n\tD = C\n\n\t// m = t[0]n'[0] mod W\n\tm = t[0] * 2085129623399436079\n\n\t// -----------------------------------\n\t// Second loop\n\tC = madd0(m, 9062599614324828209, t[0])\n\n\tC, t[0] = madd2(m, 952425709649632109, 
t[1], C)\n\n\tC, t[1] = madd2(m, 13987751354083916656, t[2], C)\n\n\tC, t[2] = madd2(m, 9476693002504986527, t[3], C)\n\n\tC, t[3] = madd2(m, 17899356805776864267, t[4], C)\n\n\tC, t[4] = madd2(m, 2607080593922027197, t[5], C)\n\n\tC, t[5] = madd2(m, 6852504016717314360, t[6], C)\n\n\tC, t[6] = madd2(m, 366248478184989226, t[7], C)\n\n\tC, t[7] = madd2(m, 2672987780203805083, t[8], C)\n\n\tC, t[8] = madd2(m, 14115032483094903896, t[9], C)\n\n\tC, t[9] = madd2(m, 8062699450825609015, t[10], C)\n\n\tC, t[10] = madd2(m, 8413249848292746549, t[11], C)\n\n\tC, t[11] = madd2(m, 11172154229712803058, t[12], C)\n\n\tC, t[12] = madd2(m, 18137346262305431037, t[13], C)\n\n\tC, t[13] = madd2(m, 123227702747754650, t[14], C)\n\n\tC, t[14] = madd2(m, 7409464670784690235, t[15], C)\n\n\tC, t[15] = madd2(m, 243347369443125979, t[16], C)\n\n\tC, t[16] = madd2(m, 200317109320159479, t[17], C)\n\n\tC, t[17] = madd2(m, 17492726232193822651, t[18], C)\n\n\tC, t[18] = madd2(m, 17666595880400198649, t[19], C)\n\n\tC, t[19] = madd2(m, 1619463007483089584, t[20], C)\n\n\tC, t[20] = madd3(m, 7910025299994333900, t[21], C, t[22])\n\n\tt[21], t[22] = bits.Add64(D, C, 0)\n\t// -----------------------------------\n\t// First loop\n\n\tC, t[0] = madd1(y[11], x[0], t[0])\n\tC, t[1] = madd2(y[11], x[1], t[1], C)\n\tC, t[2] = madd2(y[11], x[2], t[2], C)\n\tC, t[3] = madd2(y[11], x[3], t[3], C)\n\tC, t[4] = madd2(y[11], x[4], t[4], C)\n\tC, t[5] = madd2(y[11], x[5], t[5], C)\n\tC, t[6] = madd2(y[11], x[6], t[6], C)\n\tC, t[7] = madd2(y[11], x[7], t[7], C)\n\tC, t[8] = madd2(y[11], x[8], t[8], C)\n\tC, t[9] = madd2(y[11], x[9], t[9], C)\n\tC, t[10] = madd2(y[11], x[10], t[10], C)\n\tC, t[11] = madd2(y[11], x[11], t[11], C)\n\tC, t[12] = madd2(y[11], x[12], t[12], C)\n\tC, t[13] = madd2(y[11], x[13], t[13], C)\n\tC, t[14] = madd2(y[11], x[14], t[14], C)\n\tC, t[15] = madd2(y[11], x[15], t[15], C)\n\tC, t[16] = madd2(y[11], x[16], t[16], C)\n\tC, t[17] = madd2(y[11], x[17], t[17], C)\n\tC, t[18] = 
madd2(y[11], x[18], t[18], C)\n\tC, t[19] = madd2(y[11], x[19], t[19], C)\n\tC, t[20] = madd2(y[11], x[20], t[20], C)\n\tC, t[21] = madd2(y[11], x[21], t[21], C)\n\n\tD = C\n\n\t// m = t[0]n'[0] mod W\n\tm = t[0] * 2085129623399436079\n\n\t// -----------------------------------\n\t// Second loop\n\tC = madd0(m, 9062599614324828209, t[0])\n\n\tC, t[0] = madd2(m, 952425709649632109, t[1], C)\n\n\tC, t[1] = madd2(m, 13987751354083916656, t[2], C)\n\n\tC, t[2] = madd2(m, 9476693002504986527, t[3], C)\n\n\tC, t[3] = madd2(m, 17899356805776864267, t[4], C)\n\n\tC, t[4] = madd2(m, 2607080593922027197, t[5], C)\n\n\tC, t[5] = madd2(m, 6852504016717314360, t[6], C)\n\n\tC, t[6] = madd2(m, 366248478184989226, t[7], C)\n\n\tC, t[7] = madd2(m, 2672987780203805083, t[8], C)\n\n\tC, t[8] = madd2(m, 14115032483094903896, t[9], C)\n\n\tC, t[9] = madd2(m, 8062699450825609015, t[10], C)\n\n\tC, t[10] = madd2(m, 8413249848292746549, t[11], C)\n\n\tC, t[11] = madd2(m, 11172154229712803058, t[12], C)\n\n\tC, t[12] = madd2(m, 18137346262305431037, t[13], C)\n\n\tC, t[13] = madd2(m, 123227702747754650, t[14], C)\n\n\tC, t[14] = madd2(m, 7409464670784690235, t[15], C)\n\n\tC, t[15] = madd2(m, 243347369443125979, t[16], C)\n\n\tC, t[16] = madd2(m, 200317109320159479, t[17], C)\n\n\tC, t[17] = madd2(m, 17492726232193822651, t[18], C)\n\n\tC, t[18] = madd2(m, 17666595880400198649, t[19], C)\n\n\tC, t[19] = madd2(m, 1619463007483089584, t[20], C)\n\n\tC, t[20] = madd3(m, 7910025299994333900, t[21], C, t[22])\n\n\tt[21], t[22] = bits.Add64(D, C, 0)\n\t// -----------------------------------\n\t// First loop\n\n\tC, t[0] = madd1(y[12], x[0], t[0])\n\tC, t[1] = madd2(y[12], x[1], t[1], C)\n\tC, t[2] = madd2(y[12], x[2], t[2], C)\n\tC, t[3] = madd2(y[12], x[3], t[3], C)\n\tC, t[4] = madd2(y[12], x[4], t[4], C)\n\tC, t[5] = madd2(y[12], x[5], t[5], C)\n\tC, t[6] = madd2(y[12], x[6], t[6], C)\n\tC, t[7] = madd2(y[12], x[7], t[7], C)\n\tC, t[8] = madd2(y[12], x[8], t[8], C)\n\tC, t[9] = madd2(y[12], 
x[9], t[9], C)\n\tC, t[10] = madd2(y[12], x[10], t[10], C)\n\tC, t[11] = madd2(y[12], x[11], t[11], C)\n\tC, t[12] = madd2(y[12], x[12], t[12], C)\n\tC, t[13] = madd2(y[12], x[13], t[13], C)\n\tC, t[14] = madd2(y[12], x[14], t[14], C)\n\tC, t[15] = madd2(y[12], x[15], t[15], C)\n\tC, t[16] = madd2(y[12], x[16], t[16], C)\n\tC, t[17] = madd2(y[12], x[17], t[17], C)\n\tC, t[18] = madd2(y[12], x[18], t[18], C)\n\tC, t[19] = madd2(y[12], x[19], t[19], C)\n\tC, t[20] = madd2(y[12], x[20], t[20], C)\n\tC, t[21] = madd2(y[12], x[21], t[21], C)\n\n\tD = C\n\n\t// m = t[0]n'[0] mod W\n\tm = t[0] * 2085129623399436079\n\n\t// -----------------------------------\n\t// Second loop\n\tC = madd0(m, 9062599614324828209, t[0])\n\n\tC, t[0] = madd2(m, 952425709649632109, t[1], C)\n\n\tC, t[1] = madd2(m, 13987751354083916656, t[2], C)\n\n\tC, t[2] = madd2(m, 9476693002504986527, t[3], C)\n\n\tC, t[3] = madd2(m, 17899356805776864267, t[4], C)\n\n\tC, t[4] = madd2(m, 2607080593922027197, t[5], C)\n\n\tC, t[5] = madd2(m, 6852504016717314360, t[6], C)\n\n\tC, t[6] = madd2(m, 366248478184989226, t[7], C)\n\n\tC, t[7] = madd2(m, 2672987780203805083, t[8], C)\n\n\tC, t[8] = madd2(m, 14115032483094903896, t[9], C)\n\n\tC, t[9] = madd2(m, 8062699450825609015, t[10], C)\n\n\tC, t[10] = madd2(m, 8413249848292746549, t[11], C)\n\n\tC, t[11] = madd2(m, 11172154229712803058, t[12], C)\n\n\tC, t[12] = madd2(m, 18137346262305431037, t[13], C)\n\n\tC, t[13] = madd2(m, 123227702747754650, t[14], C)\n\n\tC, t[14] = madd2(m, 7409464670784690235, t[15], C)\n\n\tC, t[15] = madd2(m, 243347369443125979, t[16], C)\n\n\tC, t[16] = madd2(m, 200317109320159479, t[17], C)\n\n\tC, t[17] = madd2(m, 17492726232193822651, t[18], C)\n\n\tC, t[18] = madd2(m, 17666595880400198649, t[19], C)\n\n\tC, t[19] = madd2(m, 1619463007483089584, t[20], C)\n\n\tC, t[20] = madd3(m, 7910025299994333900, t[21], C, t[22])\n\n\tt[21], t[22] = bits.Add64(D, C, 0)\n\t// -----------------------------------\n\t// First loop\n\n\tC, t[0] 
= madd1(y[13], x[0], t[0])\n\tC, t[1] = madd2(y[13], x[1], t[1], C)\n\tC, t[2] = madd2(y[13], x[2], t[2], C)\n\tC, t[3] = madd2(y[13], x[3], t[3], C)\n\tC, t[4] = madd2(y[13], x[4], t[4], C)\n\tC, t[5] = madd2(y[13], x[5], t[5], C)\n\tC, t[6] = madd2(y[13], x[6], t[6], C)\n\tC, t[7] = madd2(y[13], x[7], t[7], C)\n\tC, t[8] = madd2(y[13], x[8], t[8], C)\n\tC, t[9] = madd2(y[13], x[9], t[9], C)\n\tC, t[10] = madd2(y[13], x[10], t[10], C)\n\tC, t[11] = madd2(y[13], x[11], t[11], C)\n\tC, t[12] = madd2(y[13], x[12], t[12], C)\n\tC, t[13] = madd2(y[13], x[13], t[13], C)\n\tC, t[14] = madd2(y[13], x[14], t[14], C)\n\tC, t[15] = madd2(y[13], x[15], t[15], C)\n\tC, t[16] = madd2(y[13], x[16], t[16], C)\n\tC, t[17] = madd2(y[13], x[17], t[17], C)\n\tC, t[18] = madd2(y[13], x[18], t[18], C)\n\tC, t[19] = madd2(y[13], x[19], t[19], C)\n\tC, t[20] = madd2(y[13], x[20], t[20], C)\n\tC, t[21] = madd2(y[13], x[21], t[21], C)\n\n\tD = C\n\n\t// m = t[0]n'[0] mod W\n\tm = t[0] * 2085129623399436079\n\n\t// -----------------------------------\n\t// Second loop\n\tC = madd0(m, 9062599614324828209, t[0])\n\n\tC, t[0] = madd2(m, 952425709649632109, t[1], C)\n\n\tC, t[1] = madd2(m, 13987751354083916656, t[2], C)\n\n\tC, t[2] = madd2(m, 9476693002504986527, t[3], C)\n\n\tC, t[3] = madd2(m, 17899356805776864267, t[4], C)\n\n\tC, t[4] = madd2(m, 2607080593922027197, t[5], C)\n\n\tC, t[5] = madd2(m, 6852504016717314360, t[6], C)\n\n\tC, t[6] = madd2(m, 366248478184989226, t[7], C)\n\n\tC, t[7] = madd2(m, 2672987780203805083, t[8], C)\n\n\tC, t[8] = madd2(m, 14115032483094903896, t[9], C)\n\n\tC, t[9] = madd2(m, 8062699450825609015, t[10], C)\n\n\tC, t[10] = madd2(m, 8413249848292746549, t[11], C)\n\n\tC, t[11] = madd2(m, 11172154229712803058, t[12], C)\n\n\tC, t[12] = madd2(m, 18137346262305431037, t[13], C)\n\n\tC, t[13] = madd2(m, 123227702747754650, t[14], C)\n\n\tC, t[14] = madd2(m, 7409464670784690235, t[15], C)\n\n\tC, t[15] = madd2(m, 243347369443125979, t[16], C)\n\n\tC, t[16] = 
madd2(m, 200317109320159479, t[17], C)\n\n\tC, t[17] = madd2(m, 17492726232193822651, t[18], C)\n\n\tC, t[18] = madd2(m, 17666595880400198649, t[19], C)\n\n\tC, t[19] = madd2(m, 1619463007483089584, t[20], C)\n\n\tC, t[20] = madd3(m, 7910025299994333900, t[21], C, t[22])\n\n\tt[21], t[22] = bits.Add64(D, C, 0)\n\t// -----------------------------------\n\t// First loop\n\n\tC, t[0] = madd1(y[14], x[0], t[0])\n\tC, t[1] = madd2(y[14], x[1], t[1], C)\n\tC, t[2] = madd2(y[14], x[2], t[2], C)\n\tC, t[3] = madd2(y[14], x[3], t[3], C)\n\tC, t[4] = madd2(y[14], x[4], t[4], C)\n\tC, t[5] = madd2(y[14], x[5], t[5], C)\n\tC, t[6] = madd2(y[14], x[6], t[6], C)\n\tC, t[7] = madd2(y[14], x[7], t[7], C)\n\tC, t[8] = madd2(y[14], x[8], t[8], C)\n\tC, t[9] = madd2(y[14], x[9], t[9], C)\n\tC, t[10] = madd2(y[14], x[10], t[10], C)\n\tC, t[11] = madd2(y[14], x[11], t[11], C)\n\tC, t[12] = madd2(y[14], x[12], t[12], C)\n\tC, t[13] = madd2(y[14], x[13], t[13], C)\n\tC, t[14] = madd2(y[14], x[14], t[14], C)\n\tC, t[15] = madd2(y[14], x[15], t[15], C)\n\tC, t[16] = madd2(y[14], x[16], t[16], C)\n\tC, t[17] = madd2(y[14], x[17], t[17], C)\n\tC, t[18] = madd2(y[14], x[18], t[18], C)\n\tC, t[19] = madd2(y[14], x[19], t[19], C)\n\tC, t[20] = madd2(y[14], x[20], t[20], C)\n\tC, t[21] = madd2(y[14], x[21], t[21], C)\n\n\tD = C\n\n\t// m = t[0]n'[0] mod W\n\tm = t[0] * 2085129623399436079\n\n\t// -----------------------------------\n\t// Second loop\n\tC = madd0(m, 9062599614324828209, t[0])\n\n\tC, t[0] = madd2(m, 952425709649632109, t[1], C)\n\n\tC, t[1] = madd2(m, 13987751354083916656, t[2], C)\n\n\tC, t[2] = madd2(m, 9476693002504986527, t[3], C)\n\n\tC, t[3] = madd2(m, 17899356805776864267, t[4], C)\n\n\tC, t[4] = madd2(m, 2607080593922027197, t[5], C)\n\n\tC, t[5] = madd2(m, 6852504016717314360, t[6], C)\n\n\tC, t[6] = madd2(m, 366248478184989226, t[7], C)\n\n\tC, t[7] = madd2(m, 2672987780203805083, t[8], C)\n\n\tC, t[8] = madd2(m, 14115032483094903896, t[9], C)\n\n\tC, t[9] = madd2(m, 
8062699450825609015, t[10], C)\n\n\tC, t[10] = madd2(m, 8413249848292746549, t[11], C)\n\n\tC, t[11] = madd2(m, 11172154229712803058, t[12], C)\n\n\tC, t[12] = madd2(m, 18137346262305431037, t[13], C)\n\n\tC, t[13] = madd2(m, 123227702747754650, t[14], C)\n\n\tC, t[14] = madd2(m, 7409464670784690235, t[15], C)\n\n\tC, t[15] = madd2(m, 243347369443125979, t[16], C)\n\n\tC, t[16] = madd2(m, 200317109320159479, t[17], C)\n\n\tC, t[17] = madd2(m, 17492726232193822651, t[18], C)\n\n\tC, t[18] = madd2(m, 17666595880400198649, t[19], C)\n\n\tC, t[19] = madd2(m, 1619463007483089584, t[20], C)\n\n\tC, t[20] = madd3(m, 7910025299994333900, t[21], C, t[22])\n\n\tt[21], t[22] = bits.Add64(D, C, 0)\n\t// -----------------------------------\n\t// First loop\n\n\tC, t[0] = madd1(y[15], x[0], t[0])\n\tC, t[1] = madd2(y[15], x[1], t[1], C)\n\tC, t[2] = madd2(y[15], x[2], t[2], C)\n\tC, t[3] = madd2(y[15], x[3], t[3], C)\n\tC, t[4] = madd2(y[15], x[4], t[4], C)\n\tC, t[5] = madd2(y[15], x[5], t[5], C)\n\tC, t[6] = madd2(y[15], x[6], t[6], C)\n\tC, t[7] = madd2(y[15], x[7], t[7], C)\n\tC, t[8] = madd2(y[15], x[8], t[8], C)\n\tC, t[9] = madd2(y[15], x[9], t[9], C)\n\tC, t[10] = madd2(y[15], x[10], t[10], C)\n\tC, t[11] = madd2(y[15], x[11], t[11], C)\n\tC, t[12] = madd2(y[15], x[12], t[12], C)\n\tC, t[13] = madd2(y[15], x[13], t[13], C)\n\tC, t[14] = madd2(y[15], x[14], t[14], C)\n\tC, t[15] = madd2(y[15], x[15], t[15], C)\n\tC, t[16] = madd2(y[15], x[16], t[16], C)\n\tC, t[17] = madd2(y[15], x[17], t[17], C)\n\tC, t[18] = madd2(y[15], x[18], t[18], C)\n\tC, t[19] = madd2(y[15], x[19], t[19], C)\n\tC, t[20] = madd2(y[15], x[20], t[20], C)\n\tC, t[21] = madd2(y[15], x[21], t[21], C)\n\n\tD = C\n\n\t// m = t[0]n'[0] mod W\n\tm = t[0] * 2085129623399436079\n\n\t// -----------------------------------\n\t// Second loop\n\tC = madd0(m, 9062599614324828209, t[0])\n\n\tC, t[0] = madd2(m, 952425709649632109, t[1], C)\n\n\tC, t[1] = madd2(m, 13987751354083916656, t[2], C)\n\n\tC, t[2] = 
madd2(m, 9476693002504986527, t[3], C)\n\n\tC, t[3] = madd2(m, 17899356805776864267, t[4], C)\n\n\tC, t[4] = madd2(m, 2607080593922027197, t[5], C)\n\n\tC, t[5] = madd2(m, 6852504016717314360, t[6], C)\n\n\tC, t[6] = madd2(m, 366248478184989226, t[7], C)\n\n\tC, t[7] = madd2(m, 2672987780203805083, t[8], C)\n\n\tC, t[8] = madd2(m, 14115032483094903896, t[9], C)\n\n\tC, t[9] = madd2(m, 8062699450825609015, t[10], C)\n\n\tC, t[10] = madd2(m, 8413249848292746549, t[11], C)\n\n\tC, t[11] = madd2(m, 11172154229712803058, t[12], C)\n\n\tC, t[12] = madd2(m, 18137346262305431037, t[13], C)\n\n\tC, t[13] = madd2(m, 123227702747754650, t[14], C)\n\n\tC, t[14] = madd2(m, 7409464670784690235, t[15], C)\n\n\tC, t[15] = madd2(m, 243347369443125979, t[16], C)\n\n\tC, t[16] = madd2(m, 200317109320159479, t[17], C)\n\n\tC, t[17] = madd2(m, 17492726232193822651, t[18], C)\n\n\tC, t[18] = madd2(m, 17666595880400198649, t[19], C)\n\n\tC, t[19] = madd2(m, 1619463007483089584, t[20], C)\n\n\tC, t[20] = madd3(m, 7910025299994333900, t[21], C, t[22])\n\n\tt[21], t[22] = bits.Add64(D, C, 0)\n\t// -----------------------------------\n\t// First loop\n\n\tC, t[0] = madd1(y[16], x[0], t[0])\n\tC, t[1] = madd2(y[16], x[1], t[1], C)\n\tC, t[2] = madd2(y[16], x[2], t[2], C)\n\tC, t[3] = madd2(y[16], x[3], t[3], C)\n\tC, t[4] = madd2(y[16], x[4], t[4], C)\n\tC, t[5] = madd2(y[16], x[5], t[5], C)\n\tC, t[6] = madd2(y[16], x[6], t[6], C)\n\tC, t[7] = madd2(y[16], x[7], t[7], C)\n\tC, t[8] = madd2(y[16], x[8], t[8], C)\n\tC, t[9] = madd2(y[16], x[9], t[9], C)\n\tC, t[10] = madd2(y[16], x[10], t[10], C)\n\tC, t[11] = madd2(y[16], x[11], t[11], C)\n\tC, t[12] = madd2(y[16], x[12], t[12], C)\n\tC, t[13] = madd2(y[16], x[13], t[13], C)\n\tC, t[14] = madd2(y[16], x[14], t[14], C)\n\tC, t[15] = madd2(y[16], x[15], t[15], C)\n\tC, t[16] = madd2(y[16], x[16], t[16], C)\n\tC, t[17] = madd2(y[16], x[17], t[17], C)\n\tC, t[18] = madd2(y[16], x[18], t[18], C)\n\tC, t[19] = madd2(y[16], x[19], t[19], C)\n\tC, 
t[20] = madd2(y[16], x[20], t[20], C)\n\tC, t[21] = madd2(y[16], x[21], t[21], C)\n\n\tD = C\n\n\t// m = t[0]n'[0] mod W\n\tm = t[0] * 2085129623399436079\n\n\t// -----------------------------------\n\t// Second loop\n\tC = madd0(m, 9062599614324828209, t[0])\n\n\tC, t[0] = madd2(m, 952425709649632109, t[1], C)\n\n\tC, t[1] = madd2(m, 13987751354083916656, t[2], C)\n\n\tC, t[2] = madd2(m, 9476693002504986527, t[3], C)\n\n\tC, t[3] = madd2(m, 17899356805776864267, t[4], C)\n\n\tC, t[4] = madd2(m, 2607080593922027197, t[5], C)\n\n\tC, t[5] = madd2(m, 6852504016717314360, t[6], C)\n\n\tC, t[6] = madd2(m, 366248478184989226, t[7], C)\n\n\tC, t[7] = madd2(m, 2672987780203805083, t[8], C)\n\n\tC, t[8] = madd2(m, 14115032483094903896, t[9], C)\n\n\tC, t[9] = madd2(m, 8062699450825609015, t[10], C)\n\n\tC, t[10] = madd2(m, 8413249848292746549, t[11], C)\n\n\tC, t[11] = madd2(m, 11172154229712803058, t[12], C)\n\n\tC, t[12] = madd2(m, 18137346262305431037, t[13], C)\n\n\tC, t[13] = madd2(m, 123227702747754650, t[14], C)\n\n\tC, t[14] = madd2(m, 7409464670784690235, t[15], C)\n\n\tC, t[15] = madd2(m, 243347369443125979, t[16], C)\n\n\tC, t[16] = madd2(m, 200317109320159479, t[17], C)\n\n\tC, t[17] = madd2(m, 17492726232193822651, t[18], C)\n\n\tC, t[18] = madd2(m, 17666595880400198649, t[19], C)\n\n\tC, t[19] = madd2(m, 1619463007483089584, t[20], C)\n\n\tC, t[20] = madd3(m, 7910025299994333900, t[21], C, t[22])\n\n\tt[21], t[22] = bits.Add64(D, C, 0)\n\t// -----------------------------------\n\t// First loop\n\n\tC, t[0] = madd1(y[17], x[0], t[0])\n\tC, t[1] = madd2(y[17], x[1], t[1], C)\n\tC, t[2] = madd2(y[17], x[2], t[2], C)\n\tC, t[3] = madd2(y[17], x[3], t[3], C)\n\tC, t[4] = madd2(y[17], x[4], t[4], C)\n\tC, t[5] = madd2(y[17], x[5], t[5], C)\n\tC, t[6] = madd2(y[17], x[6], t[6], C)\n\tC, t[7] = madd2(y[17], x[7], t[7], C)\n\tC, t[8] = madd2(y[17], x[8], t[8], C)\n\tC, t[9] = madd2(y[17], x[9], t[9], C)\n\tC, t[10] = madd2(y[17], x[10], t[10], C)\n\tC, t[11] = 
madd2(y[17], x[11], t[11], C)\n\tC, t[12] = madd2(y[17], x[12], t[12], C)\n\tC, t[13] = madd2(y[17], x[13], t[13], C)\n\tC, t[14] = madd2(y[17], x[14], t[14], C)\n\tC, t[15] = madd2(y[17], x[15], t[15], C)\n\tC, t[16] = madd2(y[17], x[16], t[16], C)\n\tC, t[17] = madd2(y[17], x[17], t[17], C)\n\tC, t[18] = madd2(y[17], x[18], t[18], C)\n\tC, t[19] = madd2(y[17], x[19], t[19], C)\n\tC, t[20] = madd2(y[17], x[20], t[20], C)\n\tC, t[21] = madd2(y[17], x[21], t[21], C)\n\n\tD = C\n\n\t// m = t[0]n'[0] mod W\n\tm = t[0] * 2085129623399436079\n\n\t// -----------------------------------\n\t// Second loop\n\tC = madd0(m, 9062599614324828209, t[0])\n\n\tC, t[0] = madd2(m, 952425709649632109, t[1], C)\n\n\tC, t[1] = madd2(m, 13987751354083916656, t[2], C)\n\n\tC, t[2] = madd2(m, 9476693002504986527, t[3], C)\n\n\tC, t[3] = madd2(m, 17899356805776864267, t[4], C)\n\n\tC, t[4] = madd2(m, 2607080593922027197, t[5], C)\n\n\tC, t[5] = madd2(m, 6852504016717314360, t[6], C)\n\n\tC, t[6] = madd2(m, 366248478184989226, t[7], C)\n\n\tC, t[7] = madd2(m, 2672987780203805083, t[8], C)\n\n\tC, t[8] = madd2(m, 14115032483094903896, t[9], C)\n\n\tC, t[9] = madd2(m, 8062699450825609015, t[10], C)\n\n\tC, t[10] = madd2(m, 8413249848292746549, t[11], C)\n\n\tC, t[11] = madd2(m, 11172154229712803058, t[12], C)\n\n\tC, t[12] = madd2(m, 18137346262305431037, t[13], C)\n\n\tC, t[13] = madd2(m, 123227702747754650, t[14], C)\n\n\tC, t[14] = madd2(m, 7409464670784690235, t[15], C)\n\n\tC, t[15] = madd2(m, 243347369443125979, t[16], C)\n\n\tC, t[16] = madd2(m, 200317109320159479, t[17], C)\n\n\tC, t[17] = madd2(m, 17492726232193822651, t[18], C)\n\n\tC, t[18] = madd2(m, 17666595880400198649, t[19], C)\n\n\tC, t[19] = madd2(m, 1619463007483089584, t[20], C)\n\n\tC, t[20] = madd3(m, 7910025299994333900, t[21], C, t[22])\n\n\tt[21], t[22] = bits.Add64(D, C, 0)\n\t// -----------------------------------\n\t// First loop\n\n\tC, t[0] = madd1(y[18], x[0], t[0])\n\tC, t[1] = madd2(y[18], x[1], t[1], C)\n\tC, 
t[2] = madd2(y[18], x[2], t[2], C)\n\tC, t[3] = madd2(y[18], x[3], t[3], C)\n\tC, t[4] = madd2(y[18], x[4], t[4], C)\n\tC, t[5] = madd2(y[18], x[5], t[5], C)\n\tC, t[6] = madd2(y[18], x[6], t[6], C)\n\tC, t[7] = madd2(y[18], x[7], t[7], C)\n\tC, t[8] = madd2(y[18], x[8], t[8], C)\n\tC, t[9] = madd2(y[18], x[9], t[9], C)\n\tC, t[10] = madd2(y[18], x[10], t[10], C)\n\tC, t[11] = madd2(y[18], x[11], t[11], C)\n\tC, t[12] = madd2(y[18], x[12], t[12], C)\n\tC, t[13] = madd2(y[18], x[13], t[13], C)\n\tC, t[14] = madd2(y[18], x[14], t[14], C)\n\tC, t[15] = madd2(y[18], x[15], t[15], C)\n\tC, t[16] = madd2(y[18], x[16], t[16], C)\n\tC, t[17] = madd2(y[18], x[17], t[17], C)\n\tC, t[18] = madd2(y[18], x[18], t[18], C)\n\tC, t[19] = madd2(y[18], x[19], t[19], C)\n\tC, t[20] = madd2(y[18], x[20], t[20], C)\n\tC, t[21] = madd2(y[18], x[21], t[21], C)\n\n\tD = C\n\n\t// m = t[0]n'[0] mod W\n\tm = t[0] * 2085129623399436079\n\n\t// -----------------------------------\n\t// Second loop\n\tC = madd0(m, 9062599614324828209, t[0])\n\n\tC, t[0] = madd2(m, 952425709649632109, t[1], C)\n\n\tC, t[1] = madd2(m, 13987751354083916656, t[2], C)\n\n\tC, t[2] = madd2(m, 9476693002504986527, t[3], C)\n\n\tC, t[3] = madd2(m, 17899356805776864267, t[4], C)\n\n\tC, t[4] = madd2(m, 2607080593922027197, t[5], C)\n\n\tC, t[5] = madd2(m, 6852504016717314360, t[6], C)\n\n\tC, t[6] = madd2(m, 366248478184989226, t[7], C)\n\n\tC, t[7] = madd2(m, 2672987780203805083, t[8], C)\n\n\tC, t[8] = madd2(m, 14115032483094903896, t[9], C)\n\n\tC, t[9] = madd2(m, 8062699450825609015, t[10], C)\n\n\tC, t[10] = madd2(m, 8413249848292746549, t[11], C)\n\n\tC, t[11] = madd2(m, 11172154229712803058, t[12], C)\n\n\tC, t[12] = madd2(m, 18137346262305431037, t[13], C)\n\n\tC, t[13] = madd2(m, 123227702747754650, t[14], C)\n\n\tC, t[14] = madd2(m, 7409464670784690235, t[15], C)\n\n\tC, t[15] = madd2(m, 243347369443125979, t[16], C)\n\n\tC, t[16] = madd2(m, 200317109320159479, t[17], C)\n\n\tC, t[17] = madd2(m, 
17492726232193822651, t[18], C)\n\n\tC, t[18] = madd2(m, 17666595880400198649, t[19], C)\n\n\tC, t[19] = madd2(m, 1619463007483089584, t[20], C)\n\n\tC, t[20] = madd3(m, 7910025299994333900, t[21], C, t[22])\n\n\tt[21], t[22] = bits.Add64(D, C, 0)\n\t// -----------------------------------\n\t// First loop\n\n\tC, t[0] = madd1(y[19], x[0], t[0])\n\tC, t[1] = madd2(y[19], x[1], t[1], C)\n\tC, t[2] = madd2(y[19], x[2], t[2], C)\n\tC, t[3] = madd2(y[19], x[3], t[3], C)\n\tC, t[4] = madd2(y[19], x[4], t[4], C)\n\tC, t[5] = madd2(y[19], x[5], t[5], C)\n\tC, t[6] = madd2(y[19], x[6], t[6], C)\n\tC, t[7] = madd2(y[19], x[7], t[7], C)\n\tC, t[8] = madd2(y[19], x[8], t[8], C)\n\tC, t[9] = madd2(y[19], x[9], t[9], C)\n\tC, t[10] = madd2(y[19], x[10], t[10], C)\n\tC, t[11] = madd2(y[19], x[11], t[11], C)\n\tC, t[12] = madd2(y[19], x[12], t[12], C)\n\tC, t[13] = madd2(y[19], x[13], t[13], C)\n\tC, t[14] = madd2(y[19], x[14], t[14], C)\n\tC, t[15] = madd2(y[19], x[15], t[15], C)\n\tC, t[16] = madd2(y[19], x[16], t[16], C)\n\tC, t[17] = madd2(y[19], x[17], t[17], C)\n\tC, t[18] = madd2(y[19], x[18], t[18], C)\n\tC, t[19] = madd2(y[19], x[19], t[19], C)\n\tC, t[20] = madd2(y[19], x[20], t[20], C)\n\tC, t[21] = madd2(y[19], x[21], t[21], C)\n\n\tD = C\n\n\t// m = t[0]n'[0] mod W\n\tm = t[0] * 2085129623399436079\n\n\t// -----------------------------------\n\t// Second loop\n\tC = madd0(m, 9062599614324828209, t[0])\n\n\tC, t[0] = madd2(m, 952425709649632109, t[1], C)\n\n\tC, t[1] = madd2(m, 13987751354083916656, t[2], C)\n\n\tC, t[2] = madd2(m, 9476693002504986527, t[3], C)\n\n\tC, t[3] = madd2(m, 17899356805776864267, t[4], C)\n\n\tC, t[4] = madd2(m, 2607080593922027197, t[5], C)\n\n\tC, t[5] = madd2(m, 6852504016717314360, t[6], C)\n\n\tC, t[6] = madd2(m, 366248478184989226, t[7], C)\n\n\tC, t[7] = madd2(m, 2672987780203805083, t[8], C)\n\n\tC, t[8] = madd2(m, 14115032483094903896, t[9], C)\n\n\tC, t[9] = madd2(m, 8062699450825609015, t[10], C)\n\n\tC, t[10] = madd2(m, 
8413249848292746549, t[11], C)\n\n\tC, t[11] = madd2(m, 11172154229712803058, t[12], C)\n\n\tC, t[12] = madd2(m, 18137346262305431037, t[13], C)\n\n\tC, t[13] = madd2(m, 123227702747754650, t[14], C)\n\n\tC, t[14] = madd2(m, 7409464670784690235, t[15], C)\n\n\tC, t[15] = madd2(m, 243347369443125979, t[16], C)\n\n\tC, t[16] = madd2(m, 200317109320159479, t[17], C)\n\n\tC, t[17] = madd2(m, 17492726232193822651, t[18], C)\n\n\tC, t[18] = madd2(m, 17666595880400198649, t[19], C)\n\n\tC, t[19] = madd2(m, 1619463007483089584, t[20], C)\n\n\tC, t[20] = madd3(m, 7910025299994333900, t[21], C, t[22])\n\n\tt[21], t[22] = bits.Add64(D, C, 0)\n\t// -----------------------------------\n\t// First loop\n\n\tC, t[0] = madd1(y[20], x[0], t[0])\n\tC, t[1] = madd2(y[20], x[1], t[1], C)\n\tC, t[2] = madd2(y[20], x[2], t[2], C)\n\tC, t[3] = madd2(y[20], x[3], t[3], C)\n\tC, t[4] = madd2(y[20], x[4], t[4], C)\n\tC, t[5] = madd2(y[20], x[5], t[5], C)\n\tC, t[6] = madd2(y[20], x[6], t[6], C)\n\tC, t[7] = madd2(y[20], x[7], t[7], C)\n\tC, t[8] = madd2(y[20], x[8], t[8], C)\n\tC, t[9] = madd2(y[20], x[9], t[9], C)\n\tC, t[10] = madd2(y[20], x[10], t[10], C)\n\tC, t[11] = madd2(y[20], x[11], t[11], C)\n\tC, t[12] = madd2(y[20], x[12], t[12], C)\n\tC, t[13] = madd2(y[20], x[13], t[13], C)\n\tC, t[14] = madd2(y[20], x[14], t[14], C)\n\tC, t[15] = madd2(y[20], x[15], t[15], C)\n\tC, t[16] = madd2(y[20], x[16], t[16], C)\n\tC, t[17] = madd2(y[20], x[17], t[17], C)\n\tC, t[18] = madd2(y[20], x[18], t[18], C)\n\tC, t[19] = madd2(y[20], x[19], t[19], C)\n\tC, t[20] = madd2(y[20], x[20], t[20], C)\n\tC, t[21] = madd2(y[20], x[21], t[21], C)\n\n\tD = C\n\n\t// m = t[0]n'[0] mod W\n\tm = t[0] * 2085129623399436079\n\n\t// -----------------------------------\n\t// Second loop\n\tC = madd0(m, 9062599614324828209, t[0])\n\n\tC, t[0] = madd2(m, 952425709649632109, t[1], C)\n\n\tC, t[1] = madd2(m, 13987751354083916656, t[2], C)\n\n\tC, t[2] = madd2(m, 9476693002504986527, t[3], C)\n\n\tC, t[3] = madd2(m, 
17899356805776864267, t[4], C)\n\n\tC, t[4] = madd2(m, 2607080593922027197, t[5], C)\n\n\tC, t[5] = madd2(m, 6852504016717314360, t[6], C)\n\n\tC, t[6] = madd2(m, 366248478184989226, t[7], C)\n\n\tC, t[7] = madd2(m, 2672987780203805083, t[8], C)\n\n\tC, t[8] = madd2(m, 14115032483094903896, t[9], C)\n\n\tC, t[9] = madd2(m, 8062699450825609015, t[10], C)\n\n\tC, t[10] = madd2(m, 8413249848292746549, t[11], C)\n\n\tC, t[11] = madd2(m, 11172154229712803058, t[12], C)\n\n\tC, t[12] = madd2(m, 18137346262305431037, t[13], C)\n\n\tC, t[13] = madd2(m, 123227702747754650, t[14], C)\n\n\tC, t[14] = madd2(m, 7409464670784690235, t[15], C)\n\n\tC, t[15] = madd2(m, 243347369443125979, t[16], C)\n\n\tC, t[16] = madd2(m, 200317109320159479, t[17], C)\n\n\tC, t[17] = madd2(m, 17492726232193822651, t[18], C)\n\n\tC, t[18] = madd2(m, 17666595880400198649, t[19], C)\n\n\tC, t[19] = madd2(m, 1619463007483089584, t[20], C)\n\n\tC, t[20] = madd3(m, 7910025299994333900, t[21], C, t[22])\n\n\tt[21], t[22] = bits.Add64(D, C, 0)\n\t// -----------------------------------\n\t// First loop\n\n\tC, t[0] = madd1(y[21], x[0], t[0])\n\tC, t[1] = madd2(y[21], x[1], t[1], C)\n\tC, t[2] = madd2(y[21], x[2], t[2], C)\n\tC, t[3] = madd2(y[21], x[3], t[3], C)\n\tC, t[4] = madd2(y[21], x[4], t[4], C)\n\tC, t[5] = madd2(y[21], x[5], t[5], C)\n\tC, t[6] = madd2(y[21], x[6], t[6], C)\n\tC, t[7] = madd2(y[21], x[7], t[7], C)\n\tC, t[8] = madd2(y[21], x[8], t[8], C)\n\tC, t[9] = madd2(y[21], x[9], t[9], C)\n\tC, t[10] = madd2(y[21], x[10], t[10], C)\n\tC, t[11] = madd2(y[21], x[11], t[11], C)\n\tC, t[12] = madd2(y[21], x[12], t[12], C)\n\tC, t[13] = madd2(y[21], x[13], t[13], C)\n\tC, t[14] = madd2(y[21], x[14], t[14], C)\n\tC, t[15] = madd2(y[21], x[15], t[15], C)\n\tC, t[16] = madd2(y[21], x[16], t[16], C)\n\tC, t[17] = madd2(y[21], x[17], t[17], C)\n\tC, t[18] = madd2(y[21], x[18], t[18], C)\n\tC, t[19] = madd2(y[21], x[19], t[19], C)\n\tC, t[20] = madd2(y[21], x[20], t[20], C)\n\tC, t[21] = madd2(y[21], 
x[21], t[21], C)\n\n\tD = C\n\n\t// m = t[0]n'[0] mod W\n\tm = t[0] * 2085129623399436079\n\n\t// -----------------------------------\n\t// Second loop\n\tC = madd0(m, 9062599614324828209, t[0])\n\n\tC, t[0] = madd2(m, 952425709649632109, t[1], C)\n\n\tC, t[1] = madd2(m, 13987751354083916656, t[2], C)\n\n\tC, t[2] = madd2(m, 9476693002504986527, t[3], C)\n\n\tC, t[3] = madd2(m, 17899356805776864267, t[4], C)\n\n\tC, t[4] = madd2(m, 2607080593922027197, t[5], C)\n\n\tC, t[5] = madd2(m, 6852504016717314360, t[6], C)\n\n\tC, t[6] = madd2(m, 366248478184989226, t[7], C)\n\n\tC, t[7] = madd2(m, 2672987780203805083, t[8], C)\n\n\tC, t[8] = madd2(m, 14115032483094903896, t[9], C)\n\n\tC, t[9] = madd2(m, 8062699450825609015, t[10], C)\n\n\tC, t[10] = madd2(m, 8413249848292746549, t[11], C)\n\n\tC, t[11] = madd2(m, 11172154229712803058, t[12], C)\n\n\tC, t[12] = madd2(m, 18137346262305431037, t[13], C)\n\n\tC, t[13] = madd2(m, 123227702747754650, t[14], C)\n\n\tC, t[14] = madd2(m, 7409464670784690235, t[15], C)\n\n\tC, t[15] = madd2(m, 243347369443125979, t[16], C)\n\n\tC, t[16] = madd2(m, 200317109320159479, t[17], C)\n\n\tC, t[17] = madd2(m, 17492726232193822651, t[18], C)\n\n\tC, t[18] = madd2(m, 17666595880400198649, t[19], C)\n\n\tC, t[19] = madd2(m, 1619463007483089584, t[20], C)\n\n\tC, t[20] = madd3(m, 7910025299994333900, t[21], C, t[22])\n\n\tt[21], t[22] = bits.Add64(D, C, 0)\n\n\tif t[22] != 0 {\n\t\t// we need to reduce, we have a result on 23 words\n\t\tvar b uint64\n\t\tz[0], b = bits.Sub64(t[0], 9062599614324828209, 0)\n\t\tz[1], b = bits.Sub64(t[1], 952425709649632109, b)\n\t\tz[2], b = bits.Sub64(t[2], 13987751354083916656, b)\n\t\tz[3], b = bits.Sub64(t[3], 9476693002504986527, b)\n\t\tz[4], b = bits.Sub64(t[4], 17899356805776864267, b)\n\t\tz[5], b = bits.Sub64(t[5], 2607080593922027197, b)\n\t\tz[6], b = bits.Sub64(t[6], 6852504016717314360, b)\n\t\tz[7], b = bits.Sub64(t[7], 366248478184989226, b)\n\t\tz[8], b = bits.Sub64(t[8], 2672987780203805083, 
b)\n\t\tz[9], b = bits.Sub64(t[9], 14115032483094903896, b)\n\t\tz[10], b = bits.Sub64(t[10], 8062699450825609015, b)\n\t\tz[11], b = bits.Sub64(t[11], 8413249848292746549, b)\n\t\tz[12], b = bits.Sub64(t[12], 11172154229712803058, b)\n\t\tz[13], b = bits.Sub64(t[13], 18137346262305431037, b)\n\t\tz[14], b = bits.Sub64(t[14], 123227702747754650, b)\n\t\tz[15], b = bits.Sub64(t[15], 7409464670784690235, b)\n\t\tz[16], b = bits.Sub64(t[16], 243347369443125979, b)\n\t\tz[17], b = bits.Sub64(t[17], 200317109320159479, b)\n\t\tz[18], b = bits.Sub64(t[18], 17492726232193822651, b)\n\t\tz[19], b = bits.Sub64(t[19], 17666595880400198649, b)\n\t\tz[20], b = bits.Sub64(t[20], 1619463007483089584, b)\n\t\tz[21], _ = bits.Sub64(t[21], 7910025299994333900, b)\n\t\treturn z\n\t}\n\n\t// copy t into z\n\tz[0] = t[0]\n\tz[1] = t[1]\n\tz[2] = t[2]\n\tz[3] = t[3]\n\tz[4] = t[4]\n\tz[5] = t[5]\n\tz[6] = t[6]\n\tz[7] = t[7]\n\tz[8] = t[8]\n\tz[9] = t[9]\n\tz[10] = t[10]\n\tz[11] = t[11]\n\tz[12] = t[12]\n\tz[13] = t[13]\n\tz[14] = t[14]\n\tz[15] = t[15]\n\tz[16] = t[16]\n\tz[17] = t[17]\n\tz[18] = t[18]\n\tz[19] = t[19]\n\tz[20] = t[20]\n\tz[21] = t[21]\n\n\t// if z > q --> z -= q\n\tif !(z[21] < 7910025299994333900 || (z[21] == 7910025299994333900 && (z[20] < 1619463007483089584 || (z[20] == 1619463007483089584 && (z[19] < 17666595880400198649 || (z[19] == 17666595880400198649 && (z[18] < 17492726232193822651 || (z[18] == 17492726232193822651 && (z[17] < 200317109320159479 || (z[17] == 200317109320159479 && (z[16] < 243347369443125979 || (z[16] == 243347369443125979 && (z[15] < 7409464670784690235 || (z[15] == 7409464670784690235 && (z[14] < 123227702747754650 || (z[14] == 123227702747754650 && (z[13] < 18137346262305431037 || (z[13] == 18137346262305431037 && (z[12] < 11172154229712803058 || (z[12] == 11172154229712803058 && (z[11] < 8413249848292746549 || (z[11] == 8413249848292746549 && (z[10] < 8062699450825609015 || (z[10] == 8062699450825609015 && (z[9] < 14115032483094903896 
|| (z[9] == 14115032483094903896 && (z[8] < 2672987780203805083 || (z[8] == 2672987780203805083 && (z[7] < 366248478184989226 || (z[7] == 366248478184989226 && (z[6] < 6852504016717314360 || (z[6] == 6852504016717314360 && (z[5] < 2607080593922027197 || (z[5] == 2607080593922027197 && (z[4] < 17899356805776864267 || (z[4] == 17899356805776864267 && (z[3] < 9476693002504986527 || (z[3] == 9476693002504986527 && (z[2] < 13987751354083916656 || (z[2] == 13987751354083916656 && (z[1] < 952425709649632109 || (z[1] == 952425709649632109 && (z[0] < 9062599614324828209))))))))))))))))))))))))))))))))))))))))))) {\n\t\tvar b uint64\n\t\tz[0], b = bits.Sub64(z[0], 9062599614324828209, 0)\n\t\tz[1], b = bits.Sub64(z[1], 952425709649632109, b)\n\t\tz[2], b = bits.Sub64(z[2], 13987751354083916656, b)\n\t\tz[3], b = bits.Sub64(z[3], 9476693002504986527, b)\n\t\tz[4], b = bits.Sub64(z[4], 17899356805776864267, b)\n\t\tz[5], b = bits.Sub64(z[5], 2607080593922027197, b)\n\t\tz[6], b = bits.Sub64(z[6], 6852504016717314360, b)\n\t\tz[7], b = bits.Sub64(z[7], 366248478184989226, b)\n\t\tz[8], b = bits.Sub64(z[8], 2672987780203805083, b)\n\t\tz[9], b = bits.Sub64(z[9], 14115032483094903896, b)\n\t\tz[10], b = bits.Sub64(z[10], 8062699450825609015, b)\n\t\tz[11], b = bits.Sub64(z[11], 8413249848292746549, b)\n\t\tz[12], b = bits.Sub64(z[12], 11172154229712803058, b)\n\t\tz[13], b = bits.Sub64(z[13], 18137346262305431037, b)\n\t\tz[14], b = bits.Sub64(z[14], 123227702747754650, b)\n\t\tz[15], b = bits.Sub64(z[15], 7409464670784690235, b)\n\t\tz[16], b = bits.Sub64(z[16], 243347369443125979, b)\n\t\tz[17], b = bits.Sub64(z[17], 200317109320159479, b)\n\t\tz[18], b = bits.Sub64(z[18], 17492726232193822651, b)\n\t\tz[19], b = bits.Sub64(z[19], 17666595880400198649, b)\n\t\tz[20], b = bits.Sub64(z[20], 1619463007483089584, b)\n\t\tz[21], _ = bits.Sub64(z[21], 7910025299994333900, b)\n\t}\n\treturn z\n}", "func RepeatBy[T any](count int, predicate func(int) T) []T {\n\tresult := make([]T, 0, 
count)\n\n\tfor i := 0; i < count; i++ {\n\t\tresult = append(result, predicate(i))\n\t}\n\n\treturn result\n}", "func product(vals []*big.Int, sub *big.Int, skip int) *big.Int {\n\tp := big.NewInt(1)\n\tfor i := range vals {\n\t\tif i == skip {\n\t\t\tcontinue\n\t\t}\n\t\tv := cp(vals[i])\n\t\tif sub != nil {\n\t\t\tv.Sub(sub, v)\n\t\t}\n\t\tp.Mul(p, v)\n\t}\n\treturn p\n}", "func Repeat[T any](t T, n int) (tt []T) {\n\tfor i := 0; i < n; i++ {\n\t\ttt = append(tt, t)\n\t}\n\treturn tt\n}", "func Product(a, b Chain) Chain {\n\tc := a.Clone()\n\tlast := c.End()\n\tfor _, x := range b[1:] {\n\t\ty := new(big.Int).Mul(last, x)\n\t\tc = append(c, y)\n\t}\n\treturn c\n}", "func productExceptSelf(nums []int) []int {\n\tresults := make([]int, len(nums))\n\tresults[0] = 1\n\tfor i := 1; i < len(nums); i++ {\n\t\tresults[i] = results[i-1] * nums[i-1]\n\t}\n\n\tmultiple := nums[len(nums)-1]\n\tfor i := len(nums) - 2; i >= 0; i-- {\n\t\tresults[i] = results[i] * multiple\n\t\tmultiple = multiple * nums[i]\n\t}\n\n\treturn results\n}", "func eval2(list []*Item) int {\n\tfor len(list) > 1 {\n\n\t\tfor i := 0; i < len(list); i++ {\n\t\t\tif list[i].Typ == Operation {\n\t\t\t\tleft := list[i-2]\n\t\t\t\tright := list[i-1]\n\t\t\t\tvar val int\n\t\t\t\tswitch list[i].Operation {\n\t\t\t\tcase \"+\":\n\t\t\t\t\tval = left.Value + right.Value\n\t\t\t\tcase \"-\":\n\t\t\t\t\tval = left.Value - right.Value\n\t\t\t\tcase \"/\":\n\t\t\t\t\t// Watch for div-by-zero\n\t\t\t\t\tval = left.Value / right.Value\n\t\t\t\tcase \"*\":\n\t\t\t\t\tval = left.Value * right.Value\n\t\t\t\t}\n\t\t\t\tlist[i] = &Item{Typ: Number, Value: val}\n\t\t\t\t// The only tricky part: excising the two Number-type\n\t\t\t\t// elements of the slice list\n\t\t\t\tlist = append(list[:i-2], list[i:]...)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn list[0].Value\n}", "func Sum(sl []float64) float64 {\n\tres := float64(0)\n\tfor _, val := range sl {\n\t\tres += val\n\t}\n\treturn res\n}", "func main() {\n\tlistNums 
:= []float64{1,2,3,4,5}\n\n\tfmt.Println(\"sum : \", addThemUp(listNums))\n}", "func MultiplyForUint8Slice(nums []uint8, multiplier uint8) {\n\tfor i := range nums {\n\t\tnums[i] *= multiplier\n\t}\n}", "func Repeat(input string, repeatCount int) (rs string) {\n\tfor i := 0; i < repeatCount; i++ {\n\t\trs += input\n\t}\n\treturn\n}", "func Accumulate(s []string, fun func(string) string) []string {\n\tvar res []string\n\tfor _, ss := range s {\n\t\tres = append(res, fun(ss))\n\t}\n\treturn res\n}", "func prodFrom(min int64, max int64, resultChannel chan int64) {\r\n\tvar prod int64 = 1\r\n\r\n\tfor i := min; i <= max; i++ {\r\n\t\tprod *= i\r\n\t}\r\n\r\n\tresultChannel <- prod\r\n}", "func newPrefixSums(s string) prefixSums {\n\tps := prefixSums(make([]charSums, len(s)+1))\n\tps[0] = newCharSums()\n\tfor i := 1; i <= len(s); i++ {\n\t\tps[i] = newCharSums()\n\t\tcopy(ps[i], ps[i-1])\n\t\tps[i].add([]rune(s)[i-1])\n\t}\n\n\treturn ps\n}", "func (z nat) mulRange(a, b uint64) nat {\n\tswitch {\n\tcase a == 0:\n\t\t// cut long ranges short (optimization)\n\t\treturn z.setUint64(0)\n\tcase a > b:\n\t\treturn z.setUint64(1)\n\tcase a == b:\n\t\treturn z.setUint64(a)\n\tcase a+1 == b:\n\t\treturn z.mul(nat(nil).setUint64(a), nat(nil).setUint64(b))\n\t}\n\tm := (a + b) / 2\n\treturn z.mul(nat(nil).mulRange(a, m), nat(nil).mulRange(m+1, b))\n}", "func accumulate(nums []int) int {\n\tanswer := 0\n\ttotalLength := len(nums)\n\tfor i := 0; i < totalLength; i++ {\n\t\tif nums[i] == nums[(i+1)%totalLength] {\n\t\t\tanswer += nums[i]\n\t\t}\n\t}\n\treturn answer\n}", "func Multiplier(arr []int) []int {\n\tlength := len(arr)\n\tprod := ArrayProduct(arr)\n\tresult := make([]int, length)\n\n\tfor i, v := range arr {\n\t\tresult[i] = prod / v\n\t}\n\n\treturn result\n}", "func productExceptSelf(nums []int) []int {\n\n\toutput := make([]int, len(nums))\n\ttmp := 1\n\tfor i := 0; i < len(nums); i++ {\n\t\toutput[i] = tmp\n\t\ttmp *= nums[i] // store uptil the previous 
product\n\t}\n\ttmp = 1\n\tfor i := len(nums) - 1; i >= 0; i-- {\n\t\toutput[i] *= tmp // store uptil the previous product and multipy with the existing value\n\t\ttmp *= nums[i]\n\t}\n\treturn output\n}", "func Reduce(elements []Value, memo Value, reductor BiMapper) Value {\n\tfor _, elem := range elements {\n\t\tmemo = reductor(memo, elem)\n\t}\n\treturn memo\n}", "func (fn *formulaFuncs) sumproduct(argsList *list.List) formulaArg {\n\tvar (\n\t\targType ArgType\n\t\tn int\n\t\tres []float64\n\t\tsum float64\n\t)\n\tfor arg := argsList.Front(); arg != nil; arg = arg.Next() {\n\t\ttoken := arg.Value.(formulaArg)\n\t\tif argType == ArgUnknown {\n\t\t\targType = token.Type\n\t\t}\n\t\tif token.Type != argType {\n\t\t\treturn newErrorFormulaArg(formulaErrorVALUE, formulaErrorVALUE)\n\t\t}\n\t\tswitch token.Type {\n\t\tcase ArgString, ArgNumber:\n\t\t\tif num := token.ToNumber(); num.Type == ArgNumber {\n\t\t\t\tsum = fn.PRODUCT(argsList).Number\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn newErrorFormulaArg(formulaErrorVALUE, formulaErrorVALUE)\n\t\tcase ArgMatrix:\n\t\t\targs := token.ToList()\n\t\t\tif res == nil {\n\t\t\t\tn = len(args)\n\t\t\t\tres = make([]float64, n)\n\t\t\t\tfor i := range res {\n\t\t\t\t\tres[i] = 1.0\n\t\t\t\t}\n\t\t\t}\n\t\t\tif len(args) != n {\n\t\t\t\treturn newErrorFormulaArg(formulaErrorVALUE, formulaErrorVALUE)\n\t\t\t}\n\t\t\tfor i, value := range args {\n\t\t\t\tnum := value.ToNumber()\n\t\t\t\tif num.Type != ArgNumber && value.Value() != \"\" {\n\t\t\t\t\treturn newErrorFormulaArg(formulaErrorVALUE, formulaErrorVALUE)\n\t\t\t\t}\n\t\t\t\tres[i] = res[i] * num.Number\n\t\t\t}\n\t\t}\n\t}\n\tfor _, r := range res {\n\t\tsum += r\n\t}\n\treturn newNumberFormulaArg(sum)\n}", "func Accumulate(words []string, converter Converter) []string {\n\tresult := make([]string, len(words))\n\tfor i, word := range words {\n\t\tresult[i] = converter(word)\n\t}\n\treturn result\n}", "func Dasum(N int, X []float64, incX int) float64", "func multiples(of 
[]int, below int)[]int {\n\t// Using a hash as a poor man's set\n\tvalues := make(map[int]bool)\n\tfor _, num := range of {\n\t\tfor i := num; i < below; i += num {\n\t\t\tvalues[i] = true\n\t\t}\n\t}\n\tresults := make([]int, len(values))\n\tvar i int = 0\n\tfor n, _ := range values {\n\t\tresults[i] = n\n\t\ti++\n\t}\n\treturn results\n}", "func SquareList(numbers []int) []int {\n\tresult := make([]int, len(numbers))\n\n\tfor i, n := range numbers {\n\t\tresult[i] = n * n\n\t}\n\n\treturn result\n}", "func compute(intcode []int) []int {\n\tfor i, code := range intcode {\n\t\tif i%4 == 0 {\n\t\t\tswitch code {\n\t\t\tcase 1:\n\t\t\t\tintcode[intcode[i+3]] = intcode[intcode[i+1]] + intcode[intcode[i+2]]\n\t\t\tcase 2:\n\t\t\t\tintcode[intcode[i+3]] = intcode[intcode[i+1]] * intcode[intcode[i+2]]\n\t\t\tcase 99:\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn intcode\n}", "func mulSlice(out, a, b []float64)", "func SumLists(n1 *Node, n2 *Node) *Node {\n\thead := New(0) // result sum list\n\ttail := head\n\n\tfor {\n\t\t// tail.data is a carrier for (1 || 0) of the next digit\n\t\tvalue := tail.data\n\n\t\tif n1 != nil {\n\t\t\tvalue += n1.data\n\t\t\tn1 = n1.next\n\t\t}\n\n\t\tif n2 != nil {\n\t\t\tvalue += n2.data\n\t\t\tn2 = n2.next\n\t\t}\n\n\t\ttail.data = value % 10\n\t\ttail.next = New(value / 10)\n\n\t\tif n1 == nil && n2 == nil {\n\t\t\t// remove leading zero\n\t\t\tif tail.next.data == 0 {\n\t\t\t\ttail.next = nil\n\t\t\t}\n\t\t\t// no more digits to sum\n\t\t\tbreak\n\t\t}\n\n\t\ttail = tail.next\n\t}\n\n\treturn head\n}", "func Accumulate(target []string, operation func(string) string) []string {\n\toutput := make([]string, len(target))\n\tfor pos, word := range target {\n\t\toutput[pos] = operation(word)\n\t}\n\n\treturn output\n}", "func multiplyBy(n int) func(int) int {\n\treturn func(val int) int {\n\t\treturn n * val\n\t}\n}", "func (a SumAggregator) Aggregate(values []float64) float64 {\n\tresult := 0.0\n\tfor _, v := range values {\n\t\tresult += 
v\n\t}\n\treturn result\n}", "func constructArr(a []int) []int {\n\tvar zeroCount, lastZeroIndex int\n\ttotalProduct := 1\n\tfor i, v := range a {\n\t\tif v == 0 {\n\t\t\tzeroCount += 1\n\t\t\tlastZeroIndex = i\n\t\t} else {\n\t\t\ttotalProduct *= v\n\t\t}\n\t}\n\tb := make([]int, len(a), len(a))\n\tif zeroCount == 0 {\n\t\tfor i := range b {\n\t\t\tb[i] = totalProduct / a[i]\n\t\t}\n\t} else if zeroCount == 1 {\n\t\tb[lastZeroIndex] = totalProduct\n\t}\n\treturn b\n}", "func makeMultiples(d1, d2, d3, d4 chan int64, base int64, count int) {\n\t// 0 as a divisor might sound bad, but chanDivides() always adds a value to\n\t// the raw divisor we send:\n\tvar res int64=0\n\td1 <- res\n\td2 <- res\n\td3 <- res\n\td4 <- res\n\tfor i:=1; i<count; i++ {\n\t\tres+=base\n\t\td1 <- res\n\t\td2 <- res\n\t\td3 <- res\n\t\td4 <- res\n\t\t//fmt.Print(\"m\")\n\t}\n\tfmt.Println(\"Multiples complete\")\n\tclose(d1)\n\tclose(d2)\n\tclose(d3)\n\tclose(d4)\n}", "func ScaleAddRepeated(v, scalers, biases Res) Res {\n\tout := v.Output().Copy()\n\tanyvec.ScaleRepeated(out, scalers.Output())\n\tanyvec.AddRepeated(out, biases.Output())\n\tvars := MergeVarSets(v.Vars(), scalers.Vars(), biases.Vars())\n\treturn &scaleAddRepeatedRes{\n\t\tIn: v,\n\t\tScalers: scalers,\n\t\tBiases: biases,\n\t\tOutVec: out,\n\t\tV: vars,\n\t}\n}", "func getTotalSizes(argPatterns []ellipses.ArgPattern) []uint64 {\n\tvar totalSizes []uint64\n\tfor _, argPattern := range argPatterns {\n\t\tvar totalSize uint64 = 1\n\t\tfor _, p := range argPattern {\n\t\t\ttotalSize = totalSize * uint64(len(p.Seq))\n\t\t}\n\t\ttotalSizes = append(totalSizes, totalSize)\n\t}\n\treturn totalSizes\n}", "func Repeat(input string, n int) (repeated string) {\n\tfor i := 0; i < n; i++ {\n\t\trepeated += input\n\t}\n\treturn\n}", "func Product(a, b []uint8) []uint8 {\n\tvar result []uint8 = a\n\tstringNumberB := Stringify(b)\n\tuintNumberB, err := strconv.ParseUint(stringNumberB, 10, 64)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tvar i 
uint64\n\tfor i = uintNumberB - 1; i != 0; i-- {\n\t\tresult = Sum(result, a)\n\t}\n\treturn result\n}", "func Multiply(t Tuplelike, n float64) Tuplelike {\n\tresult := []float64{}\n\n\tfor _, value := range t.Values() {\n\t\tresult = append(result, value*n)\n\t}\n\n\treturn Tuple(result)\n}", "func NewFloatList(lst []float64) FloatList {\n\tcpy := []float64{}\n\ttotal := 0.0\n\n\tfor _, val := range lst {\n\t\tcpy = append(cpy, val)\n\t\ttotal += val\n\t}\n\n\tsort.Float64s(cpy)\n\n\treturn FloatList{cpy, len(cpy), total}\n}", "func (h *RunList) reduce(less func(x, y interface{}) bool, newRun func(h *RunList, i, j int) interface{}) *RunList {\n\t// create runs of entries with equal values\n\th.sort(less);\n\n\t// for each run, make a new run object and collect them in a new RunList\n\tvar hh RunList;\n\ti := 0;\n\tfor j := 0; j < h.Len(); j++ {\n\t\tif less(h.At(i), h.At(j)) {\n\t\t\thh.Push(newRun(h, i, j));\n\t\t\ti = j;\t// start a new run\n\t\t}\n\t}\n\t// add final run, if any\n\tif i < h.Len() {\n\t\thh.Push(newRun(h, i, h.Len()))\n\t}\n\n\treturn &hh;\n}", "func (list IntList) Map(fn unaryFunc) IntList {\n\tr := []int{}\n\tfor _, e := range list {\n\t\tr = append(r, fn(e))\n\t}\n\treturn IntList(r)\n}", "func Sum[T Number](items []T) T {\n\tvar sum T\n\tfor _, item := range items {\n\t\tsum += item\n\t}\n\treturn sum\n}", "func Map(dst, src []float64, f func(v float64) float64) []float64 {\n\n\tif dst == nil {\n\t\tdst = make([]float64, len(src))\n\t}\n\n\tif len(src) != len(dst) {\n\t\tpanic(errLength)\n\t}\n\n\tfor i, x := range src {\n\t\tdst[i] = f(x)\n\t}\n\treturn dst\n}", "func productExceptSelf(nums []int) []int {\n\tl := len(nums)\n\tres := make([]int, l)\n\tres[0] = 1\n\tfor i := 1; i < l; i++ {\n\t\tres[i] = res[i-1] * nums[i-1]\n\t}\n\tR := 1\n\tfor i := l - 2; i >= 0; i-- {\n\t\tR *= nums[i+1]\n\t\tres[i] = res[i] * R\n\t}\n\treturn res\n}", "func Add(nums ...float64) (total float64) {\n\tfor _, n := range nums {\n\t\ttotal += 
n\n\t}\n\treturn\n}", "func (v Vec) MapBy(f func(int, float64) float64) Vec {\n\tfor i, val := range v {\n\t\tv[i] = f(i, val)\n\t}\n\treturn v\n}", "func Accumulate(data []float64, initValue float64, f Operation) float64 {\r\n\tres := initValue\r\n\tfor _, v := range data {\r\n\t\tres = f(res, v)\r\n\t}\r\n\treturn res\r\n}" ]
[ "0.5405258", "0.5152699", "0.5114688", "0.5112069", "0.5109156", "0.5105612", "0.5098415", "0.5061247", "0.49656245", "0.4962712", "0.49513575", "0.49395883", "0.49349442", "0.49143258", "0.49084", "0.4908162", "0.48922318", "0.4881153", "0.48788738", "0.48718312", "0.48426563", "0.48388946", "0.4834988", "0.48048854", "0.47958073", "0.4785432", "0.47843826", "0.47792044", "0.4777721", "0.47373146", "0.4734307", "0.47257546", "0.47247005", "0.47237834", "0.47177047", "0.47170344", "0.47169596", "0.47107905", "0.47047675", "0.47021076", "0.4695317", "0.4694505", "0.46845716", "0.46845716", "0.46845716", "0.46828023", "0.46746758", "0.46693307", "0.46632794", "0.4661937", "0.4651146", "0.46411562", "0.46396655", "0.46383554", "0.460936", "0.46075475", "0.46044418", "0.460299", "0.4601705", "0.45989534", "0.45946133", "0.4585384", "0.45848808", "0.4583469", "0.45798138", "0.45797044", "0.45790118", "0.45659983", "0.45641765", "0.45608693", "0.45544198", "0.4546411", "0.45430452", "0.45374012", "0.4534758", "0.45299232", "0.45251325", "0.45242003", "0.4523417", "0.45213106", "0.4518505", "0.45169666", "0.4498257", "0.44975382", "0.4496295", "0.44922462", "0.44921917", "0.44878438", "0.44866633", "0.44842207", "0.4480671", "0.44781762", "0.44764253", "0.44758242", "0.4467879", "0.4455684", "0.4453979", "0.44534266", "0.44517452", "0.4448306", "0.4443457" ]
0.0
-1
IsAMPCustomElement returns true if the node is an AMP custom element.
func IsAMPCustomElement(n *html.Node) bool { return n.Type == html.ElementNode && strings.HasPrefix(n.Data, "amp-") }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (decl SomeDecl) IsCustom() bool {\n\t_, is := decl.Properties.(CustomProperties)\n\treturn is\n}", "func IsScriptAMPExtension(n *html.Node) bool {\n\t_, ok := AMPExtensionName(n)\n\treturn ok\n}", "func (t *Type) IsCustom() bool {\n\treturn !t.IsPrimitive() && !t.IsContainer()\n}", "func IsScriptAMPRuntime(n *html.Node) bool {\n\tif n.DataAtom != atom.Script {\n\t\treturn false\n\t}\n\tif v, ok := htmlnode.GetAttributeVal(n, \"\", \"src\"); ok {\n\t\treturn htmlnode.HasAttribute(n, \"\", \"async\") &&\n\t\t\t!IsScriptAMPExtension(n) &&\n\t\t\tstrings.HasPrefix(v, AMPCacheRootURL) &&\n\t\t\t(strings.HasSuffix(v, \"/v0.js\") ||\n\t\t\t\tstrings.HasSuffix(v, \"/v0.mjs\") ||\n\t\t\t\tstrings.HasSuffix(v, \"/amp4ads-v0.js\") ||\n\t\t\t\tstrings.HasSuffix(v, \"/amp4ads-v0.mjs\"))\n\t}\n\treturn false\n}", "func IsScriptAMPViewer(n *html.Node) bool {\n\tif n.DataAtom != atom.Script {\n\t\treturn false\n\t}\n\ta, ok := htmlnode.FindAttribute(n, \"\", \"src\")\n\treturn ok &&\n\t\t!IsScriptAMPExtension(n) &&\n\t\tstrings.HasPrefix(a.Val,\n\t\t\tAMPCacheSchemeAndHost+\"/v0/amp-viewer-integration-\") &&\n\t\tstrings.HasSuffix(a.Val, \".js\") &&\n\t\thtmlnode.HasAttribute(n, \"\", \"async\")\n}", "func AMPExtensionName(n *html.Node) (string, bool) {\n\tif n.DataAtom != atom.Script {\n\t\treturn \"\", false\n\t}\n\tfor _, attr := range n.Attr {\n\t\tfor _, k := range []string{AMPCustomElement, AMPCustomTemplate, AMPHostService} {\n\t\t\tif attr.Key == k {\n\t\t\t\treturn attr.Val, true\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\", false\n}", "func CfnCustomResource_IsCfnElement(x interface{}) *bool {\n\t_init_.Initialize()\n\n\tvar returns *bool\n\n\t_jsii_.StaticInvoke(\n\t\t\"aws-cdk-lib.aws_cloudformation.CfnCustomResource\",\n\t\t\"isCfnElement\",\n\t\t[]interface{}{x},\n\t\t&returns,\n\t)\n\n\treturn returns\n}", "func (ts TaskSpec) HasLaunchPolicyCustom() bool {\n\tif ts.LaunchPolicy == \"\" {\n\t\treturn len(ts.Inputs) == 0\n\t}\n\treturn ts.LaunchPolicy == 
LaunchPolicyCustom\n}", "func (o *WorkflowCustomDataProperty) HasCustomDataTypeName() bool {\n\tif o != nil && o.CustomDataTypeName != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func isEmbed(n *html.Node) bool {\n\treturn n.Type == html.ElementNode && n.Data == \"embed\"\n}", "func (o *DataExportQuery) HasCustomBitlink() bool {\n\tif o != nil && o.CustomBitlink != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (tm *CustagMan) GetCustomTag(elem jq.JQuery) (ct bind.CustomTag, ok bool) {\n\tct, ok = tm.custags[strings.ToUpper(elem.Prop(\"tagName\").(string))]\n\treturn\n}", "func (o *WorkflowCustomDataProperty) HasCustomDataTypeId() bool {\n\tif o != nil && o.CustomDataTypeId != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *ShortenBitlinkBodyAllOf) HasCustomBitlinks() bool {\n\tif o != nil && o.CustomBitlinks != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *EnvironmentUsageDto) HasCustomMetrics() bool {\n\tif o != nil && o.CustomMetrics != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (a *AgentPoolProfile) IsCustomVNET() bool {\n\treturn len(a.VnetSubnetID) > 0\n}", "func ContainsCustomResources(resources []string) bool {\n\tfor _, resource := range resources {\n\t\tif IsCustomResource(resource) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (me *XsdGoPkgHasElem_CustomsInformationsequenceTxsdSourceDocumentsSequenceSalesInvoicesSequenceInvoiceSequenceLineLinesequenceTxsdSourceDocumentsSequenceSalesInvoicesSequenceInvoiceInvoicesequenceTxsdSourceDocumentsSequenceSalesInvoicesSalesInvoicessequenceTxsdSourceDocumentsSourceDocumentsschema_CustomsInformation_TCustomsInformation_) Walk() (err error) {\n\tif fn := 
WalkHandlers.XsdGoPkgHasElem_CustomsInformationsequenceTxsdSourceDocumentsSequenceSalesInvoicesSequenceInvoiceSequenceLineLinesequenceTxsdSourceDocumentsSequenceSalesInvoicesSequenceInvoiceInvoicesequenceTxsdSourceDocumentsSequenceSalesInvoicesSalesInvoicessequenceTxsdSourceDocumentsSourceDocumentsschema_CustomsInformation_TCustomsInformation_; me != nil {\n\t\tif fn != nil {\n\t\t\tif err = fn(me, true); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif err = me.CustomsInformation.Walk(); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\treturn\n\t\t}\n\t\tif fn != nil {\n\t\t\tif err = fn(me, false); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}", "func (t ResolvedPipelineRunTask) IsCustomTask() bool {\n\treturn t.CustomTask\n}", "func (p *WindowsProfile) HasCustomImage() bool {\n\treturn p.OSImage != nil && len(p.OSImage.URL) > 0\n}", "func (o KubernetesClusterDefaultNodePoolOutput) CustomCaTrustEnabled() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v KubernetesClusterDefaultNodePool) *bool { return v.CustomCaTrustEnabled }).(pulumi.BoolPtrOutput)\n}", "func (o KubernetesClusterDefaultNodePoolPtrOutput) CustomCaTrustEnabled() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v *KubernetesClusterDefaultNodePool) *bool {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.CustomCaTrustEnabled\n\t}).(pulumi.BoolPtrOutput)\n}", "func (p *VnetProfile) IsCustomVNET() bool {\n\treturn len(p.VnetResourceGroup) > 0 && len(p.VnetName) > 0 && len(p.SubnetName) > 0\n}", "func (a *Assertion) HasCustomNamespaceTemplate(targetCluster, templateRef, template string) *Assertion {\n\terr := a.loadUaAssertion()\n\trequire.NoError(a.t, err)\n\tfor _, ua := range a.masterUserRecord.Spec.UserAccounts {\n\t\tif ua.TargetCluster == targetCluster {\n\t\t\tfor _, ns := range ua.Spec.NSTemplateSet.Namespaces 
{\n\t\t\t\tif ns.TemplateRef == templateRef {\n\t\t\t\t\tassert.Equal(a.t, template, ns.Template)\n\t\t\t\t\treturn a\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\ta.t.Fatalf(\"no match for the given target cluster '%s' and templateRef '%s'\", targetCluster, templateRef)\n\treturn a\n}", "func (ref *UIElement) AMPMField() *UIElement {\n\tret, _ := ref.UIElementAttr(AMPMFieldAttribute)\n\treturn ret\n}", "func (o *CustomfieldRequest) HasCustomfield() bool {\n\tif o != nil && o.Customfield != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *NotificationConfig) HasCustomMessage() bool {\n\tif o != nil && o.CustomMessage != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (me TxsdPresentationAttributesTextContentElementsUnicodeBidi) IsEmbed() bool {\n\treturn me.String() == \"embed\"\n}", "func IsACMEPolicyLevel(ctx context.Context) bool {\n\treturn isPolicyLevel(ctx, acmePolicyLevel)\n}", "func (me TxsdAddressSimpleContentExtensionCategory) IsAtm() bool { return me.String() == \"atm\" }", "func (j *DSRocketchat) CustomEnrich() bool {\n\treturn false\n}", "func CustomType(t Type) bool {\n\tif _, err := url.ParseRequestURI(string(t)); err != nil {\n\t\treturn false\n\t}\n\treturn true\n}", "func hasCustomMarshaler(t reflect.Type) bool {\n\tt = reflect.PtrTo(t)\n\treturn t.Implements(reflect.TypeOf((*easyjson.Marshaler)(nil)).Elem()) ||\n\t\tt.Implements(reflect.TypeOf((*json.Marshaler)(nil)).Elem()) ||\n\t\tt.Implements(reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem())\n}", "func (me TArtIdTypeUnion4) IsPmc() bool { return me.String() == \"pmc\" }", "func (o *CustomfieldCustomFieldsResponse) HasCustomfields() bool {\n\tif o != nil && o.Customfields != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (me TAttlistOtherAbstractType) IsAamc() bool { return me.String() == \"AAMC\" }", "func (o *AlertingEventTypeFilter) HasCustomEventFilter() bool {\n\tif o != nil && o.CustomEventFilter != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (me 
TxsdAddressSimpleContentExtensionCategory) IsAsn() bool { return me.String() == \"asn\" }", "func (o *ViewCustomFieldTask) HasCustomfield() bool {\n\tif o != nil && o.Customfield != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func IsScriptRenderDelaying(n *html.Node) bool {\n\tif n.DataAtom != atom.Script {\n\t\treturn false\n\t}\n\tif IsScriptAMPViewer(n) {\n\t\treturn true\n\t}\n\tif v, ok := htmlnode.GetAttributeVal(n, \"\", AMPCustomElement); ok {\n\t\t// TODO(b/77581738): Remove amp-story from this list.\n\t\treturn (v == AMPDynamicCSSClasses ||\n\t\t\tv == AMPExperiment ||\n\t\t\tv == AMPStory)\n\t}\n\treturn false\n}", "func isAudio(n *html.Node) bool {\n\treturn n.Type == html.ElementNode && n.Data == \"audio\"\n}", "func (me *XsdGoPkgHasElem_CustomsDetailssequenceTxsdProductProductschema_CustomsDetails_TCustomsDetails_) Walk() (err error) {\n\tif fn := WalkHandlers.XsdGoPkgHasElem_CustomsDetailssequenceTxsdProductProductschema_CustomsDetails_TCustomsDetails_; me != nil {\n\t\tif fn != nil {\n\t\t\tif err = fn(me, true); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif err = me.CustomsDetails.Walk(); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\treturn\n\t\t}\n\t\tif fn != nil {\n\t\t\tif err = fn(me, false); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}", "func (me TxsdNodeRoleSimpleContentExtensionCategory) IsApplication() bool {\n\treturn me.String() == \"application\"\n}", "func (o *DataExportQuery) GetCustomBitlinkOk() (*string, bool) {\n\tif o == nil || o.CustomBitlink == nil {\n\t\treturn nil, false\n\t}\n\treturn o.CustomBitlink, true\n}", "func (o *MicrosoftGraphSharedPcConfiguration) HasAccountManagerPolicy() bool {\n\tif o != nil && o.AccountManagerPolicy != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o AuthenticationPtrOutput) CustomAccount() 
CustomAccountPtrOutput {\n\treturn o.ApplyT(func(v *Authentication) *CustomAccount {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.CustomAccount\n\t}).(CustomAccountPtrOutput)\n}", "func (o AuthenticationPtrOutput) CustomAccount() CustomAccountPtrOutput {\n\treturn o.ApplyT(func(v *Authentication) *CustomAccount {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.CustomAccount\n\t}).(CustomAccountPtrOutput)\n}", "func (c *Config) IsCustomCloudProfile() bool {\n\t// c.ClusterDefinition is only set for new deployments\n\t// Not for upgrade/scale operations\n\treturn os.Getenv(\"CUSTOM_CLOUD_NAME\") != \"\"\n}", "func isApplet(n *html.Node) bool {\n\treturn n.Type == html.ElementNode && n.Data == \"applet\"\n}", "func (o AuthenticationResponsePtrOutput) CustomAccount() CustomAccountResponsePtrOutput {\n\treturn o.ApplyT(func(v *AuthenticationResponse) *CustomAccountResponse {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.CustomAccount\n\t}).(CustomAccountResponsePtrOutput)\n}", "func (p *LinuxProfile) HasCustomImage() bool {\n\treturn p.OSImage != nil && len(p.OSImage.URL) > 0\n}", "func (WasmMsgParser) ParseCustom(_ sdk.AccAddress, _ json.RawMessage) (sdk.Msg, error) {\n\treturn nil, nil\n}", "func (o *MicrosoftGraphTeamFunSettings) HasAllowCustomMemes() bool {\n\tif o != nil && o.AllowCustomMemes != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (me TxsdCounterSimpleContentExtensionType) IsEvent() bool { return me.String() == \"event\" }", "func IsCustomResource(resourceName string) bool {\n\t// hack: we assume anything which is not cpu/memory to be a gpu.\n\t// we are not getting anything more that a map string->limits from the user\n\treturn resourceName != ResourceNameCores && resourceName != ResourceNameMemory\n}", "func (o AccessCustomPageOutput) CustomHtml() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *AccessCustomPage) pulumi.StringPtrOutput { return v.CustomHtml }).(pulumi.StringPtrOutput)\n}", "func (me 
TEventType) IsHITExtended() bool { return me.String() == \"HITExtended\" }", "func (me TxsdFeCompositeTypeOperator) IsAtop() bool { return me.String() == \"atop\" }", "func CfnApp_IsCfnElement(x interface{}) *bool {\n\t_init_.Initialize()\n\n\tvar returns *bool\n\n\t_jsii_.StaticInvoke(\n\t\t\"monocdk.aws_opsworks.CfnApp\",\n\t\t\"isCfnElement\",\n\t\t[]interface{}{x},\n\t\t&returns,\n\t)\n\n\treturn returns\n}", "func (n Noun) IsAtom() bool { return n.atom != nil }", "func (mr *MockToggleMockRecorder) IsCustom() *gomock.Call {\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"IsCustom\", reflect.TypeOf((*MockToggle)(nil).IsCustom))\n}", "func (typ *Type) Embed(n string) bool {\n\t_, ok := typ.embedIdx[n]\n\treturn ok\n}", "func (me TxsdCounterSimpleContentExtensionType) IsAlert() bool { return me.String() == \"alert\" }", "func (me TxsdCounterSimpleContentExtensionType) IsOrganization() bool {\n\treturn me.String() == \"organization\"\n}", "func CfnApplication_IsCfnElement(x interface{}) *bool {\n\t_init_.Initialize()\n\n\tvar returns *bool\n\n\t_jsii_.StaticInvoke(\n\t\t\"monocdk.aws_appconfig.CfnApplication\",\n\t\t\"isCfnElement\",\n\t\t[]interface{}{x},\n\t\t&returns,\n\t)\n\n\treturn returns\n}", "func (me *XsdGoPkgHasElems_CustomsInformationsequenceLinesequenceWorkDocumentsequenceWorkingDocumentssequenceSourceDocumentsschema_CustomsInformation_TCustomsInformation_) Walk() (err error) {\n\tif fn := WalkHandlers.XsdGoPkgHasElems_CustomsInformationsequenceLinesequenceWorkDocumentsequenceWorkingDocumentssequenceSourceDocumentsschema_CustomsInformation_TCustomsInformation_; me != nil {\n\t\tif fn != nil {\n\t\t\tif err = fn(me, true); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tfor _, x := range me.CustomsInformations {\n\t\t\tif err = x.Walk(); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif fn != nil {\n\t\t\tif err = fn(me, 
false); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}", "func (o AuthenticationOutput) CustomAccount() CustomAccountPtrOutput {\n\treturn o.ApplyT(func(v Authentication) *CustomAccount { return v.CustomAccount }).(CustomAccountPtrOutput)\n}", "func (o AuthenticationOutput) CustomAccount() CustomAccountPtrOutput {\n\treturn o.ApplyT(func(v Authentication) *CustomAccount { return v.CustomAccount }).(CustomAccountPtrOutput)\n}", "func CfnApplication_IsCfnElement(x interface{}) *bool {\n\t_init_.Initialize()\n\n\tvar returns *bool\n\n\t_jsii_.StaticInvoke(\n\t\t\"monocdk.aws_codedeploy.CfnApplication\",\n\t\t\"isCfnElement\",\n\t\t[]interface{}{x},\n\t\t&returns,\n\t)\n\n\treturn returns\n}", "func hasCustomUnmarshaler(t reflect.Type) bool {\n\tt = reflect.PtrTo(t)\n\treturn t.Implements(reflect.TypeOf((*easyjson.Unmarshaler)(nil)).Elem()) ||\n\t\tt.Implements(reflect.TypeOf((*json.Unmarshaler)(nil)).Elem()) ||\n\t\tt.Implements(reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem())\n}", "func (g *generator) isCustomOp(m *descriptor.MethodDescriptorProto, info *httpInfo) bool {\n\treturn g.opts.diregapic && // Generator in DIREGAPIC mode.\n\t\tg.aux.customOp != nil && // API Defines a custom operation.\n\t\tm.GetOutputType() == g.customOpProtoName() && // Method returns the custom operation.\n\t\tinfo.verb != \"get\" && // Method is not a GET (polling methods).\n\t\tm.GetName() != \"Wait\" // Method is not a Wait (uses POST).\n}", "func (m *NodeMetrics) IsNil() bool {\n\treturn m == nil\n}", "func (me TxsdGroupingCategory) IsAm() bool { return me.String() == \"AM\" }", "func (list *ArrayList[T]) Contains(ele T) bool {\n\tfor _, o := range list.elems {\n\t\tif o == ele {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func hasMeta(elem string) bool {\n\tesc := false\n\tfor _, r := range elem {\n\t\tif esc {\n\t\t\tesc = false\n\t\t\tcontinue\n\t\t}\n\t\tswitch r {\n\t\tcase 
'\\\\':\n\t\t\tesc = true\n\t\tcase '*', '[', '?':\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (me TxsdAddressSimpleContentExtensionCategory) IsEMail() bool { return me.String() == \"e-mail\" }", "func (ta TouristAttraction) AsBasicCreativeWork() (BasicCreativeWork, bool) {\n\treturn nil, false\n}", "func (o *WorkflowCustomDataProperty) GetCustomDataTypeNameOk() (*string, bool) {\n\tif o == nil || o.CustomDataTypeName == nil {\n\t\treturn nil, false\n\t}\n\treturn o.CustomDataTypeName, true\n}", "func (a *Assertion) HasCustomClusterResourcesTemplate(targetCluster, template string) *Assertion {\n\terr := a.loadUaAssertion()\n\trequire.NoError(a.t, err)\n\tfor _, ua := range a.masterUserRecord.Spec.UserAccounts {\n\t\tif ua.TargetCluster == targetCluster {\n\t\t\trequire.NotNil(a.t, ua.Spec.NSTemplateSet.ClusterResources)\n\t\t\tassert.Equal(a.t, template, ua.Spec.NSTemplateSet.ClusterResources.Template)\n\t\t\treturn a\n\t\t}\n\t}\n\ta.t.Fatalf(\"no match for the given target cluster and templateRef\")\n\treturn a\n}", "func (me TxsdType) IsExtended() bool { return me == \"extended\" }", "func (o *CustomfieldRequest) GetCustomfieldOk() (*CustomfieldCustomField, bool) {\n\tif o == nil || o.Customfield == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Customfield, true\n}", "func (twe *TxSizeEstimator) AddCustomInput(sigScriptSize int64) *TxSizeEstimator {\n\tscriptLenSerSize := int64(wire.VarIntSerializeSize(uint64(sigScriptSize)))\n\ttwe.InputSize += InputSize + scriptLenSerSize + sigScriptSize\n\ttwe.inputCount++\n\n\treturn twe\n}", "func isTrack(n *html.Node) bool {\n\treturn n.Type == html.ElementNode && n.Data == \"track\"\n}", "func (_Eth *EthSession) IsPeerAPublisher(a common.Address, topic string) (bool, error) {\n\treturn _Eth.Contract.IsPeerAPublisher(&_Eth.CallOpts, a, topic)\n}", "func (e *AnnotatedElement) IsElementType(et annogo.ElementType) bool {\n\tfor _, e := range e.ApplicableTypes {\n\t\tif e == et {\n\t\t\treturn 
true\n\t\t}\n\t}\n\treturn false\n}", "func (me TxsdShow) IsEmbed() bool { return me == \"embed\" }", "func (obj *identifier) IsElement() bool {\n\treturn obj.element != nil\n}", "func (me TxsdTimeImpactSimpleContentExtensionMetric) IsElapsed() bool {\n\treturn me.String() == \"elapsed\"\n}", "func (me TAttlistArticlePubModel) IsElectronicECollection() bool {\n\treturn me.String() == \"Electronic-eCollection\"\n}", "func isIFrame(n *html.Node) bool {\n\treturn n.Type == html.ElementNode && n.Data == \"iframe\"\n}", "func (cn ComponentName) IsAddon() bool {\n\treturn cn == AddonComponentName\n}", "func ContainsNodeServiceCapability(nodeCaps []*csi.NodeServiceCapability, subCap csi.NodeServiceCapability_RPC_Type) bool {\n\tfor _, v := range nodeCaps {\n\t\tif strings.Contains(v.String(), subCap.String()) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func CfnUserProfile_IsCfnElement(x interface{}) *bool {\n\t_init_.Initialize()\n\n\tvar returns *bool\n\n\t_jsii_.StaticInvoke(\n\t\t\"monocdk.aws_opsworks.CfnUserProfile\",\n\t\t\"isCfnElement\",\n\t\t[]interface{}{x},\n\t\t&returns,\n\t)\n\n\treturn returns\n}", "func (o *ViewCustomFieldTask) HasCustomfieldId() bool {\n\tif o != nil && o.CustomfieldId != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (me TpubStatusInt) IsPmc() bool { return me.String() == \"pmc\" }", "func (o NetworkPeeringResponseOutput) ImportCustomRoutes() pulumi.BoolOutput {\n\treturn o.ApplyT(func(v NetworkPeeringResponse) bool { return v.ImportCustomRoutes }).(pulumi.BoolOutput)\n}", "func (me TxsdImpactSimpleContentExtensionType) IsSocialEngineering() bool {\n\treturn me.String() == \"social-engineering\"\n}", "func (_Eth *EthCaller) IsPeerAPublisher(opts *bind.CallOpts, a common.Address, topic string) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _Eth.contract.Call(opts, out, \"isPeerAPublisher\", a, topic)\n\treturn *ret0, err\n}", "func (c *Tag) Own(name string) bool {\n\tif c.Name == name 
{\n\t\treturn true\n\t}\n\talias := c.Value(OptAlias)\n\tif util.ListIndex(alias, name) > -1 {\n\t\treturn true\n\t}\n\treturn false\n}", "func IsHtmlNode(n *html.Node, name string) bool {\n\tif n == nil {\n\t\treturn false\n\t}\n\treturn n.Type == html.ElementNode && n.Data == name\n}" ]
[ "0.61564237", "0.6037972", "0.57607794", "0.5394972", "0.5383893", "0.5306295", "0.5250998", "0.4929128", "0.49054834", "0.4893038", "0.48832464", "0.47255784", "0.4680999", "0.4679311", "0.46730384", "0.46495634", "0.4607126", "0.45523682", "0.4546803", "0.4519753", "0.44830775", "0.44710663", "0.44604328", "0.44460517", "0.44368148", "0.4432061", "0.4350615", "0.43239453", "0.43001574", "0.42770922", "0.42725202", "0.42686775", "0.42566338", "0.42532954", "0.42252195", "0.42133614", "0.41988122", "0.41951334", "0.41932768", "0.41920763", "0.41830996", "0.41678685", "0.4163293", "0.41479692", "0.41432574", "0.4132267", "0.4132267", "0.41289333", "0.4106129", "0.40841675", "0.40826198", "0.4069419", "0.40654254", "0.4062507", "0.40426552", "0.40382573", "0.40261704", "0.40068442", "0.3998398", "0.39823464", "0.3980508", "0.39757347", "0.39670137", "0.39646804", "0.39615083", "0.39551237", "0.39526412", "0.39526412", "0.39333043", "0.3930752", "0.392644", "0.3900278", "0.38966238", "0.38943586", "0.3892268", "0.38920993", "0.38915524", "0.38911447", "0.38904983", "0.38876837", "0.3887181", "0.38829678", "0.38818797", "0.3867952", "0.38651502", "0.38640144", "0.38619438", "0.38588387", "0.38521594", "0.3852153", "0.38494107", "0.3845624", "0.38419357", "0.38410625", "0.38341922", "0.38317704", "0.38290843", "0.3828726", "0.38284162", "0.38278666" ]
0.90020335
0
AMPExtensionScriptDefinition returns a unique script definition that takes into account the extension name, version and if it is module/nomodule. Example (ampad): ampad0.1.js (regular/nomodule), ampad0.1.mjs (module). The AMP Validator prevents a mix of regular and nomodule extensions. If the pattern is not found then uses value of "src" attribute. Returns ok=false if this isn't an extension.
func AMPExtensionScriptDefinition(n *html.Node) (string, bool) { if n.DataAtom != atom.Script { return "", false } src, hasSrc := htmlnode.GetAttributeVal(n, "", "src") if hasSrc { m := srcURLRE.FindStringSubmatch(src) if len(m) < 2 { return src, true } return m[1], true } return "", false }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func IsScriptAMPExtension(n *html.Node) bool {\n\t_, ok := AMPExtensionName(n)\n\treturn ok\n}", "func AMPExtensionName(n *html.Node) (string, bool) {\n\tif n.DataAtom != atom.Script {\n\t\treturn \"\", false\n\t}\n\tfor _, attr := range n.Attr {\n\t\tfor _, k := range []string{AMPCustomElement, AMPCustomTemplate, AMPHostService} {\n\t\t\tif attr.Key == k {\n\t\t\t\treturn attr.Val, true\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\", false\n}", "func IsScriptAMPRuntime(n *html.Node) bool {\n\tif n.DataAtom != atom.Script {\n\t\treturn false\n\t}\n\tif v, ok := htmlnode.GetAttributeVal(n, \"\", \"src\"); ok {\n\t\treturn htmlnode.HasAttribute(n, \"\", \"async\") &&\n\t\t\t!IsScriptAMPExtension(n) &&\n\t\t\tstrings.HasPrefix(v, AMPCacheRootURL) &&\n\t\t\t(strings.HasSuffix(v, \"/v0.js\") ||\n\t\t\t\tstrings.HasSuffix(v, \"/v0.mjs\") ||\n\t\t\t\tstrings.HasSuffix(v, \"/amp4ads-v0.js\") ||\n\t\t\t\tstrings.HasSuffix(v, \"/amp4ads-v0.mjs\"))\n\t}\n\treturn false\n}", "func (*Matcher) IsSupportedManifestFormat(filename string) bool {\n\tlog.Debug().Msgf(\"Executing: IsSupportedManifestFormat\")\n\tbasename := filepath.Base(filename)\n\tmatch, _ := regexp.MatchString(\"pom.xml$\", basename)\n\tlog.Debug().Bool(\"regex\", match).Str(\"path\", filename).Msg(\"IsSupportedManifest\")\n\treturn match\n}", "func isValidExtension(ext string, extensions []string) bool {\n\tfor _, ex := range extensions {\n\t\tif ex == ext {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func extensionPattern(pattern *regexp.Regexp) *regexp.Regexp {\n\treturn suffixPattern(regexp.MustCompile(\"(^|/)[^/]+.\" + pattern.String()))\n}", "func registerScript(n *html.Node, hn *headNodes) {\n\tif amphtml.IsScriptAMPRuntime(n) {\n\t\thn.scriptAMPRuntime = append(hn.scriptAMPRuntime, n)\n\t\treturn\n\t}\n\tif amphtml.IsScriptAMPViewer(n) {\n\t\thn.scriptAMPViewer = n\n\t\treturn\n\t}\n\tif amphtml.IsScriptAMPExtension(n) {\n\t\tif amphtml.IsScriptRenderDelaying(n) {\n\t\t\thn.scriptRenderDelaying = 
append(hn.scriptRenderDelaying, n)\n\t\t\treturn\n\t\t}\n\t\thn.scriptNonRenderDelaying = append(hn.scriptNonRenderDelaying, n)\n\t\treturn\n\t}\n\thn.other = append(hn.other, n)\n}", "func IsScriptAMPViewer(n *html.Node) bool {\n\tif n.DataAtom != atom.Script {\n\t\treturn false\n\t}\n\ta, ok := htmlnode.FindAttribute(n, \"\", \"src\")\n\treturn ok &&\n\t\t!IsScriptAMPExtension(n) &&\n\t\tstrings.HasPrefix(a.Val,\n\t\t\tAMPCacheSchemeAndHost+\"/v0/amp-viewer-integration-\") &&\n\t\tstrings.HasSuffix(a.Val, \".js\") &&\n\t\thtmlnode.HasAttribute(n, \"\", \"async\")\n}", "func (lm LinksManager) AutoAssignExtension(url *url.URL, t resource.Type) bool {\n\treturn true\n}", "func IsManifest(filename string) (bool, error) {\n\tmatched, err := filepath.Match(machineConfigFileNamePattern, filename)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn matched, nil\n}", "func (m *Media) IsValid() bool {\n if ext := filepath.Ext(m.FullPath); len(ext) > 0 {\n for _, pattern := range extpatterns {\n match, err := filepath.Match(\".\"+pattern, ext)\n if err != nil {\n fmt.Println(\"malfoemd pattern?\")\n return false\n }\n if match {\n return true\n }\n }\n }\n\n return false\n}", "func MatchExt(exts ...string) MatcherFunc { return MatchExts(exts) }", "func (l *Language) MatchFilename(name string) bool {\n\t// If you adjust this implementation, remember to update CompileByFilename\n\tfor _, n := range l.Filenames {\n\t\tif name == n {\n\t\t\treturn true\n\t\t}\n\t}\n\tif ext := path.Ext(name); ext != \"\" {\n\t\tfor _, x := range l.Extensions {\n\t\t\tif strings.EqualFold(ext, x) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func NewExtensionManifest(raw *string) graphqlbackend.ExtensionManifest {\n\tif raw == nil {\n\t\treturn nil\n\t}\n\treturn &extensionManifest{raw: *raw}\n}", "func (m *modulePat) match(file string) bool {\n\tif m.literal {\n\t\treturn file == m.pattern\n\t}\n\tmatch, _ := filepath.Match(m.pattern, file)\n\treturn match\n}", "func 
HasAdaptationFieldExtension(pkt *packet.Packet) bool {\n\treturn pkt[5]&0x01 != 0\n}", "func IsScriptRenderDelaying(n *html.Node) bool {\n\tif n.DataAtom != atom.Script {\n\t\treturn false\n\t}\n\tif IsScriptAMPViewer(n) {\n\t\treturn true\n\t}\n\tif v, ok := htmlnode.GetAttributeVal(n, \"\", AMPCustomElement); ok {\n\t\t// TODO(b/77581738): Remove amp-story from this list.\n\t\treturn (v == AMPDynamicCSSClasses ||\n\t\t\tv == AMPExperiment ||\n\t\t\tv == AMPStory)\n\t}\n\treturn false\n}", "func GetExtension(uri string) (extension int) {\n\textension = STRING\n\tif strings.HasSuffix(uri, \"json\") {\n\t\textension = JSON\n\t}\n\n\tif strings.HasSuffix(uri, \"yaml\") || strings.HasSuffix(uri, \"yml\") {\n\t\textension = YAML\n\t}\n\n\tif strings.HasSuffix(uri, \"xml\") {\n\t\textension = XML\n\t}\n\treturn\n}", "func IsExtFormatValid(ext string) bool {\n\tif string(ext[0]) != \".\" {\n\t\treturn false\n\t}\n\n\tfor _, letter := range ext[1:] {\n\t\tif !unicode.IsLetter(rune(letter)) && !unicode.IsDigit(rune(letter)) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func HasTemplateExt(paths string) bool {\n\tfor _, v := range beeTemplateExt {\n\t\tif strings.HasSuffix(paths, \".\"+v) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (me TxsdRecordPatternSimpleContentExtensionType) IsRegex() bool { return me.String() == \"regex\" }", "func RegisterExtensionModule(nsURL string, m Module) {\n\tns := strings.TrimPrefix(strings.TrimPrefix(nsURL, \"http://\"), \"https://\")\n\tb := modules[:0]\n\tfor _, v := range modules {\n\t\tif lookupModule(ns) == nil {\n\t\t\tb = append(b, v)\n\t\t}\n\t}\n\tb = append(b, module{ns, m})\n\tmodules = b\n}", "func ValidFileExtension(fp string, ext string) bool {\n\tif filepath.Ext(fp) != ext {\n\t\treturn false\n\t}\n\treturn true\n}", "func FormatFromExt(path string, mapping map[string]string) (string, string) {\n\tbase := strings.Trim(filepath.Ext(path), \".\")\n\tkind := getFormat(\".\" + base)\n\n\tif format, found 
:= mapping[base]; found {\n\t\tif kind == \"code\" {\n\t\t\t// NOTE: This is a special case of embedded markup within code.\n\t\t\treturn format, \"fragment\"\n\t\t}\n\t\tbase = format\n\t}\n\n\tbase = \".\" + base\n\tfor r, f := range FormatByExtension {\n\t\tm, _ := regexp.MatchString(r, base)\n\t\tif m {\n\t\t\treturn f[0], f[1]\n\t\t}\n\t}\n\n\treturn \"unknown\", \"unknown\"\n}", "func HasTemplateExt(paths string) bool {\n\tfor _, v := range templateExt {\n\t\tif strings.HasSuffix(paths, \".\"+v) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (f *FileSpec) Match(input string) bool {\n\n\tfext := strings.ToLower(filepath.Ext(input))\n\n\tfor i := 0; i < len(f.Extensions); i++ {\n\n\t\tif f.Extensions[i] == fext {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func HasExtension(baseFileName string, extensions []string) bool {\n\n\tvar lowerExtension = filepath.Ext(baseFileName)\n\tfor _, extension := range extensions {\n\t\tif lowerExtension == strings.ToLower(extension) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (c *Client) PackageExtension(ctx context.Context, opts PackageBuildpackOptions) error {\n\tif opts.Format == \"\" {\n\t\topts.Format = FormatImage\n\t}\n\n\tif opts.Config.Platform.OS == \"windows\" && !c.experimental {\n\t\treturn NewExperimentError(\"Windows extensionpackage support is currently experimental.\")\n\t}\n\n\terr := c.validateOSPlatform(ctx, opts.Config.Platform.OS, opts.Publish, opts.Format)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\twriterFactory, err := layer.NewWriterFactory(opts.Config.Platform.OS)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"creating layer writer factory\")\n\t}\n\n\tpackageBuilder := buildpack.NewBuilder(c.imageFactory)\n\n\texURI := opts.Config.Extension.URI\n\tif exURI == \"\" {\n\t\treturn errors.New(\"extension URI must be provided\")\n\t}\n\n\tmainBlob, err := c.downloadBuildpackFromURI(ctx, exURI, opts.RelativeBaseDir)\n\tif err != nil {\n\t\treturn 
err\n\t}\n\n\tex, err := buildpack.FromExtensionRootBlob(mainBlob, writerFactory)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"creating extension from %s\", style.Symbol(exURI))\n\t}\n\n\tpackageBuilder.SetExtension(ex)\n\n\tswitch opts.Format {\n\tcase FormatFile:\n\t\treturn packageBuilder.SaveAsFile(opts.Name, opts.Config.Platform.OS)\n\tcase FormatImage:\n\t\t_, err = packageBuilder.SaveAsImage(opts.Name, opts.Publish, opts.Config.Platform.OS)\n\t\treturn errors.Wrapf(err, \"saving image\")\n\tdefault:\n\t\treturn errors.Errorf(\"unknown format: %s\", style.Symbol(opts.Format))\n\t}\n}", "func isSupportedType(fileName string) bool {\n\tparts := strings.Split(fileName, \".\")\n\textension := parts[len(parts)-1]\n\tsupported := false\n\tif len(parts) > 1 && len(extension) > 0 {\n\t\tfor _, el := range supportedExtensions {\n\t\t\tif extension == el {\n\t\t\t\tsupported = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn supported\n}", "func PlaceholderExtension() gval.Language {\n\treturn placeholderExtension\n}", "func ScriptHasSuffix(v string) predicate.AllocationStrategy {\n\treturn predicate.AllocationStrategy(func(s *sql.Selector) {\n\t\ts.Where(sql.HasSuffix(s.C(FieldScript), v))\n\t})\n}", "func (me TxsdImpactSimpleContentExtensionType) IsExtValue() bool { return me.String() == \"ext-value\" }", "func packageHasVersionSuffix(pkg string) bool {\n\tif pkg == \"\" {\n\t\treturn false\n\t}\n\tparts := strings.Split(pkg, \".\")\n\tif len(parts) < 2 {\n\t\treturn false\n\t}\n\tlastPart := parts[len(parts)-1]\n\tif len(lastPart) < 2 {\n\t\treturn false\n\t}\n\tif lastPart[0] != 'v' {\n\t\treturn false\n\t}\n\tversion := lastPart[1:]\n\tif strings.Contains(version, \"test\") {\n\t\tsplit := strings.SplitN(version, \"test\", 2)\n\t\tif len(split) != 2 {\n\t\t\treturn false\n\t\t}\n\t\treturn stringIsPositiveNumber(split[0])\n\t}\n\tif strings.Contains(version, \"alpha\") {\n\t\treturn packageVersionIsValidAlphaOrBeta(version, \"alpha\")\n\t}\n\tif 
strings.Contains(version, \"beta\") {\n\t\treturn packageVersionIsValidAlphaOrBeta(version, \"beta\")\n\t}\n\treturn stringIsPositiveNumber(version)\n}", "func (c *Client) Extension(ext string) (bool, string) {\n\tif err := c.hello(); err != nil {\n\t\treturn false, \"\"\n\t}\n\tif c.ext == nil {\n\t\treturn false, \"\"\n\t}\n\text = strings.ToUpper(ext)\n\tparam, ok := c.ext[ext]\n\treturn ok, param\n}", "func (artifact *Artifact) extension() string {\n\tconst unknown = \"\"\n\ti := strings.Index(artifact.File, \".\")\n\n\tif i != -1 {\n\t\ti = 1 + i\n\t\treturn artifact.File[i:]\n\t}\n\treturn unknown\n}", "func HasExt(fname, suffix string) bool {\n\treturn strings.HasSuffix(fname, \".\"+suffix) && (!strings.HasPrefix(fname, \".\") || strings.HasPrefix(fname, \"_\"))\n}", "func FileNameIsGlob(pattern string) bool {\n\t_, err := regexp.Compile(pattern)\n\treturn err == nil\n}", "func Extension() string {\n\treturn getRandValue([]string{\"file\", \"extension\"})\n}", "func (m *ExtensionMetadata) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateBundleInfo(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateDeprecationNotice(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateLink(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateLinkDocs(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateProvidedServiceAPIs(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateRestricted(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (p Platform) Extension() string {\n\tif p.IsWindows() {\n\t\treturn \"zip\"\n\t}\n\treturn \"tar.gz\"\n}", "func altScript(l language.Language, s language.Script) 
language.Script {\n\tfor _, alt := range matchScript {\n\t\t// TODO: also match cases where language is not the same.\n\t\tif (language.Language(alt.wantLang) == l || language.Language(alt.haveLang) == l) &&\n\t\t\tlanguage.Script(alt.haveScript) == s {\n\t\t\treturn language.Script(alt.wantScript)\n\t\t}\n\t}\n\treturn 0\n}", "func (me TxsdImpactSimpleContentExtensionType) IsExtortion() bool { return me.String() == \"extortion\" }", "func matchGVK(definitionName, gvk string) bool {\n\tpaths := strings.Split(definitionName, \".\")\n\n\tgvkMap := make(map[string]bool)\n\tfor _, p := range paths {\n\t\tgvkMap[p] = true\n\t}\n\n\tgroup, version, kind := parseGVK(gvk)\n\n\tok := gvkMap[kind]\n\tif !ok {\n\t\treturn false\n\t}\n\tok = gvkMap[version]\n\tif !ok {\n\t\treturn false\n\t}\n\n\tif !groupMatches(gvkMap, group, kind) {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (me TxsdImpactSimpleContentExtensionType) IsFile() bool { return me.String() == \"file\" }", "func (t *Track) buildAudioManifestAdaptation() string {\n\tchunksDuration := int64(0)\n\tfor i:= 0; i < len(t.chunksDuration); i++ {\n\t\tchunksDuration += t.chunksDuration[i]\n\t}\n\tres := `\n <SegmentTemplate\n timescale=\"` + strconv.Itoa(t.timescale) + `\"\n initialization=\"init_$RepresentationID$.mp4\"\n media=\"chunk_$RepresentationID$_$Time$.mp4\"\n startNumber=\"1\">\n <SegmentTimeline>`\n\t/* Build each chunk entry */\n\tfor i, duration := range t.chunksDuration {\n\t\tif i == 0 {\n\t\t\tres += `\n <S t=\"` + strconv.FormatInt(t.currentDuration - chunksDuration, 10) + `\" d=\"` + strconv.FormatInt(duration, 10) + `\" />`\n\t\t} else {\n\t\t\tres += `\n <S d=\"` + strconv.FormatInt(duration, 10) + `\" />`\n\t\t}\n\t}\n\tres += `\n </SegmentTimeline>\n </SegmentTemplate>`\n\treturn res\n}", "func (me TxsdCounterSimpleContentExtensionType) IsExtValue() bool { return me.String() == \"ext-value\" }", "func IsHandler(name string, lang string) bool {\n\tbasename := 
filepath.Base(name)\n\tnameWithoutExt := strings.TrimSuffix(basename, filepath.Ext(basename))\n\tif constants.ExtLangMapping[filepath.Ext(basename)] != lang {\n\t\treturn false\n\t}\n\n\treturn (nameWithoutExt == \"fx\" ||\n\t\t// Fx is for Java\n\t\tnameWithoutExt == \"Fx\" ||\n\t\t// mod.rs is for Rust)\n\t\tnameWithoutExt == \"mod\")\n}", "func ExtensionByType(typ string) string {\n\tmimeLock.RLock()\n\textension := mimeExtensions[typ]\n\tmimeLock.RUnlock()\n\treturn extension\n}", "func getScript(name string) (script, bool) {\n\tscript, ok := scriptRegistry[name]\n\treturn script, ok\n}", "func AvMatchExt(f, e string) int {\n\tcf := C.CString(f)\n\tdefer C.free(unsafe.Pointer(cf))\n\tce := C.CString(e)\n\tdefer C.free(unsafe.Pointer(ce))\n\treturn int(C.av_match_ext(cf, ce))\n}", "func (this *RTPPacket) HasExtension() bool {\n\treturn this.header.extension != 0\n}", "func getExtension(name string) string {\n\tname = strings.ToLower(name)\n\textension, ok := extensionCache.Load(name)\n\n\tif ok {\n\t\treturn extension.(string)\n\t}\n\n\text := filepath.Ext(name)\n\n\tif ext == \"\" || strings.LastIndex(name, \".\") == 0 {\n\t\textension = name\n\t} else {\n\t\t// Handling multiple dots or multiple extensions only needs to delete the last extension\n\t\t// and then call filepath.Ext.\n\t\t// If there are multiple extensions, it is the value of subExt,\n\t\t// otherwise subExt is an empty string.\n\t\tsubExt := filepath.Ext(strings.TrimSuffix(name, ext))\n\t\textension = strings.TrimPrefix(subExt+ext, \".\")\n\t}\n\n\textensionCache.Store(name, extension)\n\treturn extension.(string)\n}", "func RequireScriptURL(url string, priority int) View {\n\treturn RenderView(\n\t\tfunc(ctx *Context) error {\n\t\t\tctx.Response.RequireScriptURL(url, priority)\n\t\t\treturn nil\n\t\t},\n\t)\n}", "func isGoMod(uri span.URI) bool {\n\treturn filepath.Base(uri.Filename()) == \"go.mod\"\n}", "func (me TdurationType) IsExtValue() bool { return me.String() == \"ext-value\" }", "func 
isMetadataFile(filename string) bool {\n\treturn metadataMatch.MatchString(filename)\n}", "func AvMatchExt(filename, extensions string) int {\n\tCfilename := C.CString(filename)\n\tdefer C.free(unsafe.Pointer(Cfilename))\n\n\tCextensions := C.CString(extensions)\n\tdefer C.free(unsafe.Pointer(Cextensions))\n\n\treturn int(C.av_match_ext(Cfilename, Cextensions))\n}", "func (d *Device) HasExtension(extension string) bool {\n\tfor _, v := range d.Extensions {\n\t\tif v == extension {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (t *Track) buildVideoManifestAdaptation() string {\n\tchunksDuration := int64(0)\n\tfor i:= 0; i < len(t.chunksDuration); i++ {\n\t\tchunksDuration += t.chunksDuration[i]\n\t}\n\tres := `\n <SegmentTemplate\n timescale=\"` + strconv.Itoa(t.timescale) + `\"\n initialization=\"init_$RepresentationID$.mp4\"\n media=\"chunk_$RepresentationID$_$Time$.mp4\"\n startNumber=\"1\">\n <SegmentTimeline>`\n\t/* Build each chunk entry */\n\tfor i, duration := range t.chunksDuration {\n\t\tif i == 0 {\n\t\t\tres += `\n <S t=\"` + strconv.FormatInt(t.currentDuration - chunksDuration, 10) + `\" d=\"` + strconv.FormatInt(duration, 10) + `\" />`\n\t\t} else {\n\t\t\tres += `\n <S d=\"` + strconv.FormatInt(duration, 10) + `\" />`\n\t\t}\n\t}\n\tres += `\n </SegmentTimeline>\n </SegmentTemplate>`\n\treturn res\n}", "func (d *Deployment) HMACPresent(version string) bool {\n\n\t// Generate the filename, and check whether file already exists.\n\t_, exists := makeHMACPath(d.artifactDir, d.appName, version, d.acfg.Extension)\n\treturn exists\n}", "func maybeAppendFormattedExtension(\n\tnumber *PhoneNumber,\n\tmetadata *PhoneMetadata,\n\tnumberFormat PhoneNumberFormat,\n\tformattedNumber *Builder) {\n\n\textension := number.GetExtension()\n\tif len(extension) == 0 {\n\t\treturn\n\t}\n\n\tprefExtn := metadata.GetPreferredExtnPrefix()\n\tif numberFormat == RFC3966 {\n\t\tformattedNumber.WriteString(RFC3966_EXTN_PREFIX)\n\t} else if len(prefExtn) > 0 
{\n\t\tformattedNumber.WriteString(prefExtn)\n\t} else {\n\t\tformattedNumber.WriteString(DEFAULT_EXTN_PREFIX)\n\t}\n\tformattedNumber.WriteString(extension)\n}", "func isAllowedFileType(mimeType string) bool {\n\t_, exists := validFileTypes[mimeType]\n\n\treturn exists\n}", "func TestFormats25to26Minversion(t *testing.T) {\n\tts := newTestSwupd(t, \"format25to26minversion\")\n\tdefer ts.cleanup()\n\n\tts.Bundles = []string{\"test-bundle\"}\n\n\t// format25 MoM should NOT have minversion in header, which is introduced\n\t// in format26. (It should also not have it because minversion is set to 0)\n\tts.Format = 25\n\tts.addFile(10, \"test-bundle\", \"/foo\", \"content\")\n\tts.createManifests(10)\n\n\texpSubs := []string{\n\t\t\"MANIFEST\\t25\",\n\t\t\"version:\\t10\",\n\t\t\"previous:\\t0\",\n\t\t\"filecount:\\t2\",\n\t\t\"timestamp:\\t\",\n\t\t\"contentsize:\\t\",\n\t\t\"includes:\\tos-core\",\n\t\t\"10\\t/foo\",\n\t\t\"10\\t/usr/share\",\n\t}\n\tcheckManifestContains(t, ts.Dir, \"10\", \"test-bundle\", expSubs...)\n\n\tnExpSubs := []string{\n\t\t\"\\t0\\t/foo\",\n\t\t\".d..\\t\",\n\t\t\"minversion:\\t\",\n\t}\n\tcheckManifestNotContains(t, ts.Dir, \"10\", \"test-bundle\", nExpSubs...)\n\n\t// minversion now set to 20, but the MoM should still NOT have minversion\n\t// in header due to format25 being used\n\tts.MinVersion = 20\n\tts.addFile(20, \"test-bundle\", \"/foo\", \"new content\")\n\tts.createManifests(20)\n\n\texpSubs = []string{\n\t\t\"MANIFEST\\t25\",\n\t\t\"version:\\t20\",\n\t\t\"previous:\\t10\",\n\t\t\"filecount:\\t2\",\n\t\t\"includes:\\tos-core\",\n\t\t\"20\\t/foo\",\n\t}\n\tcheckManifestContains(t, ts.Dir, \"20\", \"test-bundle\", expSubs...)\n\tcheckManifestNotContains(t, ts.Dir, \"20\", \"MoM\", \"minversion:\\t\")\n\n\t// updated to format26, minversion still set to 20, so we should see\n\t// minversion header in the MoM\n\tts.Format = 26\n\tts.addFile(30, \"test-bundle\", \"/foo\", \"even newer content\")\n\tts.createManifests(30)\n\texpSubs = 
[]string{\n\t\t\"MANIFEST\\t26\",\n\t\t\"version:\\t30\",\n\t\t\"previous:\\t20\",\n\t\t\"filecount:\\t2\",\n\t\t\"includes:\\tos-core\",\n\t}\n\tcheckManifestContains(t, ts.Dir, \"30\", \"test-bundle\", expSubs...)\n\tcheckManifestContains(t, ts.Dir, \"30\", \"MoM\", \"minversion:\\t20\")\n}", "func isValidVersionFormat(version string) bool {\n\tmatch, _ := regexp.MatchString(\"[0-9]+\\\\.[0-9]+\\\\.[0-9]+\", version)\n\treturn match\n}", "func (ps *PlatformStrings) HasExt(ext string) bool {\n\treturn ps.firstExtFile(ext) != \"\"\n}", "func (g *Group) ScriptName() string {\n\treturn fmt.Sprintf(\"%s-%s.min.js\", g.name, g.scripthash)\n}", "func (v *parameter) Extension(key string) (interface{}, bool) {\n\te, ok := v.extensions[key]\n\treturn e, ok\n}", "func HasApiExtension(extension string) bool {\n\tif runtimeLiblxcVersionAtLeast(3, 1, 0) {\n\t\tapiExtension := C.CString(extension)\n\t\tdefer C.free(unsafe.Pointer(apiExtension))\n\t\treturn bool(C.go_lxc_has_api_extension(apiExtension))\n\t}\n\treturn false\n}", "func isScript(n *html.Node) bool {\n\treturn n.Type == html.ElementNode && n.Data == \"script\"\n}", "func RequireHeadScriptURL(url string, priority int) View {\n\treturn RenderView(\n\t\tfunc(ctx *Context) error {\n\t\t\tctx.Response.RequireHeadScriptURL(url, priority)\n\t\t\treturn nil\n\t\t},\n\t)\n}", "func MustLookup(extension string) (contentType string) {\n\tvar e error\n\tif contentType, e = Lookup(extension); e != nil {\n\t\tpanic(fmt.Sprintf(\"Lookup failed: %s\\n\", e))\n\t}\n\treturn contentType\n}", "func GetChromeExtensionMatchSet() MatchSet {\n\treturn MatchSet{\n\t\t[]string{\"http\", \"https\", \"file\", \"ftp\", \"urn\"},\n\t\t[]string{\"http\", \"https\"},\n\t}\n}", "func ExecutableExtension(platform string) string {\n\tswitch platform {\n\tcase \"windows\":\n\t\treturn \".exe\"\n\tdefault:\n\t\treturn \"\"\n\t}\n}", "func (suite *testSuite) TestGetRuntimeNameFromBuildPathFailsOnUnknownExtension() 
{\n\tsuite.builder.options.FunctionConfig.Spec.Runtime = \"\"\n\tsuite.builder.options.FunctionConfig.Spec.Build.Path = \"/foo.bar\"\n\t_, err := suite.builder.getRuntimeName()\n\n\tsuite.Require().Error(err, \"Unsupported file extension: %s\", \"bar\")\n}", "func checkExtOfOutputFile(fileName string) error {\n\n\tfileNameArray := strings.Split(fileName, \".\")\n\text := fileNameArray[len(fileNameArray)-1]\n\n\tif ext != \"zip\" {\n\t\treturn errors.New(\"You must use a valid extension ! Ex : something.zip\")\n\t}\n\n\treturn nil\n}", "func (cfg *Config) Spec() (*runtime.RawExtension, error) {\n\text := &runtime.RawExtension{}\n\tb, err := json.Marshal(cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\text.Raw = b\n\treturn ext, nil\n}", "func (d *Decoder) Extension() decryptor.Extension {\n\treturn \"mp4\"\n}", "func (t *Templates) check(ext string) bool {\n\tif len(t.Extensions) == 0 {\n\t\treturn true\n\t}\n\n\tfor x := range t.Extensions {\n\t\tif ext == x {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func isMetadataFile(filename string) bool {\n\treturn strings.HasSuffix(filename, metaFileExt)\n}", "func nameMatch(path, pattern string) (bool, error) {\n\tbase := filepath.Base(path)\n\n\treturn filepath.Match(pattern, base)\n}", "func CreateSchemaExtensionFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {\n return NewSchemaExtension(), nil\n}", "func (l *Language) primaryMatchFilename(name string) bool {\n\tfor _, n := range l.Filenames {\n\t\tif name == n {\n\t\t\treturn true\n\t\t}\n\t}\n\tif ext := path.Ext(name); ext != \"\" && len(l.Extensions) > 0 {\n\t\treturn strings.EqualFold(l.Extensions[0], ext)\n\t}\n\treturn false\n}", "func (p *CertProfile) IsAllowedExtention(oid csr.OID) bool {\n\tfor _, allowed := range p.AllowedExtensions {\n\t\tif allowed.Equal(oid) {\n\t\t\treturn 
true\n\t\t}\n\t}\n\treturn false\n}", "func checkForNonceExtension(exts []pkix.Extension) *pkix.Extension {\n\tnonce_oid := asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 1, 2}\n\tfor _, ext := range exts {\n\t\tif ext.Id.Equal(nonce_oid) {\n\t\t\treturn &ext\n\t\t}\n\t}\n\treturn nil\n}", "func TestDefinition(t *testing.T) {\n\tt.Parallel()\n\n\ttree := writeTree(t, `\n-- go.mod --\nmodule example.com\ngo 1.18\n\n-- a.go --\npackage a\nimport \"fmt\"\nfunc f() {\n\tfmt.Println()\n}\nfunc g() {\n\tf()\n}\n`)\n\t// missing position\n\t{\n\t\tres := gopls(t, tree, \"definition\")\n\t\tres.checkExit(false)\n\t\tres.checkStderr(\"expects 1 argument\")\n\t}\n\t// intra-package\n\t{\n\t\tres := gopls(t, tree, \"definition\", \"a.go:7:2\") // \"f()\"\n\t\tres.checkExit(true)\n\t\tres.checkStdout(\"a.go:3:6-7: defined here as func f\")\n\t}\n\t// cross-package\n\t{\n\t\tres := gopls(t, tree, \"definition\", \"a.go:4:7\") // \"Println\"\n\t\tres.checkExit(true)\n\t\tres.checkStdout(\"print.go.* defined here as func fmt.Println\")\n\t\tres.checkStdout(\"Println formats using the default formats for its operands\")\n\t}\n\t// -json and -markdown\n\t{\n\t\tres := gopls(t, tree, \"definition\", \"-json\", \"-markdown\", \"a.go:4:7\")\n\t\tres.checkExit(true)\n\t\tvar defn cmd.Definition\n\t\tif res.toJSON(&defn) {\n\t\t\tif !strings.HasPrefix(defn.Description, \"```go\\nfunc fmt.Println\") {\n\t\t\t\tt.Errorf(\"Description does not start with markdown code block. 
Got: %s\", defn.Description)\n\t\t\t}\n\t\t}\n\t}\n}", "func (me TxsdRecordPatternSimpleContentExtensionType) IsExtValue() bool {\n\treturn me.String() == \"ext-value\"\n}", "func looksLikeFilename(s string) bool {\n\tif len(strings.Fields(s)) > 1 {\n\t\treturn false\n\t}\n\treturn PatFilenameLike.MatchString(s)\n}", "func isValidVersion(version string) bool {\n\tif len(version) == 0 {\n\t\treturn true\n\t}\n\n\tisOk, _ := regexp.MatchString(\"^[1-9]\\\\.([8-9]\\\\d*|[1-9]\\\\d+)|^[1-9]\\\\d+\\\\.|^[2-9]\\\\.\", version)\n\treturn isOk\n}", "func (a ImageRegistryAnonymizer) GetExtension() string {\n\treturn \"json\"\n}", "func MatchExts(exts []string) MatcherFunc {\n\treturn func(el Elem) bool {\n\t\telExt := path.Ext(el.Name())\n\t\tfor _, ext := range exts {\n\t\t\tif ext == elExt {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n}", "func IsSuffixSupported(value string) bool {\n\tfor _, ext := range supportedExtensions {\n\t\tif strings.HasSuffix(value, ext) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func MimeTypeByExtension(extension string) string {\n\tvar m string\n\n\tif !strings.HasPrefix(extension, \".\") {\n\t\textension = \".\" + extension\n\t}\n\tif m = mime.TypeByExtension(extension); m != \"\" {\n\t\treturn m\n\t}\n\n\treturn mimeType[strings.TrimLeft(extension, \".\")]\n}", "func TestModuleName(t *testing.T) {\n\tmodule := \"telegram\"\n\twant := regexp.MustCompile(Name)\n\tif !want.MatchString(Name) {\n\t\tt.Fatalf(`The module name does not match: %q != %q`, module, want)\n\t}\n}", "func IsValidModuleIdentifier(id ModuleIdentifier) bool {\n\treturn id >= 'a' && id <= 'z'\n}", "func checkForNonceExtension(exts []pkix.Extension) *pkix.Extension {\n\tnonce_oid := asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 1, 2}\n\tfor _, ext := range exts {\n\t\tif ext.Id.Equal(nonce_oid) {\n\t\t\tlog.Println(\"Detected nonce extension\")\n\t\t\treturn &ext\n\t\t}\n\t}\n\treturn nil\n}", "func isImage(extension string) bool {\n\tfor _, v := 
range imageTypes {\n\t\tif strings.Compare(v, extension) == 0 {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func NewPolicyDefinitionAtManagementGroup(ctx *pulumi.Context,\n\tname string, args *PolicyDefinitionAtManagementGroupArgs, opts ...pulumi.ResourceOption) (*PolicyDefinitionAtManagementGroup, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.ManagementGroupId == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'ManagementGroupId'\")\n\t}\n\taliases := pulumi.Aliases([]pulumi.Alias{\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:authorization/v20190101:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:authorization:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:authorization:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:authorization/v20161201:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:authorization/v20161201:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:authorization/v20180301:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:authorization/v20180301:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:authorization/v20180501:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:authorization/v20180501:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:authorization/v20190601:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:authorization/v20190601:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: 
pulumi.String(\"azure-native:authorization/v20190901:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:authorization/v20190901:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:authorization/v20200301:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:authorization/v20200301:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:authorization/v20200901:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:authorization/v20200901:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t})\n\topts = append(opts, aliases)\n\tvar resource PolicyDefinitionAtManagementGroup\n\terr := ctx.RegisterResource(\"azure-native:authorization/v20190101:PolicyDefinitionAtManagementGroup\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (t *Track) buildAudioManifestRepresentation() string {\n\tres := `\n <Representation\n id=\"audio` + strconv.Itoa(t.index) + `\"\n bandwidth=\"` + strconv.Itoa(t.bandwidth) + `\"\n codecs=\"` + t.codec + `\"\n audioSamplingRate=\"` + strconv.Itoa(t.sampleRate) + `\">\n <AudioChannelConfiguration\n schemeIdUri=\"urn:mpeg:dash:23003:3:audio_channel_configuration:2011\"\n value=\"2\">\n </AudioChannelConfiguration>\n </Representation>`\n\treturn res\n}", "func baseDeploymentNameValid(name string) bool {\n\tre := regexp.MustCompile(`^base_deployment_(.+)$`)\n\tif ok := re.MatchString(name); ok {\n\t\treturn true\n\t}\n\treturn false\n}", "func GetFormat(src string) (Format, bool) {\n\t// is it a filename with an extension?\n\ttmp := filepath.Ext(src)\n\tisFile := true\n\t// fmt.Println(\"tmp\", tmp)\n\tif tmp == \"\" {\n\t\ttmp = src\n\t\tisFile = false\n\t}\n\n\tswitch tmp {\n\tcase \"\":\n\t\treturn FormatUnknown, isFile\n\n\tcase \"FZP\", \"fzp\", \".FZP\", \".fzp\":\n\t\treturn 
FormatFzp, isFile\n\n\tcase \"JSON\", \"json\", \".JSON\", \".json\":\n\t\treturn FormatJSON, isFile\n\n\tcase \"YAML\", \"yaml\", \"yml\", \".YAML\", \".yaml\", \".yml\":\n\t\treturn FormatYAML, isFile\n\n\tdefault:\n\t\treturn FormatNotSupported, isFile\n\t}\n}" ]
[ "0.5773764", "0.53484195", "0.4909956", "0.4629277", "0.45030597", "0.44573566", "0.4399511", "0.43457377", "0.4294057", "0.42767256", "0.4269526", "0.4240586", "0.41823775", "0.41584232", "0.4150533", "0.41313267", "0.40750483", "0.40618613", "0.40592343", "0.40531242", "0.40344554", "0.40271696", "0.40246508", "0.40174693", "0.39905635", "0.39863506", "0.39741355", "0.3972535", "0.39684814", "0.39663446", "0.39623493", "0.39605442", "0.39421874", "0.39182594", "0.38973185", "0.38920116", "0.38653788", "0.38637298", "0.38447052", "0.38095814", "0.37889728", "0.37837535", "0.37749454", "0.3774403", "0.37705433", "0.37492788", "0.37427047", "0.374102", "0.37343192", "0.37306902", "0.3718493", "0.37141845", "0.37061945", "0.37045804", "0.36978826", "0.36964023", "0.36893272", "0.36703378", "0.3668874", "0.3666022", "0.36566493", "0.36554328", "0.36491054", "0.3645462", "0.36405158", "0.36366868", "0.36355028", "0.36340788", "0.3632627", "0.36292645", "0.3625025", "0.36243573", "0.36160398", "0.36152238", "0.36106724", "0.36065426", "0.36057365", "0.36023927", "0.35978657", "0.3584022", "0.35825062", "0.35757068", "0.357414", "0.35662463", "0.3564115", "0.35569745", "0.35516483", "0.35425213", "0.35422897", "0.354037", "0.353786", "0.35342503", "0.3533709", "0.35334584", "0.35277325", "0.35239702", "0.35223505", "0.35158268", "0.35142115", "0.3509469" ]
0.7917034
0
AMPExtensionName returns the name of the extension this node represents. For most extensions this is the value of the "customelement" attribute. Returns ok=false if this isn't an extension.
func AMPExtensionName(n *html.Node) (string, bool) { if n.DataAtom != atom.Script { return "", false } for _, attr := range n.Attr { for _, k := range []string{AMPCustomElement, AMPCustomTemplate, AMPHostService} { if attr.Key == k { return attr.Val, true } } } return "", false }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func IsScriptAMPExtension(n *html.Node) bool {\n\t_, ok := AMPExtensionName(n)\n\treturn ok\n}", "func (me TxsdImpactSimpleContentExtensionType) IsExtValue() bool { return me.String() == \"ext-value\" }", "func (me TxsdCounterSimpleContentExtensionType) IsExtValue() bool { return me.String() == \"ext-value\" }", "func IsAMPCustomElement(n *html.Node) bool {\n\treturn n.Type == html.ElementNode && strings.HasPrefix(n.Data, \"amp-\")\n}", "func (me TactionType) IsExtValue() bool { return me.String() == \"ext-value\" }", "func (this *RTPPacket) HasExtension() bool {\n\treturn this.header.extension != 0\n}", "func (*visibilityExtension) Name() string {\n\treturn _extName\n}", "func (o *WhatsAppNameWhatsAppApiContent) GetNameSuffixOk() (*string, bool) {\n\tif o == nil || o.NameSuffix == nil {\n\t\treturn nil, false\n\t}\n\treturn o.NameSuffix, true\n}", "func (m middleware) ExtensionName() string {\n\treturn \"OperationsExtension\"\n}", "func (me TxsdNodeRoleSimpleContentExtensionCategory) IsName() bool { return me.String() == \"name\" }", "func (me TxsdContactType) IsExtValue() bool { return me.String() == \"ext-value\" }", "func (fr *fakeRequest) Extension(name graphsync.ExtensionName) (datamodel.Node, bool) {\n\tdata, has := fr.extensions[name]\n\treturn data, has\n}", "func (fr *fakeResponse) Extension(name graphsync.ExtensionName) (datamodel.Node, bool) {\n\tdata, has := fr.extensions[name]\n\treturn data, has\n}", "func (me TxsdTimeImpactSimpleContentExtensionMetric) IsExtValue() bool {\n\treturn me.String() == \"ext-value\"\n}", "func (c *Client) Extension(ext string) (bool, string) {\n\tif err := c.hello(); err != nil {\n\t\treturn false, \"\"\n\t}\n\tif c.ext == nil {\n\t\treturn false, \"\"\n\t}\n\text = strings.ToUpper(ext)\n\tparam, ok := c.ext[ext]\n\treturn ok, param\n}", "func (me TdurationType) IsExtValue() bool { return me.String() == \"ext-value\" }", "func (me TxsdContactRole) IsExtValue() bool { return me.String() == \"ext-value\" }", "func (me 
TxsdSystemCategory) IsExtValue() bool { return me.String() == \"ext-value\" }", "func (me TxsdImpactSimpleContentExtensionType) IsExtortion() bool { return me.String() == \"extortion\" }", "func (o *AddOn) GetName() (value string, ok bool) {\n\tok = o != nil && o.bitmap_&2048 != 0\n\tif ok {\n\t\tvalue = o.name\n\t}\n\treturn\n}", "func (me TxsdRecordPatternSimpleContentExtensionOffsetunit) IsExtValue() bool {\n\treturn me.String() == \"ext-value\"\n}", "func (me TdtypeType) IsExtValue() bool { return me.String() == \"ext-value\" }", "func (o *KubernetesClusterAddonProfile) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (me TxsdImpactSimpleContentExtensionType) String() string { return xsdt.Nmtoken(me).String() }", "func (me TxsdNodeRoleSimpleContentExtensionCategory) IsExtValue() bool {\n\treturn me.String() == \"ext-value\"\n}", "func (o *IntrospectedOAuth2Token) GetExtOk() (map[string]interface{}, bool) {\n\tif o == nil || o.Ext == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Ext, true\n}", "func (o *WebhooksIntegrationCustomVariableResponse) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *WafPolicyGroup) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *TenantWithOfferWeb) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (me TxsdRecordPatternSimpleContentExtensionType) IsExtValue() bool {\n\treturn me.String() == \"ext-value\"\n}", "func (e Encoding) Ext() string {\n\tif e == \"\" {\n\t\treturn \"\"\n\t}\n\treturn fmt.Sprintf(\".%s\", e)\n}", "func (o *NotificationAccountSubscriptionAllOf) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *WhatsAppNameWhatsAppApiContent) HasNameSuffix() 
bool {\n\tif o != nil && o.NameSuffix != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *PluginMount) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (me TxsdIncidentPurpose) IsExtValue() bool { return me.String() == \"ext-value\" }", "func (o *V0037Node) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (m *OnPremisesExtensionAttributes) GetExtensionAttribute11()(*string) {\n return m.extensionAttribute11\n}", "func (me TxsdAddressSimpleContentExtensionCategory) IsExtValue() bool {\n\treturn me.String() == \"ext-value\"\n}", "func (o KubernetesClusterExtensionPlanOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v KubernetesClusterExtensionPlan) string { return v.Name }).(pulumi.StringOutput)\n}", "func (c *SimpleXmlCodec) FileExtension() string {\n\treturn constants.FileExtensionXML\n}", "func AMPExtensionScriptDefinition(n *html.Node) (string, bool) {\n\tif n.DataAtom != atom.Script {\n\t\treturn \"\", false\n\t}\n\tsrc, hasSrc := htmlnode.GetAttributeVal(n, \"\", \"src\")\n\tif hasSrc {\n\t\tm := srcURLRE.FindStringSubmatch(src)\n\t\tif len(m) < 2 {\n\t\t\treturn src, true\n\t\t}\n\t\treturn m[1], true\n\t}\n\treturn \"\", false\n}", "func (o *Replication) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (me TxsdRegistryHandleSimpleContentExtensionRegistry) IsExtValue() bool {\n\treturn me.String() == \"ext-value\"\n}", "func (o *CredentialsResponseElement) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *WhatsAppNameWhatsAppApiContent) GetNameSuffix() string {\n\tif o == nil || o.NameSuffix == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.NameSuffix\n}", "func GetExtension(uri string) (extension int) {\n\textension = STRING\n\tif strings.HasSuffix(uri, \"json\") 
{\n\t\textension = JSON\n\t}\n\n\tif strings.HasSuffix(uri, \"yaml\") || strings.HasSuffix(uri, \"yml\") {\n\t\textension = YAML\n\t}\n\n\tif strings.HasSuffix(uri, \"xml\") {\n\t\textension = XML\n\t}\n\treturn\n}", "func (o *Application) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *ContentProvider2) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (r RawReport) GetExtension() string {\n\treturn \"\"\n}", "func (p Platform) Extension() string {\n\tif p.IsWindows() {\n\t\treturn \"zip\"\n\t}\n\treturn \"tar.gz\"\n}", "func (me TxsdImpactSimpleContentExtensionType) IsUnknown() bool { return me.String() == \"unknown\" }", "func (t MimeType) Ext() string {\n\text, ok := Supported[t]\n\tif !ok {\n\t\tlogger.Error(\"Error returning extension from mime type\")\n\t}\n\treturn ext\n}", "func (o *SyntheticsBrowserTest) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *Content) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (d *Device) HasExtension(extension string) bool {\n\tfor _, v := range d.Extensions {\n\t\tif v == extension {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (o *WorkflowCustomDataProperty) GetCustomDataTypeNameOk() (*string, bool) {\n\tif o == nil || o.CustomDataTypeName == nil {\n\t\treturn nil, false\n\t}\n\treturn o.CustomDataTypeName, true\n}", "func (artifact *Artifact) extension() string {\n\tconst unknown = \"\"\n\ti := strings.Index(artifact.File, \".\")\n\n\tif i != -1 {\n\t\ti = 1 + i\n\t\treturn artifact.File[i:]\n\t}\n\treturn unknown\n}", "func (o *WorkbookChart) GetNameOk() (string, bool) {\n\tif o == nil || o.Name == nil {\n\t\tvar ret string\n\t\treturn ret, false\n\t}\n\treturn *o.Name, true\n}", "func (o *EventSubcategory) GetNameOk() (*string, bool) {\n\tif o == nil || 
o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *DeeplinkApp) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (compression *Compression) Extension() string {\n\tswitch *compression {\n\tcase Uncompressed:\n\t\treturn \"tar\"\n\tcase Bzip2:\n\t\treturn \"tar.bz2\"\n\tcase Gzip:\n\t\treturn \"tar.gz\"\n\tcase Xz:\n\t\treturn \"tar.xz\"\n\t}\n\treturn \"\"\n}", "func (me TxsdAddressSimpleContentExtensionCategory) IsAsn() bool { return me.String() == \"asn\" }", "func (o *KubernetesAddonDefinitionAllOf) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *NotificationConfig) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *UiNodeInputAttributes) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *TrashStructureApplication) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (r RawInvalidReport) GetExtension() string {\n\treturn \"\"\n}", "func (o *PartnerCustomerCreateRequest) GetApplicationNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.ApplicationName, true\n}", "func (o *BackupUnitProperties) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\n\treturn o.Name, true\n}", "func (o *RelatedAssetSerializerWithPermission) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (r *SimpleExtensionResource) AzureName() string {\n\treturn r.Spec.AzureName\n}", "func (o *IntrospectedOAuth2Token) HasExt() bool {\n\tif o != nil && o.Ext != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (me TxsdType) IsExtended() bool { return me.String() == \"extended\" }", "func (t Type) Ext() string {\n\tif t == \"\" 
{\n\t\treturn \"\"\n\t}\n\treturn fmt.Sprintf(\".%s\", t)\n}", "func Extension() string {\n\treturn getRandValue([]string{\"file\", \"extension\"})\n}", "func (o *Ga4ghFeature) GetNameOk() (string, bool) {\n\tif o == nil || o.Name == nil {\n\t\tvar ret string\n\t\treturn ret, false\n\t}\n\treturn *o.Name, true\n}", "func (p *CertProfile) IsAllowedExtention(oid csr.OID) bool {\n\tfor _, allowed := range p.AllowedExtensions {\n\t\tif allowed.Equal(oid) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (o *Tag) GetNameOk() (string, bool) {\n\tif o == nil || o.Name == nil {\n\t\tvar ret string\n\t\treturn ret, false\n\t}\n\treturn *o.Name, true\n}", "func (o *HttpDelivery) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (m JSONMarshaller) GetExtension() string {\n\treturn \"json\"\n}", "func (me TxsdCounterSimpleContentExtensionType) IsAlert() bool { return me.String() == \"alert\" }", "func (me TxsdImpactSimpleContentExtensionType) ToXsdtNmtoken() xsdt.Nmtoken { return xsdt.Nmtoken(me) }", "func (o *Site3) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (m *OnPremisesExtensionAttributes) GetExtensionAttribute14()(*string) {\n return m.extensionAttribute14\n}", "func (f genHelperEncoder) Extension(v interface{}) (xfn *extTypeTagFn) {\n\treturn f.e.h.getExtForI(v)\n}", "func (o *OAuthApp) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (me TxsdCounterSimpleContentExtensionType) String() string { return xsdt.Nmtoken(me).String() }", "func (o *PaymentInitiationRecipient) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *Account) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *Ga4ghChemotherapy) GetNameOk() (string, bool) {\n\tif o == 
nil || o.Name == nil {\n\t\tvar ret string\n\t\treturn ret, false\n\t}\n\treturn *o.Name, true\n}", "func (o *TenantExternalView) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *IdentityAccount) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (me TxsdType) IsExtended() bool { return me == \"extended\" }", "func (self File) Extension() string { return filepath.Ext(Path(self).String()) }", "func (o *EventTypeIn) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *Channel) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (r *AttachmentPreview) HasExt() bool {\n\treturn r.hasExt\n}", "func (a ImageRegistryAnonymizer) GetExtension() string {\n\treturn \"json\"\n}", "func (o *CustomFilterChartSeriesDimensionConfig) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (a ArtifactID) Ext(ext string) string {\n\treturn string(a) + \".\" + ext\n}" ]
[ "0.5660772", "0.5495332", "0.53589237", "0.52444386", "0.52043265", "0.51868117", "0.51730925", "0.5127122", "0.512419", "0.5119247", "0.5100169", "0.50610256", "0.50594693", "0.50342685", "0.49987003", "0.49817717", "0.49522945", "0.49516153", "0.4924233", "0.49242046", "0.48955038", "0.48841524", "0.4878613", "0.4857844", "0.48500574", "0.4836013", "0.48335743", "0.47942635", "0.4771246", "0.47668728", "0.47649536", "0.4763793", "0.4752653", "0.474147", "0.4715189", "0.47056076", "0.46954286", "0.46942464", "0.46868324", "0.46858656", "0.46849483", "0.46672708", "0.4660816", "0.4659418", "0.46498415", "0.46455166", "0.46358055", "0.46319702", "0.46207547", "0.46177503", "0.4608099", "0.4607681", "0.46069372", "0.4581107", "0.457532", "0.45741078", "0.45548445", "0.4554682", "0.45544323", "0.45535284", "0.45515573", "0.45515126", "0.4549259", "0.45452458", "0.45407856", "0.45356676", "0.4530177", "0.45103788", "0.45070693", "0.45023608", "0.44953942", "0.44922367", "0.44878423", "0.4486804", "0.4475432", "0.4473358", "0.4466624", "0.4464092", "0.44635716", "0.44626307", "0.4458482", "0.44581196", "0.4456486", "0.44562095", "0.44529933", "0.4449328", "0.44439223", "0.44402197", "0.44397378", "0.4436841", "0.44354415", "0.44314006", "0.44268015", "0.44250885", "0.44198212", "0.44162875", "0.44138032", "0.44092897", "0.44067723", "0.43971968" ]
0.74925065
0
IsScriptAMPExtension returns true if the node is a script tag representing an extension.
func IsScriptAMPExtension(n *html.Node) bool { _, ok := AMPExtensionName(n) return ok }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func IsScriptAMPRuntime(n *html.Node) bool {\n\tif n.DataAtom != atom.Script {\n\t\treturn false\n\t}\n\tif v, ok := htmlnode.GetAttributeVal(n, \"\", \"src\"); ok {\n\t\treturn htmlnode.HasAttribute(n, \"\", \"async\") &&\n\t\t\t!IsScriptAMPExtension(n) &&\n\t\t\tstrings.HasPrefix(v, AMPCacheRootURL) &&\n\t\t\t(strings.HasSuffix(v, \"/v0.js\") ||\n\t\t\t\tstrings.HasSuffix(v, \"/v0.mjs\") ||\n\t\t\t\tstrings.HasSuffix(v, \"/amp4ads-v0.js\") ||\n\t\t\t\tstrings.HasSuffix(v, \"/amp4ads-v0.mjs\"))\n\t}\n\treturn false\n}", "func IsScriptAMPViewer(n *html.Node) bool {\n\tif n.DataAtom != atom.Script {\n\t\treturn false\n\t}\n\ta, ok := htmlnode.FindAttribute(n, \"\", \"src\")\n\treturn ok &&\n\t\t!IsScriptAMPExtension(n) &&\n\t\tstrings.HasPrefix(a.Val,\n\t\t\tAMPCacheSchemeAndHost+\"/v0/amp-viewer-integration-\") &&\n\t\tstrings.HasSuffix(a.Val, \".js\") &&\n\t\thtmlnode.HasAttribute(n, \"\", \"async\")\n}", "func AMPExtensionName(n *html.Node) (string, bool) {\n\tif n.DataAtom != atom.Script {\n\t\treturn \"\", false\n\t}\n\tfor _, attr := range n.Attr {\n\t\tfor _, k := range []string{AMPCustomElement, AMPCustomTemplate, AMPHostService} {\n\t\t\tif attr.Key == k {\n\t\t\t\treturn attr.Val, true\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\", false\n}", "func AMPExtensionScriptDefinition(n *html.Node) (string, bool) {\n\tif n.DataAtom != atom.Script {\n\t\treturn \"\", false\n\t}\n\tsrc, hasSrc := htmlnode.GetAttributeVal(n, \"\", \"src\")\n\tif hasSrc {\n\t\tm := srcURLRE.FindStringSubmatch(src)\n\t\tif len(m) < 2 {\n\t\t\treturn src, true\n\t\t}\n\t\treturn m[1], true\n\t}\n\treturn \"\", false\n}", "func isScript(n *html.Node) bool {\n\treturn n.Type == html.ElementNode && n.Data == \"script\"\n}", "func IsAMPCustomElement(n *html.Node) bool {\n\treturn n.Type == html.ElementNode && strings.HasPrefix(n.Data, \"amp-\")\n}", "func IsScriptRenderDelaying(n *html.Node) bool {\n\tif n.DataAtom != atom.Script {\n\t\treturn false\n\t}\n\tif IsScriptAMPViewer(n) {\n\t\treturn 
true\n\t}\n\tif v, ok := htmlnode.GetAttributeVal(n, \"\", AMPCustomElement); ok {\n\t\t// TODO(b/77581738): Remove amp-story from this list.\n\t\treturn (v == AMPDynamicCSSClasses ||\n\t\t\tv == AMPExperiment ||\n\t\t\tv == AMPStory)\n\t}\n\treturn false\n}", "func isAudio(n *html.Node) bool {\n\treturn n.Type == html.ElementNode && n.Data == \"audio\"\n}", "func (this *RTPPacket) HasExtension() bool {\n\treturn this.header.extension != 0\n}", "func (me TxsdAddressSimpleContentExtensionCategory) IsAsn() bool { return me.String() == \"asn\" }", "func isEmbed(n *html.Node) bool {\n\treturn n.Type == html.ElementNode && n.Data == \"embed\"\n}", "func (p *CertProfile) IsAllowedExtention(oid csr.OID) bool {\n\tfor _, allowed := range p.AllowedExtensions {\n\t\tif allowed.Equal(oid) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (me TxsdImpactSimpleContentExtensionType) IsPolicy() bool { return me.String() == \"policy\" }", "func (me TxsdCounterSimpleContentExtensionType) IsEvent() bool { return me.String() == \"event\" }", "func isApplet(n *html.Node) bool {\n\treturn n.Type == html.ElementNode && n.Data == \"applet\"\n}", "func (me TxsdImpactSimpleContentExtensionType) IsExtortion() bool { return me.String() == \"extortion\" }", "func (cn ComponentName) IsAddon() bool {\n\treturn cn == AddonComponentName\n}", "func (me TxsdCounterSimpleContentExtensionType) IsAlert() bool { return me.String() == \"alert\" }", "func (me TEventType) IsHITExtended() bool { return me.String() == \"HITExtended\" }", "func (me TxsdImpactSimpleContentExtensionType) IsExtValue() bool { return me.String() == \"ext-value\" }", "func registerScript(n *html.Node, hn *headNodes) {\n\tif amphtml.IsScriptAMPRuntime(n) {\n\t\thn.scriptAMPRuntime = append(hn.scriptAMPRuntime, n)\n\t\treturn\n\t}\n\tif amphtml.IsScriptAMPViewer(n) {\n\t\thn.scriptAMPViewer = n\n\t\treturn\n\t}\n\tif amphtml.IsScriptAMPExtension(n) {\n\t\tif amphtml.IsScriptRenderDelaying(n) {\n\t\t\thn.scriptRenderDelaying 
= append(hn.scriptRenderDelaying, n)\n\t\t\treturn\n\t\t}\n\t\thn.scriptNonRenderDelaying = append(hn.scriptNonRenderDelaying, n)\n\t\treturn\n\t}\n\thn.other = append(hn.other, n)\n}", "func (me TxsdCounterSimpleContentExtensionType) IsPacket() bool { return me.String() == \"packet\" }", "func (o *Post) HasExtensions() bool {\n\tif o != nil && o.Extensions != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (me TxsdCounterSimpleContentExtensionType) IsExtValue() bool { return me.String() == \"ext-value\" }", "func (me TxsdMimeTypeSequenceType) IsAudio() bool { return me.String() == \"audio\" }", "func (me TxsdTimeImpactSimpleContentExtensionMetric) IsExtValue() bool {\n\treturn me.String() == \"ext-value\"\n}", "func (me TxsdType) IsExtended() bool { return me.String() == \"extended\" }", "func (me TxsdType) IsExtended() bool { return me == \"extended\" }", "func (me TxsdImpactSimpleContentExtensionType) IsAdmin() bool { return me.String() == \"admin\" }", "func (me TxsdNodeRoleSimpleContentExtensionCategory) IsApplication() bool {\n\treturn me.String() == \"application\"\n}", "func (me TactionType) IsExtValue() bool { return me.String() == \"ext-value\" }", "func (s *Script) IsPayToScriptHash() bool {\n\tss := *s\n\tif len(ss) != p2SHScriptLen {\n\t\treturn false\n\t}\n\treturn ss[0] == byte(OPHASH160) && ss[1] == ripemd160.Size && ss[22] == byte(OPEQUAL)\n}", "func HasAdaptationFieldExtension(pkt *packet.Packet) bool {\n\treturn pkt[5]&0x01 != 0\n}", "func (me TxsdImpactSimpleContentExtensionType) IsSocialEngineering() bool {\n\treturn me.String() == \"social-engineering\"\n}", "func (me TxsdAddressSimpleContentExtensionCategory) IsAtm() bool { return me.String() == \"atm\" }", "func (me TxsdCounterSimpleContentExtensionType) IsSite() bool { return me.String() == \"site\" }", "func (me TdurationType) IsExtValue() bool { return me.String() == \"ext-value\" }", "func (api *PrivateExtensionAPI) checkIfContractUnderExtension(ctx context.Context, toExtend 
common.Address) bool {\n\tfor _, v := range api.ActiveExtensionContracts(ctx) {\n\t\tif v.ContractExtended == toExtend {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func isValidExtension(ext string, extensions []string) bool {\n\tfor _, ex := range extensions {\n\t\tif ex == ext {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (me TxsdRecordPatternSimpleContentExtensionType) IsExtValue() bool {\n\treturn me.String() == \"ext-value\"\n}", "func (me TxsdPresentationAttributesTextContentElementsUnicodeBidi) IsEmbed() bool {\n\treturn me.String() == \"embed\"\n}", "func HasExtension(baseFileName string, extensions []string) bool {\n\n\tvar lowerExtension = filepath.Ext(baseFileName)\n\tfor _, extension := range extensions {\n\t\tif lowerExtension == strings.ToLower(extension) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (d *Device) HasExtension(extension string) bool {\n\tfor _, v := range d.Extensions {\n\t\tif v == extension {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (me TxsdRecordPatternSimpleContentExtensionOffsetunit) IsExtValue() bool {\n\treturn me.String() == \"ext-value\"\n}", "func (me TdtypeType) IsExtValue() bool { return me.String() == \"ext-value\" }", "func (me TxsdCounterSimpleContentExtensionType) IsFlow() bool { return me.String() == \"flow\" }", "func (me TxsdImpactSimpleContentExtensionType) IsFile() bool { return me.String() == \"file\" }", "func (me TxsdNodeRoleSimpleContentExtensionCategory) IsP2P() bool { return me.String() == \"p2p\" }", "func (me TxsdNodeRoleSimpleContentExtensionCategory) IsExtValue() bool {\n\treturn me.String() == \"ext-value\"\n}", "func (me TxsdAddressSimpleContentExtensionCategory) IsExtValue() bool {\n\treturn me.String() == \"ext-value\"\n}", "func (me TxsdCounterSimpleContentExtensionType) IsHost() bool { return me.String() == \"host\" }", "func (p *Part) IsAttachment() bool {\n\tif p.gmimePart == nil {\n\t\treturn false\n\t}\n\tif 
!gobool(C.gmime_is_part(p.gmimePart)) || gobool(C.gmime_is_multi_part(p.gmimePart)) {\n\t\treturn false\n\t}\n\tif gobool(C.g_mime_part_is_attachment((*C.GMimePart)(unsafe.Pointer(p.gmimePart)))) {\n\t\treturn true\n\t}\n\tif len(p.Filename()) > 0 {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (s *Script) IsPayToPubKeyHashCLTVScript() bool {\n\tss := *s\n\tl := len(ss)\n\treturn l >= 27 && ss[l-1] == byte(OPCHECKSIG) && ss[l-2] == byte(OPEQUALVERIFY) &&\n\t\tss[l-23] == ripemd160.Size && ss[l-24] == byte(OPHASH160) &&\n\t\tss[l-25] == byte(OPDUP) && ss[l-26] == byte(OPCHECKLOCKTIMEVERIFY)\n}", "func isServerChunkedTransferEncodingDirective(directive string) bool {\n\tif isEqualString(directive, ServerChunkedTransferEncodingDirective) {\n\t\treturn true\n\t}\n\treturn false\n}", "func HasTemplateExt(paths string) bool {\n\tfor _, v := range beeTemplateExt {\n\t\tif strings.HasSuffix(paths, \".\"+v) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (me TxsdAddressSimpleContentExtensionCategory) IsEMail() bool { return me.String() == \"e-mail\" }", "func (ps *PlatformStrings) HasExt(ext string) bool {\n\treturn ps.firstExtFile(ext) != \"\"\n}", "func (sd *ScriptData) IsAllowedP2SH() bool {\n\treturn IsAllowedP2shType(sd.Type)\n}", "func (s *Script) IsContractSig() bool {\n\treturn len(*s) == 1 && (*s)[0] == byte(OPCONTRACT)\n}", "func (me TxsdIncidentPurpose) IsExtValue() bool { return me.String() == \"ext-value\" }", "func (d *DeclarationSpecifier) IsExtern() bool {\n\tif d == nil {\n\t\treturn false\n\t}\n\n\tfor _, v := range d.StorageClassSpecifiers {\n\t\tif v.Case == StorageClassSpecifierExtern {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func HasExt(fname, suffix string) bool {\n\treturn strings.HasSuffix(fname, \".\"+suffix) && (!strings.HasPrefix(fname, \".\") || strings.HasPrefix(fname, \"_\"))\n}", "func IsScriptHashAddrID(id byte) bool {\n\t_, ok := scriptHashAddrIDs[id]\n\treturn ok\n}", "func IsScriptHashAddrID(id byte) bool 
{\n\t_, ok := scriptHashAddrIDs[id]\n\treturn ok\n}", "func (e Event) IsAppMention() bool {\n\treturn e.Is(AppMention)\n}", "func isEscrowScript(script []byte) bool {\n\tif len(script) != 77 {\n\t\treturn false\n\t}\n\tif script[0] == txscript.OP_IF &&\n\t\tscript[1] == txscript.OP_DATA_33 &&\n\t\tscript[35] == txscript.OP_ELSE &&\n\t\tscript[36] == txscript.OP_DATA_2 &&\n\t\tscript[39] == txscript.OP_CHECKSEQUENCEVERIFY &&\n\t\tscript[40] == txscript.OP_DROP &&\n\t\tscript[41] == txscript.OP_DATA_33 &&\n\t\tscript[75] == txscript.OP_ENDIF &&\n\t\tscript[76] == txscript.OP_CHECKSIG {\n\n\t\treturn true\n\t}\n\treturn false\n}", "func (me TxsdCounterSimpleContentExtensionType) IsOrganization() bool {\n\treturn me.String() == \"organization\"\n}", "func (c *Client) Extension(ext string) (bool, string) {\n\tif err := c.hello(); err != nil {\n\t\treturn false, \"\"\n\t}\n\tif c.ext == nil {\n\t\treturn false, \"\"\n\t}\n\text = strings.ToUpper(ext)\n\tparam, ok := c.ext[ext]\n\treturn ok, param\n}", "func (me TisoLanguageCodes) IsPt() bool { return me.String() == \"PT\" }", "func (f *DialectMessageField) GetIsExtension() bool {\n\treturn f.isExtension\n}", "func (me TxsdShow) IsEmbed() bool { return me.String() == \"embed\" }", "func (me TxsdRegistryHandleSimpleContentExtensionRegistry) IsExtValue() bool {\n\treturn me.String() == \"ext-value\"\n}", "func HasTemplateExt(paths string) bool {\n\tfor _, v := range templateExt {\n\t\tif strings.HasSuffix(paths, \".\"+v) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (me TxsdShow) IsEmbed() bool { return me == \"embed\" }", "func (k Keeper) HasExtTransaction(ctx sdk.Context, id uint64) bool {\n\tstore := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.ExtTransactionKey))\n\treturn store.Has(GetExtTransactionIDBytes(id))\n}", "func isIFrame(n *html.Node) bool {\n\treturn n.Type == html.ElementNode && n.Data == \"iframe\"\n}", "func (me TxsdContactType) IsExtValue() bool { return me.String() == 
\"ext-value\" }", "func (me TisoLanguageCodes) IsPs() bool { return me.String() == \"PS\" }", "func (es *externalSigner) IsApplicable(sig *model.PdfSignature) bool {\n\tif sig == nil || sig.Filter == nil || sig.SubFilter == nil {\n\t\treturn false\n\t}\n\n\treturn (*sig.Filter == \"Adobe.PPKMS\" || *sig.Filter == \"Adobe.PPKLite\") && *sig.SubFilter == \"adbe.pkcs7.detached\"\n}", "func IsFunction(t Type) bool {\n\tif v, ok := t.(*Operator); ok {\n\t\treturn v.Name == functionName\n\t}\n\treturn false\n}", "func (me TxsdNodeRoleSimpleContentExtensionCategory) IsMail() bool { return me.String() == \"mail\" }", "func (p *Package) IsBuiltin() bool {\n\treturn p.Name == pkgBuiltin\n}", "func (o *IntrospectedOAuth2Token) HasExt() bool {\n\tif o != nil && o.Ext != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (me TxsdNodeRoleSimpleContentExtensionCategory) IsWww() bool { return me.String() == \"www\" }", "func isServerSendFileDirective(directive string) bool {\n\tif isEqualString(directive, ServerSendFileDirective) {\n\t\treturn true\n\t}\n\treturn false\n}", "func (me TxsdPresentationAttributesTextContentElementsDominantBaseline) IsAutosenseScript() bool {\n\treturn me.String() == \"autosense-script\"\n}", "func IsExtendedHelp(input string) bool {\n\tpattern := \"^[1-9][0-9]*\\\\?$\"\n\tmatch, e := regexp.Match(pattern, []byte(input))\n\tpanicNonNil(e)\n\treturn match\n}", "func IsManifest(filename string) (bool, error) {\n\tmatched, err := filepath.Match(machineConfigFileNamePattern, filename)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn matched, nil\n}", "func (n Noun) IsAtom() bool { return n.atom != nil }", "func (me TxsdNodeRoleSimpleContentExtensionCategory) IsVoice() bool { return me.String() == \"voice\" }", "func isSupportedType(fileName string) bool {\n\tparts := strings.Split(fileName, \".\")\n\textension := parts[len(parts)-1]\n\tsupported := false\n\tif len(parts) > 1 && len(extension) > 0 {\n\t\tfor _, el := range supportedExtensions 
{\n\t\t\tif extension == el {\n\t\t\t\tsupported = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn supported\n}", "func (sd *ScriptData) CanSign() bool {\n\treturn CanSignType(sd.Type)\n}", "func isVoteTx(tx *wire.MsgTx) bool {\n\tif len(tx.TxOut) < 3 {\n\t\treturn false\n\t}\n\ttxOut := tx.TxOut[2]\n\tscriptClass := txscript.GetScriptClass(txOut.Version, txOut.PkScript)\n\treturn scriptClass == txscript.StakeGenTy\n}", "func isServerSendFileMaxChunkDirective(directive string) bool {\n\tif isEqualString(directive, ServerSendFileMaxChunkDirective) {\n\t\treturn true\n\t}\n\treturn false\n}", "func (me TxsdSystemCategory) IsExtValue() bool { return me.String() == \"ext-value\" }", "func (te *TreeEntry) IsSubModule() bool {\n\treturn te.gogitTreeEntry.Mode == filemode.Submodule\n}", "func (a *AddressScriptHash) IsForNet(net *Params) bool {\n\treturn a.magic == net.ScriptHashMagic\n}", "func (r ExtensionPage) IsEmpty() (bool, error) {\n\tis, err := ExtractExtensions(r)\n\treturn len(is) == 0, err\n}", "func (me TxsdRegistryHandleSimpleContentExtensionRegistry) IsApnic() bool {\n\treturn me.String() == \"apnic\"\n}", "func (me TAttlistLocationLabelType) IsAppendix() bool { return me.String() == \"appendix\" }" ]
[ "0.6694262", "0.6552518", "0.6336672", "0.61952287", "0.61614895", "0.5731267", "0.5551006", "0.5041479", "0.5015922", "0.5001815", "0.4956", "0.49291924", "0.48919377", "0.48352566", "0.482244", "0.47968212", "0.47773254", "0.47717315", "0.47310606", "0.47221884", "0.46425334", "0.46222332", "0.46000654", "0.45815742", "0.4548356", "0.4547244", "0.45256922", "0.4518496", "0.4496465", "0.4495074", "0.4471794", "0.4460783", "0.44534457", "0.4450146", "0.44429383", "0.4407978", "0.44059068", "0.43974295", "0.4390101", "0.43450034", "0.43368626", "0.43293217", "0.4279231", "0.42694092", "0.42318997", "0.4224584", "0.42156306", "0.42136747", "0.42078838", "0.41809365", "0.41680604", "0.41676706", "0.41468978", "0.41425753", "0.41278738", "0.41132486", "0.4112358", "0.41086906", "0.40901658", "0.4090136", "0.40897858", "0.40815496", "0.40806484", "0.40806484", "0.40614206", "0.40603766", "0.40598804", "0.40404838", "0.40395927", "0.40251976", "0.40236777", "0.40198743", "0.40146115", "0.40033367", "0.40003544", "0.39986977", "0.39949495", "0.39929035", "0.3976478", "0.3974383", "0.39715698", "0.39689896", "0.39671892", "0.39660364", "0.39638337", "0.39637777", "0.3961871", "0.39597532", "0.3947412", "0.3945669", "0.39439744", "0.39375544", "0.39347124", "0.39255634", "0.39227033", "0.3918542", "0.39181784", "0.39099512", "0.3908798", "0.39079896" ]
0.89359015
0
IsScriptAMPRuntime returns true if the node is of the form <script async src=
func IsScriptAMPRuntime(n *html.Node) bool { if n.DataAtom != atom.Script { return false } if v, ok := htmlnode.GetAttributeVal(n, "", "src"); ok { return htmlnode.HasAttribute(n, "", "async") && !IsScriptAMPExtension(n) && strings.HasPrefix(v, AMPCacheRootURL) && (strings.HasSuffix(v, "/v0.js") || strings.HasSuffix(v, "/v0.mjs") || strings.HasSuffix(v, "/amp4ads-v0.js") || strings.HasSuffix(v, "/amp4ads-v0.mjs")) } return false }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func IsScriptAMPViewer(n *html.Node) bool {\n\tif n.DataAtom != atom.Script {\n\t\treturn false\n\t}\n\ta, ok := htmlnode.FindAttribute(n, \"\", \"src\")\n\treturn ok &&\n\t\t!IsScriptAMPExtension(n) &&\n\t\tstrings.HasPrefix(a.Val,\n\t\t\tAMPCacheSchemeAndHost+\"/v0/amp-viewer-integration-\") &&\n\t\tstrings.HasSuffix(a.Val, \".js\") &&\n\t\thtmlnode.HasAttribute(n, \"\", \"async\")\n}", "func IsScriptAMPExtension(n *html.Node) bool {\n\t_, ok := AMPExtensionName(n)\n\treturn ok\n}", "func IsScriptRenderDelaying(n *html.Node) bool {\n\tif n.DataAtom != atom.Script {\n\t\treturn false\n\t}\n\tif IsScriptAMPViewer(n) {\n\t\treturn true\n\t}\n\tif v, ok := htmlnode.GetAttributeVal(n, \"\", AMPCustomElement); ok {\n\t\t// TODO(b/77581738): Remove amp-story from this list.\n\t\treturn (v == AMPDynamicCSSClasses ||\n\t\t\tv == AMPExperiment ||\n\t\t\tv == AMPStory)\n\t}\n\treturn false\n}", "func isScript(n *html.Node) bool {\n\treturn n.Type == html.ElementNode && n.Data == \"script\"\n}", "func AMPExtensionScriptDefinition(n *html.Node) (string, bool) {\n\tif n.DataAtom != atom.Script {\n\t\treturn \"\", false\n\t}\n\tsrc, hasSrc := htmlnode.GetAttributeVal(n, \"\", \"src\")\n\tif hasSrc {\n\t\tm := srcURLRE.FindStringSubmatch(src)\n\t\tif len(m) < 2 {\n\t\t\treturn src, true\n\t\t}\n\t\treturn m[1], true\n\t}\n\treturn \"\", false\n}", "func registerScript(n *html.Node, hn *headNodes) {\n\tif amphtml.IsScriptAMPRuntime(n) {\n\t\thn.scriptAMPRuntime = append(hn.scriptAMPRuntime, n)\n\t\treturn\n\t}\n\tif amphtml.IsScriptAMPViewer(n) {\n\t\thn.scriptAMPViewer = n\n\t\treturn\n\t}\n\tif amphtml.IsScriptAMPExtension(n) {\n\t\tif amphtml.IsScriptRenderDelaying(n) {\n\t\t\thn.scriptRenderDelaying = append(hn.scriptRenderDelaying, n)\n\t\t\treturn\n\t\t}\n\t\thn.scriptNonRenderDelaying = append(hn.scriptNonRenderDelaying, n)\n\t\treturn\n\t}\n\thn.other = append(hn.other, n)\n}", "func (me TxsdPresentationAttributesTextContentElementsDominantBaseline) IsAutosenseScript() 
bool {\n\treturn me.String() == \"autosense-script\"\n}", "func isAudio(n *html.Node) bool {\n\treturn n.Type == html.ElementNode && n.Data == \"audio\"\n}", "func IsAMPCustomElement(n *html.Node) bool {\n\treturn n.Type == html.ElementNode && strings.HasPrefix(n.Data, \"amp-\")\n}", "func (m *Basic) IframeScript() []byte {\n\tdata, err := Asset(\"script.js\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn data\n}", "func (p *Probe) IsRuntimeCompiled() bool {\n\treturn p.runtimeCompiled\n}", "func isApplet(n *html.Node) bool {\n\treturn n.Type == html.ElementNode && n.Data == \"applet\"\n}", "func isEmbed(n *html.Node) bool {\n\treturn n.Type == html.ElementNode && n.Data == \"embed\"\n}", "func AMPExtensionName(n *html.Node) (string, bool) {\n\tif n.DataAtom != atom.Script {\n\t\treturn \"\", false\n\t}\n\tfor _, attr := range n.Attr {\n\t\tfor _, k := range []string{AMPCustomElement, AMPCustomTemplate, AMPHostService} {\n\t\t\tif attr.Key == k {\n\t\t\t\treturn attr.Val, true\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\", false\n}", "func (s *Script) IsPayToScriptHash() bool {\n\tss := *s\n\tif len(ss) != p2SHScriptLen {\n\t\treturn false\n\t}\n\treturn ss[0] == byte(OPHASH160) && ss[1] == ripemd160.Size && ss[22] == byte(OPEQUAL)\n}", "func playScript(fsys fs.FS, transport string) {\n\tmodTime := time.Now()\n\tvar buf bytes.Buffer\n\tfor _, p := range scripts {\n\t\tb, err := fs.ReadFile(fsys, \"static/\"+p)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tbuf.Write(b)\n\t}\n\tfmt.Fprintf(&buf, \"\\ninitPlayground(new %v());\\n\", transport)\n\tb := buf.Bytes()\n\thttp.HandleFunc(\"/play.js\", func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Content-type\", \"application/javascript\")\n\t\thttp.ServeContent(w, r, \"\", modTime, bytes.NewReader(b))\n\t})\n}", "func ParseScript(vm *otto.Otto, scriptPath string) (program *otto.Script, subs []string, err error) {\n\tscript, err := os.Open(scriptPath)\n\tif err != nil {\n\t\treturn nil, nil, 
err\n\t}\n\n\tvar src string\n\tvar subsFound bool\n\tprefix := `\"igor.subs`\n\n\tscriptReader := bufio.NewReader(script)\n\tfor {\n\t\tline, err := scriptReader.ReadString(byte('\\n'))\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\n\t\ttrimmed := strings.TrimSpace(line)\n\t\tif strings.HasPrefix(trimmed, prefix) {\n\t\t\tsubs = igor.ParseSubscriptionDirective(trimmed, prefix)\n\t\t\tsubsFound = true\n\t\t}\n\t\tif len(trimmed) > 0 {\n\t\t\tsrc = src + line\n\t\t}\n\t}\n\n\tif !subsFound {\n\t\terr = errors.New(\"GetSubscriptions: no subscription directive found\")\n\t\treturn\n\t}\n\n\tprogram, err = vm.Compile(scriptPath, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n}", "func (s *Script) IsPayToPubKeyHashCLTVScript() bool {\n\tss := *s\n\tl := len(ss)\n\treturn l >= 27 && ss[l-1] == byte(OPCHECKSIG) && ss[l-2] == byte(OPEQUALVERIFY) &&\n\t\tss[l-23] == ripemd160.Size && ss[l-24] == byte(OPHASH160) &&\n\t\tss[l-25] == byte(OPDUP) && ss[l-26] == byte(OPCHECKLOCKTIMEVERIFY)\n}", "func (a *scriptAddress) Imported() bool {\n\treturn true\n}", "func (sd *ScriptData) IsAllowedP2SH() bool {\n\treturn IsAllowedP2shType(sd.Type)\n}", "func isExecutableScript(item *DisplayItem) bool {\n\tif item.info.Mode()&0111 != 0 && item.info.Mode().IsRegular() {\n\t\treturn true\n\t}\n\treturn false\n}", "func (s *Script) IsStandard() bool {\n\t//if !s.IsPayToPubKeyHash() &&\n\t//\t!s.IsContractPubkey() &&\n\t//\t!s.IsTokenTransfer() &&\n\t//\t!s.IsTokenIssue() &&\n\t//\t!s.IsSplitAddrScript() &&\n\t//\t!s.IsPayToScriptHash() &&\n\t//\t!s.IsPayToPubKeyHashCLTVScript() {\n\t//\treturn false\n\t//}\n\t_, err := s.ExtractAddress()\n\tif err != nil {\n\t\tlogger.Errorf(\"Failed to extract address. 
script: %s, Err: %v\", s.Disasm(), err)\n\t\treturn false\n\t}\n\treturn true\n}", "func (fn *Function) exportedRuntime() bool {\n\tname := fn.Name\n\tconst n = len(\"runtime.\")\n\treturn len(name) > n && name[:n] == \"runtime.\" && 'A' <= name[n] && name[n] <= 'Z'\n}", "func streamcode_bodyscripts(qw422016 *qt422016.Writer, lang string) {\n//line template/code.qtpl:16\n\tqw422016.N().S(`\n `)\n//line template/code.qtpl:17\n\tqw422016.N().S(`<script src='`)\n//line template/code.qtpl:18\n\tqw422016.E().S(prefix)\n//line template/code.qtpl:18\n\tqw422016.N().S(`/prism.js'></script>`)\n//line template/code.qtpl:19\n\tif lang != \"\" && lang != \"none\" {\n//line template/code.qtpl:19\n\t\tqw422016.N().S(`<script src='`)\n//line template/code.qtpl:20\n\t\tqw422016.E().S(prefix)\n//line template/code.qtpl:20\n\t\tqw422016.N().S(`/components/prism-`)\n//line template/code.qtpl:20\n\t\tqw422016.E().S(lang)\n//line template/code.qtpl:20\n\t\tqw422016.N().S(`.js'></script>`)\n//line template/code.qtpl:21\n\t}\n//line template/code.qtpl:22\n\tqw422016.N().S(`\n`)\n//line template/code.qtpl:23\n}", "func isEscrowScript(script []byte) bool {\n\tif len(script) != 77 {\n\t\treturn false\n\t}\n\tif script[0] == txscript.OP_IF &&\n\t\tscript[1] == txscript.OP_DATA_33 &&\n\t\tscript[35] == txscript.OP_ELSE &&\n\t\tscript[36] == txscript.OP_DATA_2 &&\n\t\tscript[39] == txscript.OP_CHECKSEQUENCEVERIFY &&\n\t\tscript[40] == txscript.OP_DROP &&\n\t\tscript[41] == txscript.OP_DATA_33 &&\n\t\tscript[75] == txscript.OP_ENDIF &&\n\t\tscript[76] == txscript.OP_CHECKSIG {\n\n\t\treturn true\n\t}\n\treturn false\n}", "func IsSubscribeAsync(m Mono) bool {\n\treturn mono.IsSubscribeAsync(m.Raw())\n}", "func IsHTTPRouterRequired(mode *string) bool {\n\tmodes := []string{Hybrid, Receiver, Rest, TokenServer, HTTPOnly, HTTPWithNoRest}\n\treturn StrContains(modes, *mode)\n}", "func RequireScriptURL(url string, priority int) View {\n\treturn RenderView(\n\t\tfunc(ctx *Context) error 
{\n\t\t\tctx.Response.RequireScriptURL(url, priority)\n\t\t\treturn nil\n\t\t},\n\t)\n}", "func Script(attrs []htmlgo.Attribute, children ...HTML) HTML {\n\treturn &htmlgo.Tree{Tag: \"script\", Attributes: attrs, Children: children}\n}", "func (o *DisplayInfo) HasScript() bool {\n\tif o != nil && o.Script != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func streamcode_scripts(qw422016 *qt422016.Writer, lang string) {\n//line template/code.qtpl:11\n\tqw422016.N().S(`\n <link rel='stylesheet' crossorigin='anonymous' href='`)\n//line template/code.qtpl:12\n\tqw422016.E().S(prefix)\n//line template/code.qtpl:12\n\tqw422016.N().S(`/themes/prism.css' />\n`)\n//line template/code.qtpl:13\n}", "func StripJS(e *Context) error {\n\tfor n := e.DOM.RootNode; n != nil; n = htmlnode.Next(n) {\n\t\tif n.Type != html.ElementNode {\n\t\t\tcontinue\n\t\t}\n\n\t\tif n.DataAtom == atom.Script {\n\t\t\tsrcVal, srcOk := htmlnode.GetAttributeVal(n, \"\", \"src\")\n\t\t\tvar isCacheSrc bool\n\t\t\tif srcOk {\n\t\t\t\tif !strings.HasPrefix(strings.ToLower(srcVal), amphtml.AMPCacheRootURL) {\n\t\t\t\t\thtmlnode.RemoveNode(&n)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tisCacheSrc = true\n\t\t\t}\n\t\t\ttypeVal, typeOk := htmlnode.GetAttributeVal(n, \"\", \"type\")\n\t\t\tif !srcOk && !typeOk {\n\t\t\t\thtmlnode.RemoveNode(&n)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif typeOk {\n\t\t\t\tswitch strings.ToLower(typeVal) {\n\t\t\t\tcase \"application/json\", \"application/ld+json\", \"text/plain\":\n\t\t\t\t\t// ok to keep\n\t\t\t\tcase \"text/javascript\", \"module\":\n\t\t\t\t\t// ok to keep only for AMP Cache scripts\n\t\t\t\t\tif !isCacheSrc {\n\t\t\t\t\t\thtmlnode.RemoveNode(&n)\n\t\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t\thtmlnode.RemoveNode(&n)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tfor _, attr := range n.Attr {\n\t\t\t\tif attr.Namespace == \"\" {\n\t\t\t\t\tif match := eventRE.MatchString(attr.Key); match {\n\t\t\t\t\t\thtmlnode.RemoveAttribute(n, 
&attr)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func altScript(l language.Language, s language.Script) language.Script {\n\tfor _, alt := range matchScript {\n\t\t// TODO: also match cases where language is not the same.\n\t\tif (language.Language(alt.wantLang) == l || language.Language(alt.haveLang) == l) &&\n\t\t\tlanguage.Script(alt.haveScript) == s {\n\t\t\treturn language.Script(alt.wantScript)\n\t\t}\n\t}\n\treturn 0\n}", "func TestScripts(t *testing.T) {\n\tt.Parallel()\n\n\ttests := []struct {\n\t\tscript []byte\n\t\tstrictSigs bool\n\t\tshouldPass bool\n\t\tshouldFail error\n\t}{\n\t\t// does nothing, but doesn't put a true on the stack, should fail\n\t\t{script: []byte{txscript.OP_NOP}, shouldPass: false},\n\t\t// should just put true on the stack, thus passes.\n\t\t{script: []byte{txscript.OP_TRUE}, shouldPass: true},\n\t\t// should just put false on the stack, thus fails.\n\t\t{script: []byte{txscript.OP_FALSE}, shouldPass: false},\n\t\t// tests OP_VERIFY (true). 
true is needed since else stack is empty.\n\t\t{script: []byte{txscript.OP_TRUE, txscript.OP_VERIFY,\n\t\t\ttxscript.OP_TRUE}, shouldPass: true},\n\t\t// tests OP_VERIFY (false), will error out.\n\t\t{script: []byte{txscript.OP_FALSE, txscript.OP_VERIFY,\n\t\t\ttxscript.OP_TRUE}, shouldPass: false},\n\t\t// tests OP_VERIFY with empty stack (errors)\n\t\t{script: []byte{txscript.OP_VERIFY}, shouldPass: false},\n\t\t// test OP_RETURN immediately fails the script (empty stack)\n\t\t{script: []byte{txscript.OP_RETURN}, shouldPass: false},\n\t\t// test OP_RETURN immediately fails the script (full stack)\n\t\t{script: []byte{txscript.OP_TRUE, txscript.OP_RETURN},\n\t\t\tshouldPass: false},\n\t\t// tests numequal with a trivial example (passing)\n\t\t{script: []byte{txscript.OP_TRUE, txscript.OP_TRUE,\n\t\t\ttxscript.OP_NUMEQUAL}, shouldPass: true},\n\t\t// tests numequal with a trivial example (failing)\n\t\t{script: []byte{txscript.OP_TRUE, txscript.OP_FALSE,\n\t\t\ttxscript.OP_NUMEQUAL}, shouldPass: false},\n\t\t// tests numequal with insufficient arguments (1/2)\n\t\t{script: []byte{txscript.OP_TRUE, txscript.OP_NUMEQUAL},\n\t\t\tshouldPass: false},\n\t\t// tests numequal with insufficient arguments (0/2)\n\t\t{script: []byte{txscript.OP_NUMEQUAL}, shouldPass: false},\n\t\t// tests numnotequal with a trivial example (passing)\n\t\t{script: []byte{txscript.OP_TRUE, txscript.OP_FALSE,\n\t\t\ttxscript.OP_NUMNOTEQUAL}, shouldPass: true},\n\t\t// tests numnotequal with a trivial example (failing)\n\t\t{script: []byte{txscript.OP_TRUE, txscript.OP_TRUE,\n\t\t\ttxscript.OP_NUMNOTEQUAL}, shouldPass: false},\n\t\t// tests numnotequal with insufficient arguments (1/2)\n\t\t{script: []byte{txscript.OP_TRUE, txscript.OP_NUMNOTEQUAL},\n\t\t\tshouldPass: false},\n\t\t// tests numnotequal with insufficient arguments (0/2)\n\t\t{script: []byte{txscript.OP_NUMNOTEQUAL}, shouldPass: false},\n\t\t// test numequal_verify with a trivial example (passing)\n\t\t{script: 
[]byte{txscript.OP_TRUE, txscript.OP_TRUE,\n\t\t\ttxscript.OP_NUMEQUALVERIFY, txscript.OP_TRUE},\n\t\t\tshouldPass: true},\n\t\t// test numequal_verify with a trivial example (failing)\n\t\t{script: []byte{txscript.OP_TRUE, txscript.OP_FALSE,\n\t\t\ttxscript.OP_NUMEQUALVERIFY, txscript.OP_TRUE},\n\t\t\tshouldPass: false},\n\t\t// test OP_1ADD by adding 1 to 0\n\t\t{script: []byte{txscript.OP_FALSE, txscript.OP_1ADD},\n\t\t\tshouldPass: true},\n\t\t// test OP_1ADD without args (should error)\n\t\t{script: []byte{txscript.OP_1ADD}, shouldPass: false},\n\t\t// test OP_1NEGATE by adding 1 to -1\n\t\t{script: []byte{txscript.OP_1NEGATE, txscript.OP_1ADD},\n\t\t\tshouldPass: false},\n\t\t// test OP_1NEGATE by adding negating -1\n\t\t{script: []byte{txscript.OP_1NEGATE, txscript.OP_NEGATE},\n\t\t\tshouldPass: true},\n\t\t// test OP_NEGATE by adding 1 to -1\n\t\t{script: []byte{txscript.OP_TRUE, txscript.OP_NEGATE,\n\t\t\ttxscript.OP_1ADD}, shouldPass: false},\n\t\t// test OP_NEGATE with no args\n\t\t{script: []byte{txscript.OP_NEGATE}, shouldPass: false},\n\t\t// test OP_1SUB -> 1 - 1 = 0\n\t\t{script: []byte{txscript.OP_TRUE, txscript.OP_1SUB},\n\t\t\tshouldPass: false},\n\t\t// test OP_1SUB -> negate(0 -1) = 1\n\t\t{script: []byte{txscript.OP_FALSE, txscript.OP_1SUB,\n\t\t\ttxscript.OP_NEGATE}, shouldPass: true},\n\t\t// test OP_1SUB with empty stack\n\t\t{script: []byte{txscript.OP_1SUB}, shouldPass: false},\n\t\t// OP_DEPTH with empty stack, means 0 on stack at end\n\t\t{script: []byte{txscript.OP_DEPTH}, shouldPass: false},\n\t\t// 1 +1 -1 = 1. tests depth + add\n\t\t{script: []byte{txscript.OP_TRUE, txscript.OP_DEPTH, txscript.OP_ADD,\n\t\t\ttxscript.OP_1SUB}, shouldPass: true},\n\t\t// 1 +1 -1 = 0 . 
tests dept + add\n\t\t{script: []byte{txscript.OP_TRUE, txscript.OP_DEPTH,\n\t\t\ttxscript.OP_ADD, txscript.OP_1SUB, txscript.OP_1SUB},\n\t\t\tshouldPass: false},\n\t\t// OP_ADD with only one thing on stack should error\n\t\t{script: []byte{txscript.OP_TRUE, txscript.OP_ADD},\n\t\t\tshouldPass: false},\n\t\t// OP_ADD with nothing on stack should error\n\t\t{script: []byte{txscript.OP_ADD}, shouldPass: false},\n\t\t// OP_SUB: 1-1=0\n\t\t{script: []byte{txscript.OP_TRUE, txscript.OP_TRUE,\n\t\t\ttxscript.OP_SUB}, shouldPass: false},\n\t\t// OP_SUB: 1+1-1=1\n\t\t{script: []byte{txscript.OP_TRUE, txscript.OP_TRUE, txscript.OP_TRUE,\n\t\t\ttxscript.OP_ADD, txscript.OP_SUB}, shouldPass: true},\n\t\t// OP_SUB with only one thing on stack should error\n\t\t{script: []byte{txscript.OP_TRUE, txscript.OP_SUB},\n\t\t\tshouldPass: false},\n\t\t// OP_SUB with nothing on stack should error\n\t\t{script: []byte{txscript.OP_SUB}, shouldPass: false},\n\t\t// OP_LESSTHAN 1 < 1 == false\n\t\t{script: []byte{txscript.OP_TRUE, txscript.OP_TRUE,\n\t\t\ttxscript.OP_LESSTHAN}, shouldPass: false},\n\t\t// OP_LESSTHAN 1 < 0 == false\n\t\t{script: []byte{txscript.OP_TRUE, txscript.OP_FALSE,\n\t\t\ttxscript.OP_LESSTHAN}, shouldPass: false},\n\t\t// OP_LESSTHAN 0 < 1 == true\n\t\t{script: []byte{txscript.OP_FALSE, txscript.OP_TRUE,\n\t\t\ttxscript.OP_LESSTHAN}, shouldPass: true},\n\t\t// OP_LESSTHAN only one arg\n\t\t{script: []byte{txscript.OP_TRUE, txscript.OP_LESSTHAN},\n\t\t\tshouldPass: false},\n\t\t// OP_LESSTHAN no args\n\t\t{script: []byte{txscript.OP_LESSTHAN}, shouldPass: false},\n\n\t\t// OP_LESSTHANOREQUAL 1 <= 1 == true\n\t\t{script: []byte{txscript.OP_TRUE, txscript.OP_TRUE,\n\t\t\ttxscript.OP_LESSTHANOREQUAL}, shouldPass: true},\n\t\t// OP_LESSTHANOREQUAL 1 <= 0 == false\n\t\t{script: []byte{txscript.OP_TRUE, txscript.OP_FALSE,\n\t\t\ttxscript.OP_LESSTHANOREQUAL}, shouldPass: false},\n\t\t// OP_LESSTHANOREQUAL 0 <= 1 == true\n\t\t{script: []byte{txscript.OP_FALSE, 
txscript.OP_TRUE,\n\t\t\ttxscript.OP_LESSTHANOREQUAL}, shouldPass: true},\n\t\t// OP_LESSTHANOREQUAL only one arg\n\t\t{script: []byte{txscript.OP_TRUE, txscript.OP_LESSTHANOREQUAL},\n\t\t\tshouldPass: false},\n\t\t// OP_LESSTHANOREQUAL no args\n\t\t{script: []byte{txscript.OP_LESSTHANOREQUAL}, shouldPass: false},\n\n\t\t// OP_GREATERTHAN 1 > 1 == false\n\t\t{script: []byte{txscript.OP_TRUE, txscript.OP_TRUE,\n\t\t\ttxscript.OP_GREATERTHAN}, shouldPass: false},\n\t\t// OP_GREATERTHAN 1 > 0 == true\n\t\t{script: []byte{txscript.OP_TRUE, txscript.OP_FALSE,\n\t\t\ttxscript.OP_GREATERTHAN}, shouldPass: true},\n\t\t// OP_GREATERTHAN 0 > 1 == false\n\t\t{script: []byte{txscript.OP_FALSE, txscript.OP_TRUE,\n\t\t\ttxscript.OP_GREATERTHAN}, shouldPass: false},\n\t\t// OP_GREATERTHAN only one arg\n\t\t{script: []byte{txscript.OP_TRUE, txscript.OP_GREATERTHAN},\n\t\t\tshouldPass: false},\n\t\t// OP_GREATERTHAN no args\n\t\t{script: []byte{txscript.OP_GREATERTHAN}, shouldPass: false},\n\n\t\t// OP_GREATERTHANOREQUAL 1 >= 1 == true\n\t\t{script: []byte{txscript.OP_TRUE, txscript.OP_TRUE,\n\t\t\ttxscript.OP_GREATERTHANOREQUAL}, shouldPass: true},\n\t\t// OP_GREATERTHANOREQUAL 1 >= 0 == false\n\t\t{script: []byte{txscript.OP_TRUE, txscript.OP_FALSE,\n\t\t\ttxscript.OP_GREATERTHANOREQUAL}, shouldPass: true},\n\t\t// OP_GREATERTHANOREQUAL 0 >= 1 == true\n\t\t{script: []byte{txscript.OP_FALSE, txscript.OP_TRUE,\n\t\t\ttxscript.OP_GREATERTHANOREQUAL}, shouldPass: false},\n\t\t// OP_GREATERTHANOREQUAL only one arg\n\t\t{script: []byte{txscript.OP_TRUE, txscript.OP_GREATERTHANOREQUAL},\n\t\t\tshouldPass: false},\n\t\t// OP_GREATERTHANOREQUAL no args\n\t\t{script: []byte{txscript.OP_GREATERTHANOREQUAL}, shouldPass: false},\n\n\t\t// OP_MIN basic functionality -> min(0,1) = 0 = min(1,0)\n\t\t{script: []byte{txscript.OP_TRUE, txscript.OP_FALSE,\n\t\t\ttxscript.OP_MIN}, shouldPass: false},\n\t\t{script: []byte{txscript.OP_FALSE, txscript.OP_TRUE,\n\t\t\ttxscript.OP_MIN}, shouldPass: 
false},\n\t\t// OP_MIN -> 1 arg errors\n\t\t{script: []byte{txscript.OP_TRUE, txscript.OP_MIN},\n\t\t\tshouldPass: false},\n\t\t// OP_MIN -> 0 arg errors\n\t\t{script: []byte{txscript.OP_MIN}, shouldPass: false},\n\t\t// OP_MAX basic functionality -> max(0,1) = 1 = max(1,0)\n\t\t{script: []byte{txscript.OP_TRUE, txscript.OP_FALSE,\n\t\t\ttxscript.OP_MAX}, shouldPass: true},\n\t\t{script: []byte{txscript.OP_FALSE, txscript.OP_TRUE,\n\t\t\ttxscript.OP_MAX}, shouldPass: true},\n\t\t// OP_MAX -> 1 arg errors\n\t\t{script: []byte{txscript.OP_TRUE, txscript.OP_MAX},\n\t\t\tshouldPass: false},\n\t\t// OP_MAX -> 0 arg errors\n\t\t{script: []byte{txscript.OP_MAX}, shouldPass: false},\n\n\t\t// By this point we know a number of operations appear to be working\n\t\t// correctly. we can use them to test the other number pushing\n\t\t// operations\n\t\t{script: []byte{txscript.OP_TRUE, txscript.OP_1ADD, txscript.OP_2,\n\t\t\ttxscript.OP_EQUAL}, shouldPass: true},\n\t\t{script: []byte{txscript.OP_2, txscript.OP_1ADD, txscript.OP_3,\n\t\t\ttxscript.OP_EQUAL}, shouldPass: true},\n\t\t{script: []byte{txscript.OP_3, txscript.OP_1ADD, txscript.OP_4,\n\t\t\ttxscript.OP_EQUAL}, shouldPass: true},\n\t\t{script: []byte{txscript.OP_4, txscript.OP_1ADD, txscript.OP_5,\n\t\t\ttxscript.OP_EQUAL}, shouldPass: true},\n\t\t{script: []byte{txscript.OP_5, txscript.OP_1ADD, txscript.OP_6,\n\t\t\ttxscript.OP_EQUAL}, shouldPass: true},\n\t\t{script: []byte{txscript.OP_6, txscript.OP_1ADD, txscript.OP_7,\n\t\t\ttxscript.OP_EQUAL}, shouldPass: true},\n\t\t{script: []byte{txscript.OP_7, txscript.OP_1ADD, txscript.OP_8,\n\t\t\ttxscript.OP_EQUAL}, shouldPass: true},\n\t\t{script: []byte{txscript.OP_8, txscript.OP_1ADD, txscript.OP_9,\n\t\t\ttxscript.OP_EQUAL}, shouldPass: true},\n\t\t{script: []byte{txscript.OP_9, txscript.OP_1ADD, txscript.OP_10,\n\t\t\ttxscript.OP_EQUAL}, shouldPass: true},\n\t\t{script: []byte{txscript.OP_10, txscript.OP_1ADD, txscript.OP_11,\n\t\t\ttxscript.OP_EQUAL}, shouldPass: 
true},\n\t\t{script: []byte{txscript.OP_11, txscript.OP_1ADD, txscript.OP_12,\n\t\t\ttxscript.OP_EQUAL}, shouldPass: true},\n\t\t{script: []byte{txscript.OP_12, txscript.OP_1ADD, txscript.OP_13,\n\t\t\ttxscript.OP_EQUAL}, shouldPass: true},\n\t\t{script: []byte{txscript.OP_13, txscript.OP_1ADD, txscript.OP_14,\n\t\t\ttxscript.OP_EQUAL}, shouldPass: true},\n\t\t{script: []byte{txscript.OP_14, txscript.OP_1ADD, txscript.OP_15,\n\t\t\ttxscript.OP_EQUAL}, shouldPass: true},\n\t\t{script: []byte{txscript.OP_15, txscript.OP_1ADD, txscript.OP_16,\n\t\t\ttxscript.OP_EQUAL}, shouldPass: true},\n\n\t\t// Test OP_WITHIN x, min, max\n\t\t// 0 <= 1 < 2\n\t\t{script: []byte{txscript.OP_TRUE, txscript.OP_FALSE, txscript.OP_2,\n\t\t\ttxscript.OP_WITHIN}, shouldPass: true},\n\t\t// 1 <= 0 < 2 FAIL\n\t\t{script: []byte{txscript.OP_FALSE, txscript.OP_TRUE, txscript.OP_2,\n\t\t\ttxscript.OP_WITHIN}, shouldPass: false},\n\t\t// 1 <= 1 < 2\n\t\t{script: []byte{txscript.OP_TRUE, txscript.OP_TRUE, txscript.OP_2,\n\t\t\ttxscript.OP_WITHIN}, shouldPass: true},\n\t\t// 1 <= 2 < 2 FAIL\n\t\t{script: []byte{txscript.OP_2, txscript.OP_TRUE, txscript.OP_2,\n\t\t\ttxscript.OP_WITHIN}, shouldPass: false},\n\t\t// only two arguments\n\t\t{script: []byte{txscript.OP_TRUE, txscript.OP_FALSE,\n\t\t\ttxscript.OP_WITHIN}, shouldPass: false},\n\t\t// only one argument\n\t\t{script: []byte{txscript.OP_TRUE, txscript.OP_WITHIN},\n\t\t\tshouldPass: false},\n\t\t// no arguments\n\t\t{script: []byte{txscript.OP_WITHIN}, shouldPass: false},\n\n\t\t// OP_BOOLAND\n\t\t// 1 && 1 == 1\n\t\t{script: []byte{txscript.OP_TRUE, txscript.OP_TRUE,\n\t\t\ttxscript.OP_BOOLAND}, shouldPass: true},\n\t\t// 1 && 0 == 0\n\t\t{script: []byte{txscript.OP_TRUE, txscript.OP_FALSE,\n\t\t\ttxscript.OP_BOOLAND}, shouldPass: false},\n\t\t// 0 && 1 == 0\n\t\t{script: []byte{txscript.OP_FALSE, txscript.OP_TRUE,\n\t\t\ttxscript.OP_BOOLAND}, shouldPass: false},\n\t\t// 0 && 0 == 0\n\t\t{script: []byte{txscript.OP_FALSE, 
txscript.OP_FALSE,\n\t\t\ttxscript.OP_BOOLAND}, shouldPass: false},\n\t\t// 0 && <nothing> - boom\n\t\t{script: []byte{txscript.OP_TRUE, txscript.OP_BOOLAND},\n\t\t\tshouldPass: false},\n\t\t// <nothing> && <nothing> - boom\n\t\t{script: []byte{txscript.OP_BOOLAND}, shouldPass: false},\n\n\t\t// OP_BOOLOR\n\t\t// 1 || 1 == 1\n\t\t{script: []byte{txscript.OP_TRUE, txscript.OP_TRUE,\n\t\t\ttxscript.OP_BOOLOR}, shouldPass: true},\n\t\t// 1 || 0 == 1\n\t\t{script: []byte{txscript.OP_TRUE, txscript.OP_FALSE,\n\t\t\ttxscript.OP_BOOLOR}, shouldPass: true},\n\t\t// 0 || 1 == 1\n\t\t{script: []byte{txscript.OP_FALSE, txscript.OP_TRUE,\n\t\t\ttxscript.OP_BOOLOR}, shouldPass: true},\n\t\t// 0 || 0 == 0\n\t\t{script: []byte{txscript.OP_FALSE, txscript.OP_FALSE,\n\t\t\ttxscript.OP_BOOLOR}, shouldPass: false},\n\t\t// 0 && <nothing> - boom\n\t\t{script: []byte{txscript.OP_TRUE, txscript.OP_BOOLOR},\n\t\t\tshouldPass: false},\n\t\t// <nothing> && <nothing> - boom\n\t\t{script: []byte{txscript.OP_BOOLOR}, shouldPass: false},\n\n\t\t// OP_0NOTEQUAL\n\t\t// 1 with input != 0 XXX check output is actually 1.\n\t\t{script: []byte{txscript.OP_TRUE, txscript.OP_0NOTEQUAL},\n\t\t\tshouldPass: true},\n\t\t{script: []byte{txscript.OP_2, txscript.OP_0NOTEQUAL},\n\t\t\tshouldPass: true},\n\t\t{script: []byte{txscript.OP_3, txscript.OP_0NOTEQUAL},\n\t\t\tshouldPass: true},\n\t\t{script: []byte{txscript.OP_4, txscript.OP_0NOTEQUAL},\n\t\t\tshouldPass: true},\n\t\t{script: []byte{txscript.OP_5, txscript.OP_0NOTEQUAL},\n\t\t\tshouldPass: true},\n\t\t{script: []byte{txscript.OP_6, txscript.OP_0NOTEQUAL},\n\t\t\tshouldPass: true},\n\t\t{script: []byte{txscript.OP_7, txscript.OP_0NOTEQUAL},\n\t\t\tshouldPass: true},\n\t\t{script: []byte{txscript.OP_8, txscript.OP_0NOTEQUAL},\n\t\t\tshouldPass: true},\n\t\t{script: []byte{txscript.OP_9, txscript.OP_0NOTEQUAL},\n\t\t\tshouldPass: true},\n\t\t{script: []byte{txscript.OP_10, txscript.OP_0NOTEQUAL},\n\t\t\tshouldPass: true},\n\t\t{script: 
[]byte{txscript.OP_11, txscript.OP_0NOTEQUAL},\n\t\t\tshouldPass: true},\n\t\t{script: []byte{txscript.OP_12, txscript.OP_0NOTEQUAL},\n\t\t\tshouldPass: true},\n\t\t{script: []byte{txscript.OP_13, txscript.OP_0NOTEQUAL},\n\t\t\tshouldPass: true},\n\t\t{script: []byte{txscript.OP_14, txscript.OP_0NOTEQUAL},\n\t\t\tshouldPass: true},\n\t\t{script: []byte{txscript.OP_15, txscript.OP_0NOTEQUAL},\n\t\t\tshouldPass: true},\n\t\t{script: []byte{txscript.OP_16, txscript.OP_0NOTEQUAL},\n\t\t\tshouldPass: true},\n\t\t{script: []byte{txscript.OP_FALSE, txscript.OP_0NOTEQUAL}, shouldPass: false},\n\t\t// No arguments also blows up\n\t\t{script: []byte{txscript.OP_0NOTEQUAL}, shouldPass: false},\n\n\t\t// OP_NOT: 1 i input is 0, else 0\n\t\t{script: []byte{txscript.OP_TRUE, txscript.OP_NOT}, shouldPass: false},\n\t\t{script: []byte{txscript.OP_2, txscript.OP_NOT}, shouldPass: false},\n\t\t{script: []byte{txscript.OP_3, txscript.OP_NOT}, shouldPass: false},\n\t\t{script: []byte{txscript.OP_4, txscript.OP_NOT}, shouldPass: false},\n\t\t{script: []byte{txscript.OP_5, txscript.OP_NOT}, shouldPass: false},\n\t\t{script: []byte{txscript.OP_6, txscript.OP_NOT}, shouldPass: false},\n\t\t{script: []byte{txscript.OP_7, txscript.OP_NOT}, shouldPass: false},\n\t\t{script: []byte{txscript.OP_8, txscript.OP_NOT}, shouldPass: false},\n\t\t{script: []byte{txscript.OP_9, txscript.OP_NOT}, shouldPass: false},\n\t\t{script: []byte{txscript.OP_10, txscript.OP_NOT}, shouldPass: false},\n\t\t{script: []byte{txscript.OP_11, txscript.OP_NOT}, shouldPass: false},\n\t\t{script: []byte{txscript.OP_12, txscript.OP_NOT}, shouldPass: false},\n\t\t{script: []byte{txscript.OP_13, txscript.OP_NOT}, shouldPass: false},\n\t\t{script: []byte{txscript.OP_14, txscript.OP_NOT}, shouldPass: false},\n\t\t{script: []byte{txscript.OP_15, txscript.OP_NOT}, shouldPass: false},\n\t\t{script: []byte{txscript.OP_16, txscript.OP_NOT}, shouldPass: false},\n\t\t// check negative numbers too\n\t\t{script: 
[]byte{txscript.OP_TRUE, txscript.OP_NEGATE,\n\t\t\ttxscript.OP_NOT}, shouldPass: false},\n\t\t{script: []byte{txscript.OP_FALSE, txscript.OP_NOT},\n\t\t\tshouldPass: true},\n\t\t// No arguments also blows up\n\t\t{script: []byte{txscript.OP_NOT}, shouldPass: false},\n\n\t\t// Conditional Execution\n\t\t{script: []byte{txscript.OP_0, txscript.OP_IF, txscript.OP_0, txscript.OP_ELSE, txscript.OP_2, txscript.OP_ENDIF}, shouldPass: true},\n\t\t{script: []byte{txscript.OP_1, txscript.OP_IF, txscript.OP_0, txscript.OP_ELSE, txscript.OP_2, txscript.OP_ENDIF}, shouldPass: false},\n\t\t{script: []byte{txscript.OP_1, txscript.OP_NOTIF, txscript.OP_0, txscript.OP_ELSE, txscript.OP_2, txscript.OP_ENDIF}, shouldPass: true},\n\t\t{script: []byte{txscript.OP_0, txscript.OP_NOTIF, txscript.OP_0, txscript.OP_ELSE, txscript.OP_2, txscript.OP_ENDIF}, shouldPass: false},\n\t\t{script: []byte{txscript.OP_0, txscript.OP_IF, txscript.OP_0, txscript.OP_ELSE, txscript.OP_2}, shouldFail: txscript.ErrStackMissingEndif},\n\t\t{script: []byte{txscript.OP_1, txscript.OP_NOTIF, txscript.OP_0, txscript.OP_ELSE, txscript.OP_2}, shouldFail: txscript.ErrStackMissingEndif},\n\t\t{script: []byte{txscript.OP_1, txscript.OP_1, txscript.OP_IF, txscript.OP_IF, txscript.OP_1, txscript.OP_ELSE, txscript.OP_0, txscript.OP_ENDIF, txscript.OP_ENDIF}, shouldPass: true},\n\t\t{script: []byte{txscript.OP_1, txscript.OP_IF, txscript.OP_IF, txscript.OP_1, txscript.OP_ELSE, txscript.OP_0, txscript.OP_ENDIF, txscript.OP_ENDIF}, shouldFail: txscript.ErrStackUnderflow},\n\t\t{script: []byte{txscript.OP_0, txscript.OP_IF, txscript.OP_IF, txscript.OP_0, txscript.OP_ELSE, txscript.OP_0, txscript.OP_ENDIF, txscript.OP_ELSE, txscript.OP_1, txscript.OP_ENDIF}, shouldPass: true},\n\t\t{script: []byte{txscript.OP_0, txscript.OP_IF, txscript.OP_NOTIF, txscript.OP_0, txscript.OP_ELSE, txscript.OP_0, txscript.OP_ENDIF, txscript.OP_ELSE, txscript.OP_1, txscript.OP_ENDIF}, shouldPass: true},\n\t\t{script: []byte{txscript.OP_NOTIF, 
txscript.OP_0, txscript.OP_ENDIF}, shouldFail: txscript.ErrStackUnderflow},\n\t\t{script: []byte{txscript.OP_ELSE, txscript.OP_0, txscript.OP_ENDIF}, shouldFail: txscript.ErrStackNoIf},\n\t\t{script: []byte{txscript.OP_ENDIF}, shouldFail: txscript.ErrStackNoIf},\n\t\t/* up here because error from sig parsing is undefined. */\n\t\t{script: []byte{txscript.OP_1, txscript.OP_1, txscript.OP_DATA_65,\n\t\t\t0x04, 0xae, 0x1a, 0x62, 0xfe, 0x09, 0xc5, 0xf5, 0x1b, 0x13,\n\t\t\t0x90, 0x5f, 0x07, 0xf0, 0x6b, 0x99, 0xa2, 0xf7, 0x15, 0x9b,\n\t\t\t0x22, 0x25, 0xf3, 0x74, 0xcd, 0x37, 0x8d, 0x71, 0x30, 0x2f,\n\t\t\t0xa2, 0x84, 0x14, 0xe7, 0xaa, 0xb3, 0x73, 0x97, 0xf5, 0x54,\n\t\t\t0xa7, 0xdf, 0x5f, 0x14, 0x2c, 0x21, 0xc1, 0xb7, 0x30, 0x3b,\n\t\t\t0x8a, 0x06, 0x26, 0xf1, 0xba, 0xde, 0xd5, 0xc7, 0x2a, 0x70,\n\t\t\t0x4f, 0x7e, 0x6c, 0xd8, 0x4c,\n\t\t\ttxscript.OP_1, txscript.OP_CHECKMULTISIG},\n\t\t\tshouldPass: false},\n\t\t{script: []byte{txscript.OP_1, txscript.OP_1, txscript.OP_DATA_65,\n\t\t\t0x04, 0xae, 0x1a, 0x62, 0xfe, 0x09, 0xc5, 0xf5, 0x1b, 0x13,\n\t\t\t0x90, 0x5f, 0x07, 0xf0, 0x6b, 0x99, 0xa2, 0xf7, 0x15, 0x9b,\n\t\t\t0x22, 0x25, 0xf3, 0x74, 0xcd, 0x37, 0x8d, 0x71, 0x30, 0x2f,\n\t\t\t0xa2, 0x84, 0x14, 0xe7, 0xaa, 0xb3, 0x73, 0x97, 0xf5, 0x54,\n\t\t\t0xa7, 0xdf, 0x5f, 0x14, 0x2c, 0x21, 0xc1, 0xb7, 0x30, 0x3b,\n\t\t\t0x8a, 0x06, 0x26, 0xf1, 0xba, 0xde, 0xd5, 0xc7, 0x2a, 0x70,\n\t\t\t0x4f, 0x7e, 0x6c, 0xd8, 0x4c,\n\t\t\ttxscript.OP_1, txscript.OP_CHECKMULTISIG},\n\t\t\tstrictSigs: true,\n\t\t\tshouldPass: false},\n\t\t/* up here because no defined error case. 
*/\n\t\t{script: []byte{txscript.OP_1, txscript.OP_1, txscript.OP_DATA_65,\n\t\t\t0x04, 0xae, 0x1a, 0x62, 0xfe, 0x09, 0xc5, 0xf5, 0x1b, 0x13,\n\t\t\t0x90, 0x5f, 0x07, 0xf0, 0x6b, 0x99, 0xa2, 0xf7, 0x15, 0x9b,\n\t\t\t0x22, 0x25, 0xf3, 0x74, 0xcd, 0x37, 0x8d, 0x71, 0x30, 0x2f,\n\t\t\t0xa2, 0x84, 0x14, 0xe7, 0xaa, 0xb3, 0x73, 0x97, 0xf5, 0x54,\n\t\t\t0xa7, 0xdf, 0x5f, 0x14, 0x2c, 0x21, 0xc1, 0xb7, 0x30, 0x3b,\n\t\t\t0x8a, 0x06, 0x26, 0xf1, 0xba, 0xde, 0xd5, 0xc7, 0x2a, 0x70,\n\t\t\t0x4f, 0x7e, 0x6c, 0xd8, 0x4c,\n\t\t\ttxscript.OP_1, txscript.OP_CHECKMULTISIGVERIFY},\n\t\t\tshouldPass: false},\n\n\t\t// Invalid Opcodes\n\t\t{script: []byte{186}, shouldPass: false},\n\t\t{script: []byte{187}, shouldPass: false},\n\t\t{script: []byte{188}, shouldPass: false},\n\t\t{script: []byte{189}, shouldPass: false},\n\t\t{script: []byte{190}, shouldPass: false},\n\t\t{script: []byte{191}, shouldPass: false},\n\t\t{script: []byte{192}, shouldPass: false},\n\t\t{script: []byte{193}, shouldPass: false},\n\t\t{script: []byte{194}, shouldPass: false},\n\t\t{script: []byte{195}, shouldPass: false},\n\t\t{script: []byte{195}, shouldPass: false},\n\t\t{script: []byte{196}, shouldPass: false},\n\t\t{script: []byte{197}, shouldPass: false},\n\t\t{script: []byte{198}, shouldPass: false},\n\t\t{script: []byte{199}, shouldPass: false},\n\t\t{script: []byte{200}, shouldPass: false},\n\t\t{script: []byte{201}, shouldPass: false},\n\t\t{script: []byte{202}, shouldPass: false},\n\t\t{script: []byte{203}, shouldPass: false},\n\t\t{script: []byte{204}, shouldPass: false},\n\t\t{script: []byte{205}, shouldPass: false},\n\t\t{script: []byte{206}, shouldPass: false},\n\t\t{script: []byte{207}, shouldPass: false},\n\t\t{script: []byte{208}, shouldPass: false},\n\t\t{script: []byte{209}, shouldPass: false},\n\t\t{script: []byte{210}, shouldPass: false},\n\t\t{script: []byte{211}, shouldPass: false},\n\t\t{script: []byte{212}, shouldPass: false},\n\t\t{script: []byte{213}, shouldPass: false},\n\t\t{script: 
[]byte{214}, shouldPass: false},\n\t\t{script: []byte{215}, shouldPass: false},\n\t\t{script: []byte{216}, shouldPass: false},\n\t\t{script: []byte{217}, shouldPass: false},\n\t\t{script: []byte{218}, shouldPass: false},\n\t\t{script: []byte{219}, shouldPass: false},\n\t\t{script: []byte{220}, shouldPass: false},\n\t\t{script: []byte{221}, shouldPass: false},\n\t\t{script: []byte{222}, shouldPass: false},\n\t\t{script: []byte{223}, shouldPass: false},\n\t\t{script: []byte{224}, shouldPass: false},\n\t\t{script: []byte{225}, shouldPass: false},\n\t\t{script: []byte{226}, shouldPass: false},\n\t\t{script: []byte{227}, shouldPass: false},\n\t\t{script: []byte{228}, shouldPass: false},\n\t\t{script: []byte{229}, shouldPass: false},\n\t\t{script: []byte{230}, shouldPass: false},\n\t\t{script: []byte{231}, shouldPass: false},\n\t\t{script: []byte{232}, shouldPass: false},\n\t\t{script: []byte{233}, shouldPass: false},\n\t\t{script: []byte{234}, shouldPass: false},\n\t\t{script: []byte{235}, shouldPass: false},\n\t\t{script: []byte{236}, shouldPass: false},\n\t\t{script: []byte{237}, shouldPass: false},\n\t\t{script: []byte{238}, shouldPass: false},\n\t\t{script: []byte{239}, shouldPass: false},\n\t\t{script: []byte{240}, shouldPass: false},\n\t\t{script: []byte{241}, shouldPass: false},\n\t\t{script: []byte{242}, shouldPass: false},\n\t\t{script: []byte{243}, shouldPass: false},\n\t\t{script: []byte{244}, shouldPass: false},\n\t\t{script: []byte{245}, shouldPass: false},\n\t\t{script: []byte{246}, shouldPass: false},\n\t\t{script: []byte{247}, shouldPass: false},\n\t\t{script: []byte{248}, shouldPass: false},\n\t\t{script: []byte{249}, shouldPass: false},\n\t\t{script: []byte{250}, shouldPass: false},\n\t\t{script: []byte{251}, shouldPass: false},\n\t\t{script: []byte{252}, shouldPass: false},\n\t}\n\n\t// Mock up fake tx used during script execution.\n\tmockTx := &wire.MsgTx{\n\t\tVersion: 1,\n\t\tTxIn: []*wire.TxIn{\n\t\t\t{\n\t\t\t\tPreviousOutPoint: 
wire.OutPoint{\n\t\t\t\t\tHash: wire.ShaHash{},\n\t\t\t\t\tIndex: 0xffffffff,\n\t\t\t\t},\n\t\t\t\tSignatureScript: []byte{txscript.OP_NOP},\n\t\t\t\tSequence: 0xffffffff,\n\t\t\t},\n\t\t},\n\t\tTxOut: []*wire.TxOut{\n\t\t\t{\n\t\t\t\tValue: 0x12a05f200,\n\t\t\t\tPkScript: []byte{},\n\t\t\t},\n\t\t},\n\t\tLockTime: 0,\n\t}\n\n\tfor i, test := range tests {\n\t\t// Parse and execute the test script.\n\t\tvar flags txscript.ScriptFlags\n\t\tif test.strictSigs {\n\t\t\tflags = txscript.ScriptVerifyDERSignatures\n\t\t}\n\t\tmockTx.TxOut[0].PkScript = test.script\n\t\tsigScript := mockTx.TxIn[0].SignatureScript\n\t\tengine, err := txscript.NewScript(sigScript, test.script, 0,\n\t\t\tmockTx, flags)\n\t\tif err == nil {\n\t\t\terr = engine.Execute()\n\t\t}\n\n\t\tif test.shouldFail != nil {\n\t\t\tif err == nil {\n\t\t\t\tt.Errorf(\"test %d passed should fail with %v\",\n\t\t\t\t\ti, test.shouldFail)\n\t\t\t\tcontinue\n\t\t\t} else if test.shouldFail != err {\n\t\t\t\tt.Errorf(\"test %d failed with wrong error \"+\n\t\t\t\t\t\"[%v], expected [%v]\", i, err,\n\t\t\t\t\ttest.shouldFail)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tif test.shouldPass && err != nil {\n\t\t\tt.Errorf(\"test %d failed: %v\", i, err)\n\t\t\tcontinue\n\t\t} else if !test.shouldPass && err == nil {\n\t\t\tt.Errorf(\"test %d passed, should fail\", i)\n\t\t\tcontinue\n\t\t}\n\t}\n}", "func RequireScript(script string, priority int) View {\n\treturn RenderView(\n\t\tfunc(ctx *Context) error {\n\t\t\tctx.Response.RequireScript(script, priority)\n\t\t\treturn nil\n\t\t},\n\t)\n}", "func getScript(name string) (script, bool) {\n\tscript, ok := scriptRegistry[name]\n\treturn script, ok\n}", "func Example_immutability() {\n\tcspBase := New().\n\t\tDefaultSrc(NONE).\n\t\tImgSrc(\"*\").\n\t\tSandbox(\"allow-forms\")\n\n\tcspDerived := cspBase.ScriptSrc(SELF)\n\n\tfmt.Println(cspBase.MustCompile())\n\tfmt.Println(cspDerived.MustCompile())\n\t// Output:\n\t// default-src 'none'; img-src *; sandbox allow-forms\n\t// 
default-src 'none'; img-src *; sandbox allow-forms; script-src 'self'\n}", "func hasSSE42Asm() bool", "func (fn *Function) privateRuntime() bool {\n\tname := fn.Name\n\tconst n = len(\"runtime.\")\n\treturn len(name) > n && name[:n] == \"runtime.\" && !('A' <= name[n] && name[n] <= 'Z')\n}", "func (a API) DecodeScriptChk() (isNew bool) {\n\tselect {\n\tcase o := <-a.Ch.(chan DecodeScriptRes):\n\t\tif o.Err != nil {\n\t\t\ta.Result = o.Err\n\t\t} else {\n\t\t\ta.Result = o.Res\n\t\t}\n\t\tisNew = true\n\tdefault:\n\t}\n\treturn\n}", "func (c *Debugger) CanSetScriptSource() (bool, error) {\n\tresp, err := gcdmessage.SendCustomReturn(c.target, c.target.GetSendCh(), &gcdmessage.ParamRequest{Id: c.target.GetId(), Method: \"Debugger.canSetScriptSource\"})\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tvar chromeData struct {\n\t\tResult struct {\n\t\t\tResult bool\n\t\t}\n\t}\n\n\tif resp == nil {\n\t\treturn false, &gcdmessage.ChromeEmptyResponseErr{}\n\t}\n\n\t// test if error first\n\tcerr := &gcdmessage.ChromeErrorResponse{}\n\tjson.Unmarshal(resp.Data, cerr)\n\tif cerr != nil && cerr.Error != nil {\n\t\treturn false, &gcdmessage.ChromeRequestErr{Resp: cerr}\n\t}\n\n\tif err := json.Unmarshal(resp.Data, &chromeData); err != nil {\n\t\treturn false, err\n\t}\n\n\treturn chromeData.Result.Result, nil\n}", "func jsRuntime() *goja.Runtime {\n\trt := goja.New()\n\trt.Set(\"lookupHost\", lookupHost)\n\trt.Set(\"httpClient\", new(http.Client))\n\trt.Set(\"copyBody\", copyBody)\n\tnew(require.Registry).Enable(rt)\n\tconsole.Enable(rt)\n\treturn rt\n}", "func (Task) IsNode() {}", "func (b *taskBuilder) useIsolatedAssets() bool {\n\t// Only do this on the RPIs for now. 
Other, faster machines shouldn't\n\t// see much benefit and we don't need the extra complexity, for now.\n\tif b.os(\"ChromeOS\", \"iOS\") || b.matchOs(\"Android\") {\n\t\treturn true\n\t}\n\treturn false\n}", "func (r *RoleTriggerScript) HasScript() bool {\n\treturn r.hasScript\n}", "func ScriptEQ(v string) predicate.AllocationStrategy {\n\treturn predicate.AllocationStrategy(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldScript), v))\n\t})\n}", "func (t NodeType) IsExecutable() bool {\n\treturn t > execBegin && t < execEnd\n}", "func isServerChunkedTransferEncodingDirective(directive string) bool {\n\tif isEqualString(directive, ServerChunkedTransferEncodingDirective) {\n\t\treturn true\n\t}\n\treturn false\n}", "func (t *Link) IsHreflang() (ok bool) {\n\treturn t.hreflang != nil && t.hreflang.bcp47LanguageTag != nil\n\n}", "func (s *Script) IsContractSig() bool {\n\treturn len(*s) == 1 && (*s)[0] == byte(OPCONTRACT)\n}", "func (client *Client) ScriptLoad(scriptText string) string {\n v, _ := client.Do(\"SCRIPT\", \"LOAD\", scriptText)\n return v.String()\n}", "func (o *SyntheticMonitorUpdate) GetScriptOk() (*map[string]interface{}, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Script, true\n}", "func MustParseScript(s string) Script {\n\tscr, err := ParseScript(s)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn scr\n}", "func IsThreadSafe(source rand.Source64) bool {\n\tassert.NotNil(&source)\n\n\tfor source != nil {\n\t\tif _, ok := source.(*syncSource); ok {\n\t\t\treturn true\n\t\t}\n\n\t\twrapper, ok := source.(WrappedSource)\n\t\tif !ok {\n\t\t\treturn false\n\t\t}\n\n\t\tsource = wrapper.Unwrap()\n\t}\n\treturn false\n}", "func isIFrame(n *html.Node) bool {\n\treturn n.Type == html.ElementNode && n.Data == \"iframe\"\n}", "func Script(props *ScriptProps, children ...Element) *ScriptElem {\n\trProps := &_ScriptProps{\n\t\tBasicHTMLElement: newBasicHTMLElement(),\n\t}\n\n\tif props != nil 
{\n\t\tprops.assign(rProps)\n\t}\n\n\treturn &ScriptElem{\n\t\tElement: createElement(\"script\", rProps, children...),\n\t}\n}", "func (_Abi *AbiSession) IsWrappedAsset(arg0 common.Address) (bool, error) {\n\treturn _Abi.Contract.IsWrappedAsset(&_Abi.CallOpts, arg0)\n}", "func AMPBoilerplate(e *Context) error {\n // Remove <style> and <noscript> tags keeping only the amp-runtime and\n // amp-custom style tag. amp-runtime may be removed later by the\n // AMPRuntimeCSS transformer.\n\tfor n := e.DOM.HeadNode; n != nil && n.DataAtom != atom.Body; n = htmlnode.Next(n) {\n\t\tswitch n.DataAtom {\n\t\tcase atom.Style:\n\t\t\tif !htmlnode.HasAttribute(n, \"\", amphtml.AMPCustom) && !htmlnode.HasAttribute(n, \"\", amphtml.AMPRuntime) {\n\t\t\t\thtmlnode.RemoveNode(&n)\n\t\t\t}\n\t\tcase atom.Noscript:\n\t\t\thtmlnode.RemoveNode(&n)\n\t\t}\n\t}\n\n\tif e.Version >= 3 {\n\t\t// If the document had been modified by a Server-Side-Rendering transform\n\t\t// earlier, for example by the AMP Optimizer, and that transform\n\t\t// determined that the boilerplate was unnecessary, we don't add the\n\t\t// boilerplate back. 
Note this can mean that an error in that transform\n\t\t// could result in boilerplate being removed when it shouldn't be.\n\t\tif htmlnode.HasAttribute(e.DOM.HTMLNode, \"\", \"i-amphtml-no-boilerplate\") {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tboilerplate, css := determineBoilerplateAndCSS(e.DOM.HTMLNode)\n\n\tstyleNode := htmlnode.Element(\"style\", html.Attribute{Key: boilerplate})\n\te.DOM.HeadNode.AppendChild(styleNode)\n\n\tcssNode := htmlnode.Text(css)\n\tstyleNode.AppendChild(cssNode)\n\n\tif boilerplate != amphtml.AMPBoilerplate {\n\t\treturn nil\n\t}\n\n\t// Regular AMP boilerplate also includes a noscript.\n\tnoScriptNode := htmlnode.Element(\"noscript\")\n\te.DOM.HeadNode.AppendChild(noScriptNode)\n\n\tnoScriptStyle := htmlnode.Element(\"style\", html.Attribute{Key: boilerplate})\n\tnoScriptNode.AppendChild(noScriptStyle)\n\n\tnoScriptCSS := htmlnode.Text(amphtml.AMPBoilerplateNoscriptCSS)\n\tnoScriptStyle.AppendChild(noScriptCSS)\n\treturn nil\n}", "func Script(name string) got.HTML {\n\treturn got.HTML(fmt.Sprintf(\"<script src=\\\"/assets/scripts/%s.js\\\" type=\\\"text/javascript\\\"></script>\", EscapeURL(name)))\n}", "func (o *OutputStream) ByScript() *OutputStream {\n\treturn o.Filter(func(t *token.Output) bool {\n\t\towner, err := identity.UnmarshallRawOwner(t.Owner)\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\t\tswitch owner.Type {\n\t\tcase ScriptType:\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t})\n}", "func (element *Element) Script(value string) *Element {\n\treturn element.Attr(\"data-script\", value)\n}", "func isVideo(n *html.Node) bool {\n\treturn n.Type == html.ElementNode && n.Data == \"video\"\n}", "func (c *TokenFilterConditional) Script(script *Script) *TokenFilterConditional {\n\tc.script = script\n\treturn c\n}", "func (f *File) isPy37(b []byte) bool {\n\ti := (int(b[1]) << 8) + int(b[0])\n\t// Python 2 versions use magic numbers in the 20-60,000 range. 
Ensure it's not one of them.\n\treturn i >= 3394 && i < 10000\n}", "func (me TxsdActuate) IsOnLoad() bool { return me == \"onLoad\" }", "func (a *tailscaleSTSReconciler) IsHTTPSEnabledOnTailnet() bool {\n\treturn len(a.tsnetServer.CertDomains()) > 0\n}", "func isNodeUntainted(node *v1.Node) bool {\n\treturn isNodeUntaintedWithNonblocking(node, \"\")\n}", "func (c *Module) IsInstallableToApex() bool {\n\tif shared, ok := c.linker.(interface {\n\t\tshared() bool\n\t}); ok {\n\t\t// Stub libs and prebuilt libs in a versioned SDK are not\n\t\t// installable to APEX even though they are shared libs.\n\t\treturn shared.shared() && !c.IsStubs() && c.ContainingSdk().Unversioned()\n\t} else if _, ok := c.linker.(testPerSrc); ok {\n\t\treturn true\n\t}\n\treturn false\n}", "func (me TxsdImpactSimpleContentExtensionType) IsPolicy() bool { return me.String() == \"policy\" }", "func (me TxsdMimeTypeSequenceType) IsAudio() bool { return me.String() == \"audio\" }", "func NewScript(src string) *Script {\n\ts := new(Script)\n\ts.Element = NewElement(\"script\")\n\ts.Type = TypeJavascript\n\ts.SetAttribute(\"type\", s.Type)\n\tif src != \"\" {\n\t\ts.AddSrc(src)\n\t}\n\treturn s\n}", "func ServerApplication_IsResource(construct awscdk.IConstruct) *bool {\n\t_init_.Initialize()\n\n\tvar returns *bool\n\n\t_jsii_.StaticInvoke(\n\t\t\"monocdk.aws_codedeploy.ServerApplication\",\n\t\t\"isResource\",\n\t\t[]interface{}{construct},\n\t\t&returns,\n\t)\n\n\treturn returns\n}", "func IsWebRoot(apath string) bool {\n\treturn path.IsAbs(apath)\n}", "func (c *Compiler) isEmitted(code operation.Opcode) bool {\n\tif len(c.currentInstructions()) == 0 {\n\t\treturn false\n\t}\n\treturn c.scopes[c.currentScope].emitted.Opcode == code\n}", "func IsLoaded(name string) (bool, error) {\n\treturn false, ErrApparmorUnsupported\n}", "func TestRuntimeRequireEval(t *testing.T) {\n\tdefer os.RemoveAll(DATA_PATH)\n\twriteTestModule()\n\twriteLuaModule(\"test-invoke.lua\", `\nlocal nakama = 
require(\"nakama\")\nlocal test = require(\"test\")\ntest.printWorld()\n`)\n\n\t_, err := newRuntimePool()\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n}", "func hasScript(script string) (string, bool) {\n\tfiles, err := ioutil.ReadDir(localScriptPath)\n\tvar exist = false\n\tvar name = \"\"\n\n\tif err != nil {\n\t\tLogErr(logContextRunner, \"No scripts directory found\")\n\t} else {\n\t\tfor _, file := range files {\n\t\t\tif strings.Contains(file.Name(), script) {\n\t\t\t\tname = file.Name()\n\t\t\t\texist = true\n\t\t\t}\n\t\t}\n\t}\n\treturn name, exist\n}", "func (t ResolvedPipelineRunTask) IsCustomTask() bool {\n\treturn t.CustomTask\n}", "func IsProgram(p Program) bool {\n\treturn gl.IsProgram(p.Value)\n}", "func (pp packagePath) IsRel() bool {\n\treturn !pp.IsAbs()\n}", "func isMixedContent(srclink, reflink string) bool {\n\tsrcLink, err := url.Parse(srclink)\n\tif err != nil || srcLink.Scheme == \"http\" {\n\t\treturn false\n\t}\n\n\trefLink, err := url.Parse(reflink)\n\tif err == nil && refLink.Scheme == \"http\" {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func isDataURL(url string) bool {\n\n\tif strings.HasPrefix(url, dataURLprefix) {\n\t\treturn true\n\t}\n\treturn false\n}", "func (me TxsdActuate) IsOnLoad() bool { return me.String() == \"onLoad\" }", "func (sd *ScriptData) CanSign() bool {\n\treturn CanSignType(sd.Type)\n}", "func (sc ScriptClosure) GetScript(address massutil.Address) ([]byte, error) {\n\treturn sc(address)\n}", "func isAnchor(n *html.Node) bool {\n\treturn n.Type == html.ElementNode && n.Data == \"a\"\n}", "func blockSquashable(n nodes.Node) bool {\n\tif n.Block() == nil {\n\t\treturn false\n\t}\n\treturn nodes.IsInline(n.Type())\n}", "func isGoferTaskFile(file *ast.File) bool {\n for _, imprt := range file.Imports {\n if PACKAGE_NAME == file.Name.String() && strings.ContainsAny(imprt.Path.Value, EXPECTED_IMPORT) {\n return true\n }\n }\n\n return false\n}", "func (session *Session) ExecuteAsyncScript(script string, _args 
map[string]interface{}) error {\n\targs := map[string]interface{}{\n\t\t\"sessionId\": session.Id,\n\t\t\"script\": script,\n\t\t\"args\": _args,\n\t}\n\n\tres, err := Commands.ExecuteAsyncScript.Execute(session.addr, args, session.options)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar ret Response\n\terr = json.Unmarshal(res.Data, &ret)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif ret.Status != 0 {\n\t\treturn fmt.Errorf(\"%s\", res.Data)\n\t}\n\treturn nil\n}", "func PreferSameScript(preferSame bool) MatchOption {\n\treturn func(m *matcher) { m.preferSameScript = preferSame }\n}", "func GetScriptURL(tx *gorm.DB, inout, owner, campaign string) (string, error) {\n\n\tvar (\n\t\terr error\n\n\t\tlink string\n\n\t\tscript, scriptContent interface{}\n\t)\n\n\terr = func() error {\n\n\t\t// script for incoming calls for a particular ingroup\n\t\tif strings.ToUpper(inout) == \"IN\" {\n\n\t\t\terr = tx.Raw(`\n\t\t\t\tSELECT ingroup_script FROM vicidial_inbound_groups\n\t\t\t\tWHERE\n\t\t\t\tgroup_id = ? AND\n\t\t\t\t\tget_call_launch = 'SCRIPT';`, campaign,\n\t\t\t).Row().Scan(&script)\n\n\t\t\tif err != nil && err != sql.ErrNoRows {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// script for incoming calls for a campaign\n\t\t} else if strings.ToUpper(inout) == \"OUT\" {\n\n\t\t\terr = tx.Raw(`\n\t\t\t\tSELECT campaign_script FROM vicidial_campaigns\n\t\t\t\tWHERE\n\t\t\t\t\tcampaign_id = ? 
AND\n\t\t\t\t\tget_call_launch = 'SCRIPT'\n\t\t\t`, campaign).Row().Scan(&script)\n\n\t\t\tif err != nil && err != sql.ErrNoRows {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t}\n\n\t\t// if script is NOT found\n\t\tif script != nil {\n\n\t\t\terr = tx.Raw(`\n\t\t\t\t\tSELECT script_text FROM vicidial_scripts WHERE script_id = ?`, script,\n\t\t\t).Row().Scan(&scriptContent)\n\n\t\t\tif err != nil && err != sql.ErrNoRows {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tfor _, k := range strings.Split(fmt.Sprintf(\"%s\", scriptContent), \" \") {\n\t\t\t\tif strings.Contains(k, \"http\") {\n\t\t\t\t\tlink = strings.Split(k, `\"`)[1]\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// all script variables\n\t\t\tvar variables = []string{\n\t\t\t\t\"--A--phone_number--B--\",\n\t\t\t\t\"--A--owner--B--\",\n\t\t\t}\n\n\t\t\tfor _, v := range variables {\n\t\t\t\tlink = strings.Replace(link, v, owner, 1)\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t}()\n\n\treturn link, err\n}", "func isMsgServerDefined(appPath, moduleName string) (bool, error) {\n\ttxProto, err := filepath.Abs(filepath.Join(appPath, \"proto\", moduleName, \"tx.proto\"))\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tif _, err := os.Stat(txProto); os.IsNotExist(err) {\n\t\treturn false, nil\n\t}\n\treturn true, err\n}", "func EagerPyFuncIsAsync(value bool) EagerPyFuncAttr {\n\treturn func(m optionalAttr) {\n\t\tm[\"is_async\"] = value\n\t}\n}", "func (m *HPAMetric) IsExternal() bool {\n\treturn m.Program != \"\"\n}", "func TestRuntimeData(t *testing.T) {\n\trunPolicyPackIntegrationTest(t, \"runtime_data\", NodeJS, map[string]string{\n\t\t\"aConfigValue\": \"this value is a value\",\n\t\t\"aws:region\": \"us-west-2\",\n\t}, []policyTestScenario{{WantErrors: nil}})\n}", "func IsDynamicLinked(efd *elf.File) bool {\n\tfor _, phdr := range efd.Progs {\n\t\tif phdr.Type == elf.PT_DYNAMIC {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func LoadScript(filename string) (int, string) {\n\trepeat := 0\n\trequests := \"\"\n\tthread := 
&starlark.Thread{\n\t\tLoad: loader,\n\t}\n\targuments := starlark.StringDict{}\n\tresponse, err := starlark.ExecFile(thread, filename, nil, arguments)\n\tif nil != err {\n\t\tfmt.Println(\"Error: \", err)\n\t} else {\n\t\tvar names []string\n\t\tfor name := range response {\n\t\t\tnames = append(names, name)\n\t\t}\n\t\tsort.Strings(names)\n\t\tfor _, name := range names {\n\t\t\tv := response[name]\n\t\t\tif strings.Compare(name, \"repeat\") == 0 {\n\t\t\t\tvalue, err := strconv.Atoi(v.String())\n\t\t\t\tif nil == err {\n\t\t\t\t\trepeat = value\n\t\t\t\t}\n\t\t\t}\n\t\t\tif strings.Compare(name, \"requests\") == 0 {\n\t\t\t\trequests = AsString(v)\n\t\t\t}\n\t\t}\n\t}\n\treturn repeat, requests\n}", "func importRuntimeInst(erp *ECALRuntimeProvider, node *parser.ASTNode) parser.Runtime {\n\treturn &importRuntime{newBaseRuntime(erp, node)}\n}", "func isServerReadAheadDirective(directive string) bool {\n\tif isEqualString(directive, ServerReadAheadDirective) {\n\t\treturn true\n\t}\n\treturn false\n}", "func isNodeSchedulableWithoutTaints(node *v1.Node) bool {\n\treturn IsNodeSchedulable(node) && isNodeUntainted(node)\n}" ]
[ "0.71983045", "0.6859008", "0.6489399", "0.6116429", "0.59569806", "0.50658715", "0.48501772", "0.476661", "0.47604835", "0.4714461", "0.46902317", "0.4659031", "0.45383734", "0.44345397", "0.4392329", "0.4341767", "0.43279102", "0.4326666", "0.42890763", "0.42832336", "0.4259594", "0.42201263", "0.42027166", "0.4177183", "0.41707805", "0.4115585", "0.4087291", "0.40610078", "0.40598133", "0.405817", "0.40557188", "0.40183365", "0.40165344", "0.4013949", "0.3979044", "0.397779", "0.39584884", "0.39540565", "0.3920502", "0.3912377", "0.3911401", "0.38770986", "0.3875265", "0.38689587", "0.38672972", "0.38489053", "0.38441464", "0.38389868", "0.3818437", "0.38129348", "0.38089773", "0.38004547", "0.37942618", "0.3785264", "0.37831396", "0.37824923", "0.37773424", "0.37751955", "0.37701368", "0.37658396", "0.3764019", "0.3751639", "0.37505528", "0.3742127", "0.37382984", "0.3737655", "0.37366852", "0.37356102", "0.37296927", "0.37266138", "0.3726086", "0.37249652", "0.3724233", "0.37236375", "0.37185138", "0.37156072", "0.37145305", "0.37114456", "0.37105802", "0.37091166", "0.3707626", "0.3702579", "0.37024134", "0.36976108", "0.36943385", "0.36942095", "0.36881876", "0.36803067", "0.367715", "0.3676394", "0.3674379", "0.36721653", "0.36714906", "0.36635178", "0.3662087", "0.3659624", "0.36553666", "0.36546546", "0.36541182", "0.36535195" ]
0.85922056
0
IsScriptAMPViewer returns true if the node is of the form <script async src=
func IsScriptAMPViewer(n *html.Node) bool { if n.DataAtom != atom.Script { return false } a, ok := htmlnode.FindAttribute(n, "", "src") return ok && !IsScriptAMPExtension(n) && strings.HasPrefix(a.Val, AMPCacheSchemeAndHost+"/v0/amp-viewer-integration-") && strings.HasSuffix(a.Val, ".js") && htmlnode.HasAttribute(n, "", "async") }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func IsScriptAMPRuntime(n *html.Node) bool {\n\tif n.DataAtom != atom.Script {\n\t\treturn false\n\t}\n\tif v, ok := htmlnode.GetAttributeVal(n, \"\", \"src\"); ok {\n\t\treturn htmlnode.HasAttribute(n, \"\", \"async\") &&\n\t\t\t!IsScriptAMPExtension(n) &&\n\t\t\tstrings.HasPrefix(v, AMPCacheRootURL) &&\n\t\t\t(strings.HasSuffix(v, \"/v0.js\") ||\n\t\t\t\tstrings.HasSuffix(v, \"/v0.mjs\") ||\n\t\t\t\tstrings.HasSuffix(v, \"/amp4ads-v0.js\") ||\n\t\t\t\tstrings.HasSuffix(v, \"/amp4ads-v0.mjs\"))\n\t}\n\treturn false\n}", "func IsScriptAMPExtension(n *html.Node) bool {\n\t_, ok := AMPExtensionName(n)\n\treturn ok\n}", "func IsScriptRenderDelaying(n *html.Node) bool {\n\tif n.DataAtom != atom.Script {\n\t\treturn false\n\t}\n\tif IsScriptAMPViewer(n) {\n\t\treturn true\n\t}\n\tif v, ok := htmlnode.GetAttributeVal(n, \"\", AMPCustomElement); ok {\n\t\t// TODO(b/77581738): Remove amp-story from this list.\n\t\treturn (v == AMPDynamicCSSClasses ||\n\t\t\tv == AMPExperiment ||\n\t\t\tv == AMPStory)\n\t}\n\treturn false\n}", "func AMPExtensionScriptDefinition(n *html.Node) (string, bool) {\n\tif n.DataAtom != atom.Script {\n\t\treturn \"\", false\n\t}\n\tsrc, hasSrc := htmlnode.GetAttributeVal(n, \"\", \"src\")\n\tif hasSrc {\n\t\tm := srcURLRE.FindStringSubmatch(src)\n\t\tif len(m) < 2 {\n\t\t\treturn src, true\n\t\t}\n\t\treturn m[1], true\n\t}\n\treturn \"\", false\n}", "func isScript(n *html.Node) bool {\n\treturn n.Type == html.ElementNode && n.Data == \"script\"\n}", "func (me TxsdPresentationAttributesTextContentElementsDominantBaseline) IsAutosenseScript() bool {\n\treturn me.String() == \"autosense-script\"\n}", "func isEmbed(n *html.Node) bool {\n\treturn n.Type == html.ElementNode && n.Data == \"embed\"\n}", "func IsAMPCustomElement(n *html.Node) bool {\n\treturn n.Type == html.ElementNode && strings.HasPrefix(n.Data, \"amp-\")\n}", "func isApplet(n *html.Node) bool {\n\treturn n.Type == html.ElementNode && n.Data == \"applet\"\n}", "func registerScript(n 
*html.Node, hn *headNodes) {\n\tif amphtml.IsScriptAMPRuntime(n) {\n\t\thn.scriptAMPRuntime = append(hn.scriptAMPRuntime, n)\n\t\treturn\n\t}\n\tif amphtml.IsScriptAMPViewer(n) {\n\t\thn.scriptAMPViewer = n\n\t\treturn\n\t}\n\tif amphtml.IsScriptAMPExtension(n) {\n\t\tif amphtml.IsScriptRenderDelaying(n) {\n\t\t\thn.scriptRenderDelaying = append(hn.scriptRenderDelaying, n)\n\t\t\treturn\n\t\t}\n\t\thn.scriptNonRenderDelaying = append(hn.scriptNonRenderDelaying, n)\n\t\treturn\n\t}\n\thn.other = append(hn.other, n)\n}", "func isAudio(n *html.Node) bool {\n\treturn n.Type == html.ElementNode && n.Data == \"audio\"\n}", "func (m *Basic) IframeScript() []byte {\n\tdata, err := Asset(\"script.js\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn data\n}", "func AMPExtensionName(n *html.Node) (string, bool) {\n\tif n.DataAtom != atom.Script {\n\t\treturn \"\", false\n\t}\n\tfor _, attr := range n.Attr {\n\t\tfor _, k := range []string{AMPCustomElement, AMPCustomTemplate, AMPHostService} {\n\t\t\tif attr.Key == k {\n\t\t\t\treturn attr.Val, true\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\", false\n}", "func streamcode_bodyscripts(qw422016 *qt422016.Writer, lang string) {\n//line template/code.qtpl:16\n\tqw422016.N().S(`\n `)\n//line template/code.qtpl:17\n\tqw422016.N().S(`<script src='`)\n//line template/code.qtpl:18\n\tqw422016.E().S(prefix)\n//line template/code.qtpl:18\n\tqw422016.N().S(`/prism.js'></script>`)\n//line template/code.qtpl:19\n\tif lang != \"\" && lang != \"none\" {\n//line template/code.qtpl:19\n\t\tqw422016.N().S(`<script src='`)\n//line template/code.qtpl:20\n\t\tqw422016.E().S(prefix)\n//line template/code.qtpl:20\n\t\tqw422016.N().S(`/components/prism-`)\n//line template/code.qtpl:20\n\t\tqw422016.E().S(lang)\n//line template/code.qtpl:20\n\t\tqw422016.N().S(`.js'></script>`)\n//line template/code.qtpl:21\n\t}\n//line template/code.qtpl:22\n\tqw422016.N().S(`\n`)\n//line template/code.qtpl:23\n}", "func streamcode_scripts(qw422016 *qt422016.Writer, lang 
string) {\n//line template/code.qtpl:11\n\tqw422016.N().S(`\n <link rel='stylesheet' crossorigin='anonymous' href='`)\n//line template/code.qtpl:12\n\tqw422016.E().S(prefix)\n//line template/code.qtpl:12\n\tqw422016.N().S(`/themes/prism.css' />\n`)\n//line template/code.qtpl:13\n}", "func (a *scriptAddress) Imported() bool {\n\treturn true\n}", "func (_Eth *EthCaller) IsPeerAPublisher(opts *bind.CallOpts, a common.Address, topic string) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _Eth.contract.Call(opts, out, \"isPeerAPublisher\", a, topic)\n\treturn *ret0, err\n}", "func StripJS(e *Context) error {\n\tfor n := e.DOM.RootNode; n != nil; n = htmlnode.Next(n) {\n\t\tif n.Type != html.ElementNode {\n\t\t\tcontinue\n\t\t}\n\n\t\tif n.DataAtom == atom.Script {\n\t\t\tsrcVal, srcOk := htmlnode.GetAttributeVal(n, \"\", \"src\")\n\t\t\tvar isCacheSrc bool\n\t\t\tif srcOk {\n\t\t\t\tif !strings.HasPrefix(strings.ToLower(srcVal), amphtml.AMPCacheRootURL) {\n\t\t\t\t\thtmlnode.RemoveNode(&n)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tisCacheSrc = true\n\t\t\t}\n\t\t\ttypeVal, typeOk := htmlnode.GetAttributeVal(n, \"\", \"type\")\n\t\t\tif !srcOk && !typeOk {\n\t\t\t\thtmlnode.RemoveNode(&n)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif typeOk {\n\t\t\t\tswitch strings.ToLower(typeVal) {\n\t\t\t\tcase \"application/json\", \"application/ld+json\", \"text/plain\":\n\t\t\t\t\t// ok to keep\n\t\t\t\tcase \"text/javascript\", \"module\":\n\t\t\t\t\t// ok to keep only for AMP Cache scripts\n\t\t\t\t\tif !isCacheSrc {\n\t\t\t\t\t\thtmlnode.RemoveNode(&n)\n\t\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t\thtmlnode.RemoveNode(&n)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tfor _, attr := range n.Attr {\n\t\t\t\tif attr.Namespace == \"\" {\n\t\t\t\t\tif match := eventRE.MatchString(attr.Key); match {\n\t\t\t\t\t\thtmlnode.RemoveAttribute(n, &attr)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func isPublished(api *anypoint.API, authPolicy string, c 
cache.Cache) (bool, string) {\n\t// Change detection (asset + policies)\n\tchecksum := makeChecksum(api, authPolicy)\n\titem, err := c.Get(checksum)\n\tif err != nil || item == nil {\n\t\treturn false, checksum\n\t} else {\n\t\treturn true, checksum\n\t}\n}", "func (me TxsdShow) IsEmbed() bool { return me == \"embed\" }", "func Script(attrs []htmlgo.Attribute, children ...HTML) HTML {\n\treturn &htmlgo.Tree{Tag: \"script\", Attributes: attrs, Children: children}\n}", "func (o *DisplayInfo) HasScript() bool {\n\tif o != nil && o.Script != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (me TxsdShow) IsEmbed() bool { return me.String() == \"embed\" }", "func IsThreadSafe(source rand.Source64) bool {\n\tassert.NotNil(&source)\n\n\tfor source != nil {\n\t\tif _, ok := source.(*syncSource); ok {\n\t\t\treturn true\n\t\t}\n\n\t\twrapper, ok := source.(WrappedSource)\n\t\tif !ok {\n\t\t\treturn false\n\t\t}\n\n\t\tsource = wrapper.Unwrap()\n\t}\n\treturn false\n}", "func ParseScript(vm *otto.Otto, scriptPath string) (program *otto.Script, subs []string, err error) {\n\tscript, err := os.Open(scriptPath)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar src string\n\tvar subsFound bool\n\tprefix := `\"igor.subs`\n\n\tscriptReader := bufio.NewReader(script)\n\tfor {\n\t\tline, err := scriptReader.ReadString(byte('\\n'))\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\n\t\ttrimmed := strings.TrimSpace(line)\n\t\tif strings.HasPrefix(trimmed, prefix) {\n\t\t\tsubs = igor.ParseSubscriptionDirective(trimmed, prefix)\n\t\t\tsubsFound = true\n\t\t}\n\t\tif len(trimmed) > 0 {\n\t\t\tsrc = src + line\n\t\t}\n\t}\n\n\tif !subsFound {\n\t\terr = errors.New(\"GetSubscriptions: no subscription directive found\")\n\t\treturn\n\t}\n\n\tprogram, err = vm.Compile(scriptPath, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n}", "func isTrack(n *html.Node) bool {\n\treturn n.Type == html.ElementNode && n.Data == \"track\"\n}", "func isIFrame(n *html.Node) bool {\n\treturn 
n.Type == html.ElementNode && n.Data == \"iframe\"\n}", "func RequireScriptURL(url string, priority int) View {\n\treturn RenderView(\n\t\tfunc(ctx *Context) error {\n\t\t\tctx.Response.RequireScriptURL(url, priority)\n\t\t\treturn nil\n\t\t},\n\t)\n}", "func (_Eth *EthCallerSession) IsPeerAPublisher(a common.Address, topic string) (bool, error) {\n\treturn _Eth.Contract.IsPeerAPublisher(&_Eth.CallOpts, a, topic)\n}", "func isMixedContent(srclink, reflink string) bool {\n\tsrcLink, err := url.Parse(srclink)\n\tif err != nil || srcLink.Scheme == \"http\" {\n\t\treturn false\n\t}\n\n\trefLink, err := url.Parse(reflink)\n\tif err == nil && refLink.Scheme == \"http\" {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func RequireScript(script string, priority int) View {\n\treturn RenderView(\n\t\tfunc(ctx *Context) error {\n\t\t\tctx.Response.RequireScript(script, priority)\n\t\t\treturn nil\n\t\t},\n\t)\n}", "func (s *Script) IsPayToPubKeyHashCLTVScript() bool {\n\tss := *s\n\tl := len(ss)\n\treturn l >= 27 && ss[l-1] == byte(OPCHECKSIG) && ss[l-2] == byte(OPEQUALVERIFY) &&\n\t\tss[l-23] == ripemd160.Size && ss[l-24] == byte(OPHASH160) &&\n\t\tss[l-25] == byte(OPDUP) && ss[l-26] == byte(OPCHECKLOCKTIMEVERIFY)\n}", "func (_Eth *EthSession) IsPeerAPublisher(a common.Address, topic string) (bool, error) {\n\treturn _Eth.Contract.IsPeerAPublisher(&_Eth.CallOpts, a, topic)\n}", "func isExecutableScript(item *DisplayItem) bool {\n\tif item.info.Mode()&0111 != 0 && item.info.Mode().IsRegular() {\n\t\treturn true\n\t}\n\treturn false\n}", "func Script(name string) got.HTML {\n\treturn got.HTML(fmt.Sprintf(\"<script src=\\\"/assets/scripts/%s.js\\\" type=\\\"text/javascript\\\"></script>\", EscapeURL(name)))\n}", "func (c *TokenFilterConditional) Script(script *Script) *TokenFilterConditional {\n\tc.script = script\n\treturn c\n}", "func (m *FeedMutation) Transcript() (r string, exists bool) {\n\tv := m.transcript\n\tif v == nil {\n\t\treturn\n\t}\n\treturn *v, true\n}", "func 
isVideo(n *html.Node) bool {\n\treturn n.Type == html.ElementNode && n.Data == \"video\"\n}", "func isAnchor(n *html.Node) bool {\n\treturn n.Type == html.ElementNode && n.Data == \"a\"\n}", "func IsSubscribeAsync(m Mono) bool {\n\treturn mono.IsSubscribeAsync(m.Raw())\n}", "func (c *Debugger) CanSetScriptSource() (bool, error) {\n\tresp, err := gcdmessage.SendCustomReturn(c.target, c.target.GetSendCh(), &gcdmessage.ParamRequest{Id: c.target.GetId(), Method: \"Debugger.canSetScriptSource\"})\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tvar chromeData struct {\n\t\tResult struct {\n\t\t\tResult bool\n\t\t}\n\t}\n\n\tif resp == nil {\n\t\treturn false, &gcdmessage.ChromeEmptyResponseErr{}\n\t}\n\n\t// test if error first\n\tcerr := &gcdmessage.ChromeErrorResponse{}\n\tjson.Unmarshal(resp.Data, cerr)\n\tif cerr != nil && cerr.Error != nil {\n\t\treturn false, &gcdmessage.ChromeRequestErr{Resp: cerr}\n\t}\n\n\tif err := json.Unmarshal(resp.Data, &chromeData); err != nil {\n\t\treturn false, err\n\t}\n\n\treturn chromeData.Result.Result, nil\n}", "func (me TxsdImpactSimpleContentExtensionType) IsPolicy() bool { return me.String() == \"policy\" }", "func (s *Script) IsPayToScriptHash() bool {\n\tss := *s\n\tif len(ss) != p2SHScriptLen {\n\t\treturn false\n\t}\n\treturn ss[0] == byte(OPHASH160) && ss[1] == ripemd160.Size && ss[22] == byte(OPEQUAL)\n}", "func ScriptEQ(v string) predicate.AllocationStrategy {\n\treturn predicate.AllocationStrategy(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldScript), v))\n\t})\n}", "func (me TxsdCounterSimpleContentExtensionType) IsSite() bool { return me.String() == \"site\" }", "func Example_immutability() {\n\tcspBase := New().\n\t\tDefaultSrc(NONE).\n\t\tImgSrc(\"*\").\n\t\tSandbox(\"allow-forms\")\n\n\tcspDerived := cspBase.ScriptSrc(SELF)\n\n\tfmt.Println(cspBase.MustCompile())\n\tfmt.Println(cspDerived.MustCompile())\n\t// Output:\n\t// default-src 'none'; img-src *; sandbox allow-forms\n\t// default-src 'none'; 
img-src *; sandbox allow-forms; script-src 'self'\n}", "func (o *OutputStream) ByScript() *OutputStream {\n\treturn o.Filter(func(t *token.Output) bool {\n\t\towner, err := identity.UnmarshallRawOwner(t.Owner)\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\t\tswitch owner.Type {\n\t\tcase ScriptType:\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t})\n}", "func playScript(fsys fs.FS, transport string) {\n\tmodTime := time.Now()\n\tvar buf bytes.Buffer\n\tfor _, p := range scripts {\n\t\tb, err := fs.ReadFile(fsys, \"static/\"+p)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tbuf.Write(b)\n\t}\n\tfmt.Fprintf(&buf, \"\\ninitPlayground(new %v());\\n\", transport)\n\tb := buf.Bytes()\n\thttp.HandleFunc(\"/play.js\", func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Content-type\", \"application/javascript\")\n\t\thttp.ServeContent(w, r, \"\", modTime, bytes.NewReader(b))\n\t})\n}", "func (r *RoleTriggerScript) HasScript() bool {\n\treturn r.hasScript\n}", "func isEscrowScript(script []byte) bool {\n\tif len(script) != 77 {\n\t\treturn false\n\t}\n\tif script[0] == txscript.OP_IF &&\n\t\tscript[1] == txscript.OP_DATA_33 &&\n\t\tscript[35] == txscript.OP_ELSE &&\n\t\tscript[36] == txscript.OP_DATA_2 &&\n\t\tscript[39] == txscript.OP_CHECKSEQUENCEVERIFY &&\n\t\tscript[40] == txscript.OP_DROP &&\n\t\tscript[41] == txscript.OP_DATA_33 &&\n\t\tscript[75] == txscript.OP_ENDIF &&\n\t\tscript[76] == txscript.OP_CHECKSIG {\n\n\t\treturn true\n\t}\n\treturn false\n}", "func PreferSameScript(preferSame bool) MatchOption {\n\treturn func(m *matcher) { m.preferSameScript = preferSame }\n}", "func (me TxsdActuate) IsOnLoad() bool { return me == \"onLoad\" }", "func (s *Setting) IsTrusted(adr string) bool {\n\tok := false\n\tfor _, t := range s.TrustedNodes {\n\t\tif t == adr {\n\t\t\tok = true\n\t\t}\n\t}\n\treturn ok\n}", "func (me TxsdPresentationAttributesTextContentElementsUnicodeBidi) IsEmbed() bool {\n\treturn me.String() == \"embed\"\n}", "func 
IsHTTPRouterRequired(mode *string) bool {\n\tmodes := []string{Hybrid, Receiver, Rest, TokenServer, HTTPOnly, HTTPWithNoRest}\n\treturn StrContains(modes, *mode)\n}", "func GetScriptURL(tx *gorm.DB, inout, owner, campaign string) (string, error) {\n\n\tvar (\n\t\terr error\n\n\t\tlink string\n\n\t\tscript, scriptContent interface{}\n\t)\n\n\terr = func() error {\n\n\t\t// script for incoming calls for a particular ingroup\n\t\tif strings.ToUpper(inout) == \"IN\" {\n\n\t\t\terr = tx.Raw(`\n\t\t\t\tSELECT ingroup_script FROM vicidial_inbound_groups\n\t\t\t\tWHERE\n\t\t\t\tgroup_id = ? AND\n\t\t\t\t\tget_call_launch = 'SCRIPT';`, campaign,\n\t\t\t).Row().Scan(&script)\n\n\t\t\tif err != nil && err != sql.ErrNoRows {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// script for incoming calls for a campaign\n\t\t} else if strings.ToUpper(inout) == \"OUT\" {\n\n\t\t\terr = tx.Raw(`\n\t\t\t\tSELECT campaign_script FROM vicidial_campaigns\n\t\t\t\tWHERE\n\t\t\t\t\tcampaign_id = ? AND\n\t\t\t\t\tget_call_launch = 'SCRIPT'\n\t\t\t`, campaign).Row().Scan(&script)\n\n\t\t\tif err != nil && err != sql.ErrNoRows {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t}\n\n\t\t// if script is NOT found\n\t\tif script != nil {\n\n\t\t\terr = tx.Raw(`\n\t\t\t\t\tSELECT script_text FROM vicidial_scripts WHERE script_id = ?`, script,\n\t\t\t).Row().Scan(&scriptContent)\n\n\t\t\tif err != nil && err != sql.ErrNoRows {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tfor _, k := range strings.Split(fmt.Sprintf(\"%s\", scriptContent), \" \") {\n\t\t\t\tif strings.Contains(k, \"http\") {\n\t\t\t\t\tlink = strings.Split(k, `\"`)[1]\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// all script variables\n\t\t\tvar variables = []string{\n\t\t\t\t\"--A--phone_number--B--\",\n\t\t\t\t\"--A--owner--B--\",\n\t\t\t}\n\n\t\t\tfor _, v := range variables {\n\t\t\t\tlink = strings.Replace(link, v, owner, 1)\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t}()\n\n\treturn link, err\n}", "func (m multiSigner) Script() []byte {\n\treturn 
m.accounts[0].Contract.Script\n}", "func (me TxsdActuate) IsOnLoad() bool { return me.String() == \"onLoad\" }", "func altScript(l language.Language, s language.Script) language.Script {\n\tfor _, alt := range matchScript {\n\t\t// TODO: also match cases where language is not the same.\n\t\tif (language.Language(alt.wantLang) == l || language.Language(alt.haveLang) == l) &&\n\t\t\tlanguage.Script(alt.haveScript) == s {\n\t\t\treturn language.Script(alt.wantScript)\n\t\t}\n\t}\n\treturn 0\n}", "func candidateImageForPreloading(n *html.Node) (string, bool) {\n\t// amp-image under following containers do not qualify for preloading.\n\timgsrcset, hasSrcset := htmlnode.GetAttributeVal(n, \"\", \"srcset\")\n\n\t// Ignores images with no src attribute.\n\t// These can be css images inside class definition.\n\tif !hasSrcset || len(imgsrcset) == 0 {\n\t\treturn \"\", false\n\t}\n\n\t// Ignores if image src is not a https url.\n\t// URL rewrite transformer guarantees img srcs are https protocol.\n\tif !strings.HasPrefix(imgsrcset, \"https://\") {\n\t\treturn \"\", false\n\t}\n\n\twidthInt, heightInt := nodeDimensions(n)\n\n\t// Ignores smaller images, unless they are aspect ratio dimensions.\n\tif isTinyNode(widthInt, heightInt) {\n\t\t// Checks for aspect ratio images.\n\t\t// Aspect ratio images larger than maxAspectRatioSize are ignored.\n\t\t// Small images of icon types inside input type container types\n\t\t// are ignored.\n\t\tif widthInt > 0 && widthInt <= maxAspectRatioSize && heightInt > 0 && heightInt <= maxAspectRatioSize && isAspectRatioDimensions(n, widthInt, heightInt) && !containerTypeInput(n) {\n\t\t\treturn imgsrcset, true\n\t\t}\n\t\treturn \"\", false\n\t}\n\n\t// Checks if it is placeholder image for iframe.\n\t// https://www.ampproject.org/docs/reference/components/amp-iframe#iframe-with-placeholder\n\t_, hasPlaceholder := htmlnode.GetAttributeVal(n, \"\", \"placeholder\")\n\tparentWidthInt, parentHeightInt := nodeDimensions(n.Parent)\n\tif 
hasPlaceholder {\n\t\tif n.Parent.Data == \"amp-iframe\" {\n\t\t\tif isTinyNode(parentWidthInt, parentHeightInt) {\n\t\t\t\treturn \"\", false\n\t\t\t}\n\t\t\treturn imgsrcset, true\n\t\t}\n\t\treturn \"\", false\n\t}\n\n\tlayoutType := layout.ParseAMPLayout(n)\n\t// Responsive and fill layout types generally accept parent containers dimensions.\n\tif layoutType == amppb.AmpLayout_RESPONSIVE || layoutType == amppb.AmpLayout_FILL {\n\t\tif widthInt == 0 && heightInt == 0 {\n\t\t\tif isTinyNode(parentWidthInt, parentHeightInt) {\n\t\t\t\treturn \"\", false\n\t\t\t}\n\t\t\treturn imgsrcset, true\n\t\t}\n\n\t\t// Actual image dimension check is performed later.\n\t}\n\n\t// For other layouts with no image dimensions, take parent containers\n\t// dimensions into account.\n\tif widthInt == 0 && heightInt == 0 {\n\t\twidthInt = parentWidthInt\n\t\theightInt = parentHeightInt\n\t}\n\n\t// Checks image meets minimum dimension requirements.\n\t// Ignores the width size if it is not specified. In most layouts it\n\t// defaults to auto or 100% size of container.\n\tif (widthInt >= minImageSize || widthInt == 0) && heightInt >= minImageSize {\n\t\treturn imgsrcset, true\n\t}\n\n\treturn \"\", false\n}", "func (element *Element) Script(value string) *Element {\n\treturn element.Attr(\"data-script\", value)\n}", "func (Task) IsNode() {}", "func Scripts() []html.Node {\n\treturn []html.Node{\n\t\thtml.Script().Attribute(\"src\", jquery.WebPath),\n\t\thtml.Script().Attribute(\"src\", JsWebPath),\n\t}\n}", "func Loaded(source string) bool {\n\treturn source != \"\"\n}", "func aria10(node *html.Node) (string, bool) {\n\n\tif hasChildren(node) {\n\t\tif attributeCheckValEmpty(node.Attr, \"aria-labelledby\") {\n\t\t\tfor c := node.FirstChild; c != nil; c = c.NextSibling {\n\t\t\t\tif attributeSearch(c.Attr, \"src\") {\n\t\t\t\t\tif attributeCheckValEmpty(node.Attr, \"alt\") {\n\t\t\t\t\t\treturn Applicable, true\n\t\t\t\t\t} else {\n\t\t\t\t\t\treturn Applicable, 
false\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t}\n\n\treturn NA, true\n\n}", "func (o *WorkflowBuildTaskMeta) HasSrc() bool {\n\tif o != nil && o.Src != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (n *Declarator) isExternInline() bool {\n\treturn n.IsExtern() && n.Type() != nil && n.Type().Inline()\n}", "func (sd *ScriptData) IsAllowedP2SH() bool {\n\treturn IsAllowedP2shType(sd.Type)\n}", "func (t *Tweet) IsLinkable() {}", "func shown() bool {\n\treturn !js.Global.Get(\"document\").Get(\"hidden\").Bool()\n}", "func hasScript(script string) (string, bool) {\n\tfiles, err := ioutil.ReadDir(localScriptPath)\n\tvar exist = false\n\tvar name = \"\"\n\n\tif err != nil {\n\t\tLogErr(logContextRunner, \"No scripts directory found\")\n\t} else {\n\t\tfor _, file := range files {\n\t\t\tif strings.Contains(file.Name(), script) {\n\t\t\t\tname = file.Name()\n\t\t\t\texist = true\n\t\t\t}\n\t\t}\n\t}\n\treturn name, exist\n}", "func (*InstSIToFP) isInst() {}", "func (t *Link) IsHreflang() (ok bool) {\n\treturn t.hreflang != nil && t.hreflang.bcp47LanguageTag != nil\n\n}", "func (o *DisplayInfo) GetScriptOk() (*string, bool) {\n\tif o == nil || o.Script == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Script, true\n}", "func (o *SyntheticMonitorUpdate) GetScriptOk() (*map[string]interface{}, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Script, true\n}", "func (s *Script) AddSrc(src string) {\n\tif src > \"\" {\n\t\ts.Src = src\n\t\ts.SetAttribute(\"src\", s.Src)\n\t}\n}", "func (gg GlobGroup) Truth() starlark.Bool { return starlark.True }", "func IsTransformFeedback(id uint32) bool {\n\tret, _, _ := syscall.Syscall(gpIsTransformFeedback, 1, uintptr(id), 0, 0)\n\treturn ret != 0\n}", "func isImage(n *html.Node) bool {\n\treturn n.Type == html.ElementNode && n.Data == \"img\"\n}", "func (*HTML) isOutput() {\n}", "func (b *taskBuilder) useIsolatedAssets() bool {\n\t// Only do this on the RPIs for now. 
Other, faster machines shouldn't\n\t// see much benefit and we don't need the extra complexity, for now.\n\tif b.os(\"ChromeOS\", \"iOS\") || b.matchOs(\"Android\") {\n\t\treturn true\n\t}\n\treturn false\n}", "func (s *Script) Source() (interface{}, error) {\n\tif s.typ == \"\" && s.lang == \"\" && len(s.params) == 0 {\n\t\treturn s.script, nil\n\t}\n\tsource := make(map[string]interface{})\n\t// Beginning with 6.0, the type can only be \"source\" or \"id\"\n\tif s.typ == \"\" || s.typ == \"inline\" {\n\t\tsrc, err := s.rawScriptSource(s.script)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tsource[\"source\"] = src\n\t} else {\n\t\tsource[\"id\"] = s.script\n\t}\n\tif s.lang != \"\" {\n\t\tsource[\"lang\"] = s.lang\n\t}\n\tif len(s.params) > 0 {\n\t\tsource[\"params\"] = s.params\n\t}\n\treturn source, nil\n}", "func replaceScriptSrcs(orig_url url.URL, n *html.Node) {\n\tif n.Type == html.ElementNode && n.Data == \"script\" {\n\t\tfor i, a := range n.Attr {\n\t\t\tif a.Key == \"src\" {\n\t\t\t\ta.Val = createProxyableUrl(orig_url, a.Val)\n\t\t\t}\n\t\t\tn.Attr[i] = a\n\t\t}\n\t}\n\tfor c := n.FirstChild; c != nil; c = c.NextSibling {\n\t\treplaceScriptSrcs(orig_url, c)\n\t}\n}", "func isServerChunkedTransferEncodingDirective(directive string) bool {\n\tif isEqualString(directive, ServerChunkedTransferEncodingDirective) {\n\t\treturn true\n\t}\n\treturn false\n}", "func (m *FeedMutation) AudioURL() (r string, exists bool) {\n\tv := m.audio_url\n\tif v == nil {\n\t\treturn\n\t}\n\treturn *v, true\n}", "func (r *Permitter) ViewerCanAdmin(\n\tctx context.Context,\n\tnode interface{},\n) (bool, error) {\n\tviewer, ok := myctx.UserFromContext(ctx)\n\tif !ok {\n\t\terr := &myctx.ErrNotFound{\"viewer\"}\n\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\treturn false, err\n\t}\n\tif viewer.Login.String == Guest {\n\t\treturn false, nil\n\t}\n\tvid := viewer.ID.String\n\tswitch node := node.(type) {\n\tcase data.Activity:\n\t\tuserID := 
&node.UserID\n\t\tif node.UserID.Status == pgtype.Undefined {\n\t\t\tactivity, err := r.repos.Activity().load.Get(ctx, node.ID.String)\n\t\t\tif err != nil {\n\t\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tuserID = &activity.UserID\n\t\t}\n\t\treturn vid == userID.String, nil\n\tcase *data.Activity:\n\t\tuserID := &node.UserID\n\t\tif node.UserID.Status == pgtype.Undefined {\n\t\t\tactivity, err := r.repos.Activity().load.Get(ctx, node.ID.String)\n\t\t\tif err != nil {\n\t\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tuserID = &activity.UserID\n\t\t}\n\t\treturn vid == userID.String, nil\n\tcase data.ActivityAsset:\n\t\tactivityID := &node.ActivityID\n\t\tif activityID.Status == pgtype.Undefined {\n\t\t\tactivityAsset, err := r.repos.ActivityAsset().load.Get(ctx, node.AssetID.String)\n\t\t\tif err != nil {\n\t\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tactivityID = &activityAsset.ActivityID\n\t\t}\n\t\tactivity, err := r.repos.Activity().load.Get(ctx, activityID.String)\n\t\tif err != nil {\n\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\treturn false, err\n\t\t}\n\t\treturn vid == activity.UserID.String, nil\n\tcase *data.ActivityAsset:\n\t\tactivityID := &node.ActivityID\n\t\tif activityID.Status == pgtype.Undefined {\n\t\t\tactivityAsset, err := r.repos.ActivityAsset().load.Get(ctx, node.AssetID.String)\n\t\t\tif err != nil {\n\t\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tactivityID = &activityAsset.ActivityID\n\t\t}\n\t\tactivity, err := r.repos.Activity().load.Get(ctx, activityID.String)\n\t\tif err != nil {\n\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\treturn false, err\n\t\t}\n\t\treturn vid == activity.UserID.String, nil\n\tcase data.Appled:\n\t\tuserID := &node.UserID\n\t\tif node.UserID.Status == pgtype.Undefined 
{\n\t\t\tappled, err := r.repos.Appled().load.Get(ctx, node.ID.Int)\n\t\t\tif err != nil {\n\t\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tuserID = &appled.UserID\n\t\t}\n\t\treturn vid == userID.String, nil\n\tcase *data.Appled:\n\t\tuserID := &node.UserID\n\t\tif node.UserID.Status == pgtype.Undefined {\n\t\t\tappled, err := r.repos.Appled().load.Get(ctx, node.ID.Int)\n\t\t\tif err != nil {\n\t\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tuserID = &appled.UserID\n\t\t}\n\t\treturn vid == userID.String, nil\n\tcase data.Comment:\n\t\tuserID := &node.UserID\n\t\tif node.UserID.Status == pgtype.Undefined {\n\t\t\tcomment, err := r.repos.Comment().load.Get(ctx, node.ID.String)\n\t\t\tif err != nil {\n\t\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tuserID = &comment.UserID\n\t\t}\n\t\treturn vid == userID.String, nil\n\tcase *data.Comment:\n\t\tuserID := &node.UserID\n\t\tif node.UserID.Status == pgtype.Undefined {\n\t\t\tcomment, err := r.repos.Comment().load.Get(ctx, node.ID.String)\n\t\t\tif err != nil {\n\t\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tuserID = &comment.UserID\n\t\t}\n\t\treturn vid == userID.String, nil\n\tcase data.CommentDraftBackup:\n\t\tcomment, err := r.repos.Comment().load.Get(ctx, node.CommentID.String)\n\t\tif err != nil {\n\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\treturn false, err\n\t\t}\n\t\tuserID := &comment.UserID\n\t\treturn vid == userID.String, nil\n\tcase *data.CommentDraftBackup:\n\t\tcomment, err := r.repos.Comment().load.Get(ctx, node.CommentID.String)\n\t\tif err != nil {\n\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\treturn false, err\n\t\t}\n\t\tuserID := &comment.UserID\n\t\treturn vid == userID.String, nil\n\tcase data.Course:\n\t\tuserID := &node.UserID\n\t\tif node.UserID.Status 
== pgtype.Undefined {\n\t\t\tcourse, err := r.repos.Course().load.Get(ctx, node.ID.String)\n\t\t\tif err != nil {\n\t\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tuserID = &course.UserID\n\t\t}\n\t\treturn vid == userID.String, nil\n\tcase *data.Course:\n\t\tuserID := &node.UserID\n\t\tif node.UserID.Status == pgtype.Undefined {\n\t\t\tcourse, err := r.repos.Course().load.Get(ctx, node.ID.String)\n\t\t\tif err != nil {\n\t\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tuserID = &course.UserID\n\t\t}\n\t\treturn vid == userID.String, nil\n\tcase data.CourseLesson:\n\t\tcourseID := &node.CourseID\n\t\tif courseID.Status == pgtype.Undefined {\n\t\t\tcourseLesson, err := r.repos.CourseLesson().load.Get(ctx, node.LessonID.String)\n\t\t\tif err != nil {\n\t\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tcourseID = &courseLesson.CourseID\n\t\t}\n\t\tcourse, err := r.repos.Course().load.Get(ctx, courseID.String)\n\t\tif err != nil {\n\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\treturn false, err\n\t\t}\n\t\treturn vid == course.UserID.String, nil\n\tcase *data.CourseLesson:\n\t\tcourseID := &node.CourseID\n\t\tif courseID.Status == pgtype.Undefined {\n\t\t\tcourseLesson, err := r.repos.CourseLesson().load.Get(ctx, node.LessonID.String)\n\t\t\tif err != nil {\n\t\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tcourseID = &courseLesson.CourseID\n\t\t}\n\t\tcourse, err := r.repos.Course().load.Get(ctx, courseID.String)\n\t\tif err != nil {\n\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\treturn false, err\n\t\t}\n\t\treturn vid == course.UserID.String, nil\n\tcase data.Email:\n\t\tuserID := &node.UserID\n\t\tif node.UserID.Status == pgtype.Undefined {\n\t\t\temail, err := r.repos.Email().load.Get(ctx, node.ID.String)\n\t\t\tif err != nil 
{\n\t\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tuserID = &email.UserID\n\t\t}\n\t\treturn vid == userID.String, nil\n\tcase *data.Email:\n\t\tuserID := &node.UserID\n\t\tif node.UserID.Status == pgtype.Undefined {\n\t\t\temail, err := r.repos.Email().load.Get(ctx, node.ID.String)\n\t\t\tif err != nil {\n\t\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tuserID = &email.UserID\n\t\t}\n\t\treturn vid == userID.String, nil\n\tcase data.Enrolled:\n\t\tuserID := &node.UserID\n\t\tif node.UserID.Status == pgtype.Undefined {\n\t\t\tenrolled, err := r.repos.Enrolled().load.Get(ctx, node.ID.Int)\n\t\t\tif err != nil {\n\t\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tuserID = &enrolled.UserID\n\t\t}\n\t\treturn vid == userID.String, nil\n\tcase *data.Enrolled:\n\t\tuserID := &node.UserID\n\t\tif node.UserID.Status == pgtype.Undefined {\n\t\t\tenrolled, err := r.repos.Enrolled().load.Get(ctx, node.ID.Int)\n\t\t\tif err != nil {\n\t\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tuserID = &enrolled.UserID\n\t\t}\n\t\treturn vid == userID.String, nil\n\tcase data.EVT:\n\t\tuserID := &node.UserID\n\t\tif node.UserID.Status == pgtype.Undefined {\n\t\t\tevt, err := r.repos.EVT().load.Get(ctx, node.EmailID.String, node.Token.String)\n\t\t\tif err != nil {\n\t\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tuserID = &evt.UserID\n\t\t}\n\t\treturn vid == userID.String, nil\n\tcase *data.EVT:\n\t\tuserID := &node.UserID\n\t\tif node.UserID.Status == pgtype.Undefined {\n\t\t\tevt, err := r.repos.EVT().load.Get(ctx, node.EmailID.String, node.Token.String)\n\t\t\tif err != nil {\n\t\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tuserID = &evt.UserID\n\t\t}\n\t\treturn vid == userID.String, 
nil\n\tcase data.Label:\n\t\tlabel, err := r.repos.Label().load.Get(ctx, node.ID.String)\n\t\tif err != nil {\n\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\treturn false, err\n\t\t}\n\t\tstudy, err := r.repos.Study().load.Get(ctx, label.StudyID.String)\n\t\tif err != nil {\n\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\treturn false, err\n\t\t}\n\t\treturn vid == study.UserID.String, nil\n\tcase *data.Label:\n\t\tlabel, err := r.repos.Label().load.Get(ctx, node.ID.String)\n\t\tif err != nil {\n\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\treturn false, err\n\t\t}\n\t\tstudy, err := r.repos.Study().load.Get(ctx, label.StudyID.String)\n\t\tif err != nil {\n\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\treturn false, err\n\t\t}\n\t\treturn vid == study.UserID.String, nil\n\tcase data.Labeled:\n\t\tuserID := mytype.OID{}\n\t\tswitch node.LabelableID.Type {\n\t\tcase \"Comment\":\n\t\t\tcomment, err := r.repos.Comment().load.Get(ctx, node.LabelableID.String)\n\t\t\tif err != nil {\n\t\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tuserID = comment.UserID\n\t\tcase \"Lesson\":\n\t\t\tlesson, err := r.repos.Lesson().load.Get(ctx, node.LabelableID.String)\n\t\t\tif err != nil {\n\t\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tuserID = lesson.UserID\n\t\tcase \"UserAsset\":\n\t\t\tlesson, err := r.repos.UserAsset().load.Get(ctx, node.LabelableID.String)\n\t\t\tif err != nil {\n\t\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tuserID = lesson.UserID\n\t\t}\n\t\treturn vid == userID.String, nil\n\tcase *data.Labeled:\n\t\tuserID := mytype.OID{}\n\t\tswitch node.LabelableID.Type {\n\t\tcase \"Comment\":\n\t\t\tcomment, err := r.repos.Comment().load.Get(ctx, node.LabelableID.String)\n\t\t\tif err != nil 
{\n\t\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tuserID = comment.UserID\n\t\tcase \"Lesson\":\n\t\t\tlesson, err := r.repos.Lesson().load.Get(ctx, node.LabelableID.String)\n\t\t\tif err != nil {\n\t\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tuserID = lesson.UserID\n\t\tcase \"UserAsset\":\n\t\t\tlesson, err := r.repos.UserAsset().load.Get(ctx, node.LabelableID.String)\n\t\t\tif err != nil {\n\t\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tuserID = lesson.UserID\n\t\t}\n\t\treturn vid == userID.String, nil\n\tcase data.Lesson:\n\t\tuserID := &node.UserID\n\t\tif node.UserID.Status == pgtype.Undefined {\n\t\t\tlesson, err := r.repos.Lesson().load.Get(ctx, node.ID.String)\n\t\t\tif err != nil {\n\t\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tuserID = &lesson.UserID\n\t\t}\n\t\treturn vid == userID.String, nil\n\tcase *data.Lesson:\n\t\tuserID := &node.UserID\n\t\tif node.UserID.Status == pgtype.Undefined {\n\t\t\tlesson, err := r.repos.Lesson().load.Get(ctx, node.ID.String)\n\t\t\tif err != nil {\n\t\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tuserID = &lesson.UserID\n\t\t}\n\t\treturn vid == userID.String, nil\n\tcase data.LessonDraftBackup:\n\t\tlesson, err := r.repos.Lesson().load.Get(ctx, node.LessonID.String)\n\t\tif err != nil {\n\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\treturn false, err\n\t\t}\n\t\tuserID := &lesson.UserID\n\t\treturn vid == userID.String, nil\n\tcase *data.LessonDraftBackup:\n\t\tlesson, err := r.repos.Lesson().load.Get(ctx, node.LessonID.String)\n\t\tif err != nil {\n\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\treturn false, err\n\t\t}\n\t\tuserID := &lesson.UserID\n\t\treturn vid == userID.String, nil\n\tcase data.Notification:\n\t\tuserID := 
&node.UserID\n\t\tif node.UserID.Status == pgtype.Undefined {\n\t\t\tnotification, err := r.repos.Notification().load.Get(ctx, node.ID.String)\n\t\t\tif err != nil {\n\t\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tuserID = &notification.UserID\n\t\t}\n\t\treturn vid == userID.String, nil\n\tcase *data.Notification:\n\t\tuserID := &node.UserID\n\t\tif node.UserID.Status == pgtype.Undefined {\n\t\t\tnotification, err := r.repos.Notification().load.Get(ctx, node.ID.String)\n\t\t\tif err != nil {\n\t\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tuserID = &notification.UserID\n\t\t}\n\t\treturn vid == userID.String, nil\n\tcase data.PRT:\n\t\treturn vid == node.UserID.String, nil\n\tcase *data.PRT:\n\t\treturn vid == node.UserID.String, nil\n\tcase data.Study:\n\t\tuserID := &node.UserID\n\t\tif node.UserID.Status == pgtype.Undefined {\n\t\t\tstudy, err := r.repos.Study().load.Get(ctx, node.ID.String)\n\t\t\tif err != nil {\n\t\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tuserID = &study.UserID\n\t\t}\n\t\treturn vid == userID.String, nil\n\tcase *data.Study:\n\t\tuserID := &node.UserID\n\t\tif node.UserID.Status == pgtype.Undefined {\n\t\t\tstudy, err := r.repos.Study().load.Get(ctx, node.ID.String)\n\t\t\tif err != nil {\n\t\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tuserID = &study.UserID\n\t\t}\n\t\treturn vid == userID.String, nil\n\tcase data.Topiced:\n\t\tuserID := mytype.OID{}\n\t\tswitch node.TopicableID.Type {\n\t\tcase \"Course\":\n\t\t\tcourse, err := r.repos.Course().load.Get(ctx, node.TopicableID.String)\n\t\t\tif err != nil {\n\t\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tuserID = course.UserID\n\t\tcase \"Study\":\n\t\t\tstudy, err := r.repos.Study().load.Get(ctx, 
node.TopicableID.String)\n\t\t\tif err != nil {\n\t\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tuserID = study.UserID\n\t\t}\n\t\treturn vid == userID.String, nil\n\tcase *data.Topiced:\n\t\tuserID := mytype.OID{}\n\t\tswitch node.TopicableID.Type {\n\t\tcase \"Course\":\n\t\t\tcourse, err := r.repos.Course().load.Get(ctx, node.TopicableID.String)\n\t\t\tif err != nil {\n\t\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tuserID = course.UserID\n\t\tcase \"Study\":\n\t\t\tstudy, err := r.repos.Study().load.Get(ctx, node.TopicableID.String)\n\t\t\tif err != nil {\n\t\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tuserID = study.UserID\n\t\t}\n\t\treturn vid == userID.String, nil\n\tcase data.User:\n\t\treturn vid == node.ID.String, nil\n\tcase *data.User:\n\t\treturn vid == node.ID.String, nil\n\tcase data.UserAsset:\n\t\tuserID := &node.UserID\n\t\tif node.UserID.Status == pgtype.Undefined {\n\t\t\tuserAsset, err := r.repos.UserAsset().load.Get(ctx, node.ID.String)\n\t\t\tif err != nil {\n\t\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tuserID = &userAsset.UserID\n\t\t}\n\t\treturn vid == userID.String, nil\n\tcase *data.UserAsset:\n\t\tuserID := &node.UserID\n\t\tif node.UserID.Status == pgtype.Undefined {\n\t\t\tuserAsset, err := r.repos.UserAsset().load.Get(ctx, node.ID.String)\n\t\t\tif err != nil {\n\t\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tuserID = &userAsset.UserID\n\t\t}\n\t\treturn vid == userID.String, nil\n\tdefault:\n\t\treturn false, nil\n\t}\n\treturn false, nil\n}", "func isShareable(mode string) bool {\n\treturn mode == \"shareable\"\n}", "func (o *LocalDatabaseProvider) HasUserScripts() bool {\n\tif o != nil && o.UserScripts != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func 
isGoferTaskFile(file *ast.File) bool {\n for _, imprt := range file.Imports {\n if PACKAGE_NAME == file.Name.String() && strings.ContainsAny(imprt.Path.Value, EXPECTED_IMPORT) {\n return true\n }\n }\n\n return false\n}", "func (e *htmlTag) ContainPlainText() bool {\n\treturn e.containPlainText\n}", "func H35(node *html.Node) (string, bool) {\n\tif hasOneChild(node) {\n\t\tif isTextNode(node.FirstChild) {\n\t\t\tif attributeCheckValEmpty(node.Attr, \"alt\") {\n\t\t\t\treturn Applicable, true\n\t\t\t} else {\n\t\t\t\treturn Applicable, false\n\t\t\t}\n\t\t}\n\t}\n\treturn NA, false\n\n}", "func (me TAttlistOtherIDSource) IsArpl() bool { return me.String() == \"ARPL\" }", "func (_Abi *AbiSession) IsWrappedAsset(arg0 common.Address) (bool, error) {\n\treturn _Abi.Contract.IsWrappedAsset(&_Abi.CallOpts, arg0)\n}", "func CanUseDownloadShareLink(url string) bool {\n\tmatch, _ := regexp.MatchString(\"vm.tiktok.com\\\\/.+\", url)\n\treturn match\n}", "func (m *HPAMetric) IsExternal() bool {\n\treturn m.Program != \"\"\n}", "func isServerReadAheadDirective(directive string) bool {\n\tif isEqualString(directive, ServerReadAheadDirective) {\n\t\treturn true\n\t}\n\treturn false\n}", "func TrustedTag(tag string) bool {\n\tif tag == core.DNS || tag == core.CERT || tag == core.ARCHIVE || tag == core.AXFR {\n\t\treturn true\n\t}\n\treturn false\n}", "func (this ActivityStreamsImagePropertyIterator) IsActivityStreamsLink() bool {\n\treturn this.activitystreamsLinkMember != nil\n}", "func hasSSE42Asm() bool", "func (client *Client) ScriptLoad(scriptText string) string {\n v, _ := client.Do(\"SCRIPT\", \"LOAD\", scriptText)\n return v.String()\n}" ]
[ "0.7577506", "0.6722259", "0.65249646", "0.6250946", "0.6023484", "0.5208977", "0.49847195", "0.4919317", "0.48820964", "0.47879615", "0.47224522", "0.4716671", "0.46345818", "0.44537893", "0.4429202", "0.42752665", "0.42705268", "0.42685997", "0.42263883", "0.42169845", "0.4215349", "0.42065543", "0.41709965", "0.41664916", "0.4104369", "0.4103697", "0.4097135", "0.40903115", "0.40647843", "0.40622243", "0.40566933", "0.40447986", "0.40441346", "0.40268067", "0.40061998", "0.3989688", "0.39834148", "0.3980252", "0.39714494", "0.39704984", "0.39694777", "0.39661986", "0.39308795", "0.39285108", "0.390374", "0.39000806", "0.38938206", "0.38935098", "0.38931754", "0.38898048", "0.38827753", "0.3877998", "0.38706845", "0.38601488", "0.38519564", "0.38443565", "0.3839979", "0.38152716", "0.3814346", "0.38101858", "0.38004655", "0.37953603", "0.37899178", "0.37887332", "0.3788708", "0.37863714", "0.37782794", "0.37770715", "0.37757394", "0.37667322", "0.37659782", "0.37569612", "0.37543675", "0.3749148", "0.37460905", "0.3743218", "0.37380946", "0.37336287", "0.37307313", "0.37290943", "0.37242714", "0.3719495", "0.3714287", "0.37099704", "0.37096965", "0.37026164", "0.37017444", "0.36950895", "0.36942422", "0.36905104", "0.3689477", "0.36831692", "0.36816865", "0.36779928", "0.36672613", "0.36672315", "0.36670312", "0.36647832", "0.36640057", "0.36468726" ]
0.8310235
0
IsScriptRenderDelaying returns true if the node has one of these values for attribute 'customelement': ampdynamiccssclasses, ampexperiment, ampstory.
func IsScriptRenderDelaying(n *html.Node) bool { if n.DataAtom != atom.Script { return false } if IsScriptAMPViewer(n) { return true } if v, ok := htmlnode.GetAttributeVal(n, "", AMPCustomElement); ok { // TODO(b/77581738): Remove amp-story from this list. return (v == AMPDynamicCSSClasses || v == AMPExperiment || v == AMPStory) } return false }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func IsScriptAMPRuntime(n *html.Node) bool {\n\tif n.DataAtom != atom.Script {\n\t\treturn false\n\t}\n\tif v, ok := htmlnode.GetAttributeVal(n, \"\", \"src\"); ok {\n\t\treturn htmlnode.HasAttribute(n, \"\", \"async\") &&\n\t\t\t!IsScriptAMPExtension(n) &&\n\t\t\tstrings.HasPrefix(v, AMPCacheRootURL) &&\n\t\t\t(strings.HasSuffix(v, \"/v0.js\") ||\n\t\t\t\tstrings.HasSuffix(v, \"/v0.mjs\") ||\n\t\t\t\tstrings.HasSuffix(v, \"/amp4ads-v0.js\") ||\n\t\t\t\tstrings.HasSuffix(v, \"/amp4ads-v0.mjs\"))\n\t}\n\treturn false\n}", "func (me TxsdPresentationAttributesColorColorRendering) IsAuto() bool { return me.String() == \"auto\" }", "func (me TxsdAnimTimingAttrsRestart) IsWhenNotActive() bool { return me.String() == \"whenNotActive\" }", "func (me TxsdPresentationAttributesColorColorRendering) IsOptimizeSpeed() bool {\n\treturn me.String() == \"optimizeSpeed\"\n}", "func (me TxsdPresentationAttributesGraphicsTextRendering) IsAuto() bool { return me.String() == \"auto\" }", "func (me TxsdPresentationAttributesTextContentElementsDominantBaseline) IsAutosenseScript() bool {\n\treturn me.String() == \"autosense-script\"\n}", "func (me TxsdPresentationAttributesGraphicsTextRendering) IsOptimizeSpeed() bool {\n\treturn me.String() == \"optimizeSpeed\"\n}", "func IsScriptAMPViewer(n *html.Node) bool {\n\tif n.DataAtom != atom.Script {\n\t\treturn false\n\t}\n\ta, ok := htmlnode.FindAttribute(n, \"\", \"src\")\n\treturn ok &&\n\t\t!IsScriptAMPExtension(n) &&\n\t\tstrings.HasPrefix(a.Val,\n\t\t\tAMPCacheSchemeAndHost+\"/v0/amp-viewer-integration-\") &&\n\t\tstrings.HasSuffix(a.Val, \".js\") &&\n\t\thtmlnode.HasAttribute(n, \"\", \"async\")\n}", "func (me TxsdColorProfileTypeRenderingIntent) IsAuto() bool { return me.String() == \"auto\" }", "func (me TxsdAnimTimingAttrsRestart) IsAlways() bool { return me.String() == \"always\" }", "func (me TxsdAnimTimingAttrsFill) IsFreeze() bool { return me.String() == \"freeze\" }", "func (me TxsdPresentationAttributesColorColorRendering) 
IsOptimizeQuality() bool {\n\treturn me.String() == \"optimizeQuality\"\n}", "func (me TxsdPresentationAttributesColorColorInterpolation) IsAuto() bool {\n\treturn me.String() == \"auto\"\n}", "func (t *Trigger) needsDelay() (bool, time.Duration) {\n\tif t.params.MinInterval == time.Duration(0) {\n\t\treturn false, 0\n\t}\n\n\tsleepTime := time.Since(t.lastTrigger.Add(t.params.MinInterval))\n\treturn sleepTime < 0, sleepTime * -1\n}", "func (me TxsdPresentationAttributesGraphicsShapeRendering) IsOptimizeSpeed() bool {\n\treturn me.String() == \"optimizeSpeed\"\n}", "func (me TxsdAnimTimingAttrsRestart) IsNever() bool { return me.String() == \"never\" }", "func (me TxsdTimeImpactSimpleContentExtensionMetric) IsDowntime() bool {\n\treturn me.String() == \"downtime\"\n}", "func (delivery_instructions DeliveryInstructions) HasDelay() (bool, error) {\n\tif len(delivery_instructions) >= 1 {\n\t\t/*\n\t\t\t Check if the 4 bit of the Delivery Instructions\n\t\t\t is set using binary AND operator to determine\n\t\t\t if the Delivery Instructions has a delay\n\n\t\t\t xxx1xxxx\t xxx0xxxx\n\t\t\t &00010000\t &00010000\n\t\t\t ---------\t ---------\n\t\t\t 00010000\t 00000000\n\n\t\t\t bit is set,\t\tbit is not set,\n\t\t\t delay is included no delay included\n\n\t\t\tDelay is unimplemented in the Java router, a warning\n\t\t\tis logged as this is interesting behavior.\n\t\t*/\n\t\tdelay := (delivery_instructions[0] & 0x10) == 0x10\n\t\tif delay {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"at\": \"(DeliveryInstructions) HasDelay\",\n\t\t\t\t\"info\": \"this feature is unimplemented in the Java router\",\n\t\t\t}).Warn(\"DeliveryInstructions found with delay bit set\")\n\t\t}\n\t\treturn delay, nil\n\t}\n\treturn false, errors.New(\"DeliveryInstructions contains no data\")\n}", "func (o *V0037JobProperties) HasDelayBoot() bool {\n\tif o != nil && o.DelayBoot != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func IsRendered(wid fyne.Widget) bool 
{\n\trenderersLock.RLock()\n\t_, found := renderers[wid]\n\trenderersLock.RUnlock()\n\treturn found\n}", "func (me TxsdPresentationAttributesGraphicsTextRendering) IsOptimizeLegibility() bool {\n\treturn me.String() == \"optimizeLegibility\"\n}", "func (me TxsdPresentationAttributesGraphicsShapeRendering) IsAuto() bool {\n\treturn me.String() == \"auto\"\n}", "func (me TxsdTimeImpactSimpleContentExtensionMetric) IsElapsed() bool {\n\treturn me.String() == \"elapsed\"\n}", "func isScript(n *html.Node) bool {\n\treturn n.Type == html.ElementNode && n.Data == \"script\"\n}", "func (me TxsdPresentationAttributesViewportsOverflow) IsAuto() bool { return me.String() == \"auto\" }", "func (me TGetAssignmentsForHITSortProperty) IsAcceptTime() bool { return me.String() == \"AcceptTime\" }", "func IsAMPCustomElement(n *html.Node) bool {\n\treturn n.Type == html.ElementNode && strings.HasPrefix(n.Data, \"amp-\")\n}", "func (me TxsdPresentationAttributesColorColorRendering) IsInherit() bool {\n\treturn me.String() == \"inherit\"\n}", "func IsScriptAMPExtension(n *html.Node) bool {\n\t_, ok := AMPExtensionName(n)\n\treturn ok\n}", "func (o *GuardianPolicyDataData) HasPolicyDelayedTime() bool {\n\tif o != nil && o.PolicyDelayedTime != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *WafTrafficPolicy) HasAntiautomationAndBehavioral() bool {\n\tif o != nil && o.AntiautomationAndBehavioral != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (delay Delay) Validate() bool {\n\tret := true\n\n\ttime := &Time{}\n\ttime.SetValue(delay.value)\n\tret = time.Validate()\n\n\tif ret != true {\n\t\tlog.Println(\"Failed to validate delay '\" + delay.value + \"'\")\n\t}\n\n\treturn ret\n}", "func (r *RoleTriggerScript) HasScript() bool {\n\treturn r.hasScript\n}", "func (me TxsdPresentationAttributesGraphicsTextRendering) IsInherit() bool {\n\treturn me.String() == \"inherit\"\n}", "func (me TxsdPresentationAttributesTextContentElementsDominantBaseline) IsAuto() bool 
{\n\treturn me.String() == \"auto\"\n}", "func (p *Probe) IsRuntimeCompiled() bool {\n\treturn p.runtimeCompiled\n}", "func (o *DisplayInfo) HasScript() bool {\n\tif o != nil && o.Script != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *Content) HasInitTimeout() bool {\n\tif o != nil && o.InitTimeout.IsSet() {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (me TxsdPresentationAttributesGraphicsDisplay) IsRunIn() bool { return me.String() == \"run-in\" }", "func (throttler *Throttler) isDormant() bool {\n\tlastCheckTime := time.Unix(0, atomic.LoadInt64(&throttler.lastCheckTimeNano))\n\treturn time.Since(lastCheckTime) > dormantPeriod\n}", "func (t ResolvedPipelineRunTask) IsCustomTask() bool {\n\treturn t.CustomTask\n}", "func (fn *sepHarnessBase) delay() bool {\n\tsepClientMu.Lock()\n\tdefer sepClientMu.Unlock()\n\tvar delay bool\n\terr := sepClient.Call(\"Watchers.Delay\", &Args{WatcherID: fn.WatcherID}, &delay)\n\tif err != nil {\n\t\tslog.Error(\"Watchers.Delay error\", err)\n\t\tpanic(err)\n\t}\n\treturn delay\n}", "func CfnWaitCondition_IsCfnElement(x interface{}) *bool {\n\t_init_.Initialize()\n\n\tvar returns *bool\n\n\t_jsii_.StaticInvoke(\n\t\t\"aws-cdk-lib.aws_cloudformation.CfnWaitCondition\",\n\t\t\"isCfnElement\",\n\t\t[]interface{}{x},\n\t\t&returns,\n\t)\n\n\treturn returns\n}", "func (me TxsdPresentationAttributesGraphicsPointerEvents) IsVisible() bool {\n\treturn me.String() == \"visible\"\n}", "func (r *Release) isConsideredToRun() bool {\n\tif r == nil {\n\t\treturn false\n\t}\n\treturn !r.disabled\n}", "func (o *DnsZoneDataData) HasZoneDelayedCreateTime() bool {\n\tif o != nil && o.ZoneDelayedCreateTime != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (e ElementType) IsSynaptic() bool {\n\tswitch e {\n\tcase PostSyn, PreSyn, Gap:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}", "func (o *StackpathRpcRetryInfoAllOf) HasRetryDelay() bool {\n\tif o != nil && o.RetryDelay != nil {\n\t\treturn 
true\n\t}\n\n\treturn false\n}", "func (o *DhcpRangeDataData) HasRangeDelayedCreateTime() bool {\n\tif o != nil && o.RangeDelayedCreateTime != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (so *SocketOptions) GetDelayOption() bool {\n\treturn so.delayOptionEnabled.Load() != 0\n}", "func (me TxsdPresentationAttributesFontSpecificationFontWeight) Is900() bool {\n\treturn me.String() == \"900\"\n}", "func (me TxsdPresentationAttributesTextElementsWritingMode) IsLr() bool { return me.String() == \"lr\" }", "func (me TxsdPresentationAttributesTextElementsWritingMode) IsInherit() bool {\n\treturn me.String() == \"inherit\"\n}", "func (me TxsdColorProfileTypeRenderingIntent) IsRelativeColorimetric() bool {\n\treturn me.String() == \"relative-colorimetric\"\n}", "func (o *DnsViewparamDataData) HasViewparamDelayedCreateTime() bool {\n\tif o != nil && o.ViewparamDelayedCreateTime != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (policy *ticketPolicy) IsAutoMining() bool {\n\treturn policy.isAutoMining()\n}", "func (me TxsdPresentationAttributesViewportsOverflow) IsVisible() bool {\n\treturn me.String() == \"visible\"\n}", "func (me TxsdPresentationAttributesGraphicsVisibility) IsVisible() bool {\n\treturn me.String() == \"visible\"\n}", "func (th *transitionHandler) IsPreparingTimedOut(sw stateswitch.StateSwitch, args stateswitch.TransitionArgs) (bool, error) {\n\tsCluster, ok := sw.(*stateCluster)\n\tif !ok {\n\t\treturn false, errors.New(\"IsPreparingTimedOut incompatible type of StateSwitch\")\n\t}\n\t// can happen if the service was rebooted or somehow the async part crashed.\n\tif time.Since(time.Time(sCluster.cluster.StatusUpdatedAt)) > th.prepareConfig.PrepareForInstallationTimeout {\n\t\treturn true, nil\n\t}\n\treturn false, nil\n}", "func (th *transitionHandler) IsPreparingTimedOut(sw stateswitch.StateSwitch, args stateswitch.TransitionArgs) (bool, error) {\n\tsCluster, ok := sw.(*stateCluster)\n\tif !ok {\n\t\treturn false, 
errors.New(\"IsPreparingTimedOut incompatible type of StateSwitch\")\n\t}\n\t// can happen if the service was rebooted or somehow the async part crashed.\n\tif time.Since(time.Time(sCluster.cluster.StatusUpdatedAt)) > th.prepareConfig.InstallationTimeout {\n\t\treturn true, nil\n\t}\n\treturn false, nil\n}", "func (me TRequesterStatistic) IsEstimatedRewardLiability() bool {\n\treturn me.String() == \"EstimatedRewardLiability\"\n}", "func (me TxsdPresentationAttributesFontSpecificationFontWeight) Is200() bool {\n\treturn me.String() == \"200\"\n}", "func (me TxsdPresentationAttributesGraphicsPointerEvents) IsVisiblePainted() bool {\n\treturn me.String() == \"visiblePainted\"\n}", "func (self *PhysicsP2) UseElapsedTime() bool{\n return self.Object.Get(\"useElapsedTime\").Bool()\n}", "func (rb *QueryDelayRetryBehavior) CanRetry(retries uint) bool {\n\treturn retries < rb.maxRetries\n}", "func (me TxsdPresentationAttributesFontSpecificationFontWeight) Is300() bool {\n\treturn me.String() == \"300\"\n}", "func (me TxsdActuate) IsOnLoad() bool { return me == \"onLoad\" }", "func (t *Task) IsTimeout() bool {\n\treturn time.Now().Sub(t.AssignedAt) > TIMEOUT\n}", "func IsNodeCordoned(node *v1.Node) (bool, time.Time) {\n\tif node.Spec.Unschedulable {\n\t\tfor _, taint := range node.Spec.Taints {\n\t\t\tif taint.Key == v1.TaintNodeUnschedulable {\n\t\t\t\tif taint.TimeAdded != nil {\n\t\t\t\t\treturn true, taint.TimeAdded.Time\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\treturn true, time.Time{}\n\t}\n\treturn false, time.Time{}\n}", "func (me TxsdAnimValueAttrsCalcMode) IsPaced() bool { return me.String() == \"paced\" }", "func (o IntentOutput) IsFallback() pulumi.BoolOutput {\n\treturn o.ApplyT(func(v *Intent) pulumi.BoolOutput { return v.IsFallback }).(pulumi.BoolOutput)\n}", "func IsNodeRecentlyCordoned(\n\tnode *v1.Node,\n\tcluster *corev1.StorageCluster,\n) bool {\n\tcordoned, startTime := IsNodeCordoned(node)\n\tif !cordoned || startTime.IsZero() {\n\t\treturn 
false\n\t}\n\n\tvar waitDuration time.Duration\n\tif duration, err := strconv.Atoi(cluster.Annotations[constants.AnnotationCordonedRestartDelay]); err == nil {\n\t\twaitDuration = time.Duration(duration) * time.Second\n\t} else {\n\t\twaitDuration = constants.DefaultCordonedRestartDelay\n\t}\n\treturn time.Now().Add(-waitDuration).Before(startTime)\n}", "func shown() bool {\n\treturn !js.Global.Get(\"document\").Get(\"hidden\").Bool()\n}", "func (me TxsdPresentationAttributesColorColorInterpolation) IsInherit() bool {\n\treturn me.String() == \"inherit\"\n}", "func (me TxsdPresentationAttributesFillStrokeStrokeLinecap) IsButt() bool {\n\treturn me.String() == \"butt\"\n}", "func IsStress() bool {\n\tif _, ok := os.LookupEnv(\"stress\"); ok {\n\t\treturn true\n\t}\n\treturn false\n}", "func (me TxsdPresentationAttributesFontSpecificationFontWeight) IsLighter() bool {\n\treturn me.String() == \"lighter\"\n}", "func (c *jsiiProxy_CfnWaitCondition) ShouldSynthesize() *bool {\n\tvar returns *bool\n\n\t_jsii_.Invoke(\n\t\tc,\n\t\t\"shouldSynthesize\",\n\t\tnil, // no parameters\n\t\t&returns,\n\t)\n\n\treturn returns\n}", "func (me TxsdActuate) IsOnLoad() bool { return me.String() == \"onLoad\" }", "func (s TiFlashSpec) IsImported() bool {\n\treturn s.Imported\n}", "func (me TxsdFeBlendTypeMode) IsLighten() bool { return me.String() == \"lighten\" }", "func (setting *DDLStrategySetting) IsPreferInstantDDL() bool {\n\treturn setting.hasFlag(preferInstantDDL)\n}", "func (ts TaskSpec) HasLaunchPolicyRestart() bool {\n\treturn ts.LaunchPolicy == LaunchPolicyRestart\n}", "func (packet *ReliablePacket) IsSequenced() bool {\n\treturn packet.Reliability == UnreliableSequenced || packet.Reliability == ReliableSequenced\n}", "func (m CrossOrderCancelReplaceRequest) HasEffectiveTime() bool {\n\treturn m.Has(tag.EffectiveTime)\n}", "func (s *TiFlashSpec) IsImported() bool {\n\treturn s.Imported\n}", "func (me TxsdAnimValueAttrsCalcMode) IsSpline() bool { return me.String() == 
\"spline\" }", "func (me TxsdPresentationAttributesGraphicsVisibility) IsInherit() bool {\n\treturn me.String() == \"inherit\"\n}", "func (o *SyntheticsBrowserTestResultShortResult) HasDuration() bool {\n\tif o != nil && o.Duration != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (me TxsdPresentationAttributesTextElementsWritingMode) IsLrTb() bool {\n\treturn me.String() == \"lr-tb\"\n}", "func (o *PhysicsDirectBodyState) IsSleeping() gdnative.Bool {\n\t//log.Println(\"Calling PhysicsDirectBodyState.IsSleeping()\")\n\n\t// Build out the method's arguments\n\tptrArguments := make([]gdnative.Pointer, 0, 0)\n\n\t// Get the method bind\n\tmethodBind := gdnative.NewMethodBind(\"PhysicsDirectBodyState\", \"is_sleeping\")\n\n\t// Call the parent method.\n\t// bool\n\tretPtr := gdnative.NewEmptyBool()\n\tgdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)\n\n\t// If we have a return type, convert it from a pointer into its actual object.\n\tret := gdnative.NewBoolFromPointer(retPtr)\n\treturn ret\n}", "func (me TxsdPresentationAttributesTextElementsWritingMode) IsRlTb() bool {\n\treturn me.String() == \"rl-tb\"\n}", "func (self *TraitPixbufAnimationIter) GetDelayTime() (return__ int) {\n\tvar __cgo__return__ C.int\n\t__cgo__return__ = C.gdk_pixbuf_animation_iter_get_delay_time(self.CPointer)\n\treturn__ = int(__cgo__return__)\n\treturn\n}", "func (t Trigger) IsDeferred() bool {\n\treturn t.Event == EVENT_TYPE_DEFERRED\n}", "func CfnCustomResource_IsCfnElement(x interface{}) *bool {\n\t_init_.Initialize()\n\n\tvar returns *bool\n\n\t_jsii_.StaticInvoke(\n\t\t\"aws-cdk-lib.aws_cloudformation.CfnCustomResource\",\n\t\t\"isCfnElement\",\n\t\t[]interface{}{x},\n\t\t&returns,\n\t)\n\n\treturn returns\n}", "func CfnJobTemplate_IsCfnElement(x interface{}) *bool {\n\t_init_.Initialize()\n\n\tvar returns 
*bool\n\n\t_jsii_.StaticInvoke(\n\t\t\"monocdk.aws_mediaconvert.CfnJobTemplate\",\n\t\t\"isCfnElement\",\n\t\t[]interface{}{x},\n\t\t&returns,\n\t)\n\n\treturn returns\n}", "func (w *Widget) IsCached() (template.HTML, bool) {\n\t// Lock mutex\n\tw.rw.RLock()\n\tdefer w.rw.RUnlock()\n\n\t// Get value from cache\n\tbuff, found := Cache.Get(\n\t\tfmt.Sprintf(\"widget_%v\", w.Name),\n\t)\n\n\tif !found {\n\t\treturn \"\", false\n\t}\n\n\treturn buff.(template.HTML), true\n}", "func (o *V0037Node) HasSlurmdStartTime() bool {\n\tif o != nil && o.SlurmdStartTime != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (me TxsdPresentationAttributesFontSpecificationFontWeight) IsInherit() bool {\n\treturn me.String() == \"inherit\"\n}", "func (me TxsdPresentationAttributesTextElementsWritingMode) IsRl() bool { return me.String() == \"rl\" }" ]
[ "0.54627293", "0.5384497", "0.52935505", "0.52349025", "0.5183518", "0.51164085", "0.5027169", "0.49183807", "0.48826963", "0.48015487", "0.47464487", "0.47278962", "0.4711677", "0.4694349", "0.46618658", "0.46478906", "0.46302602", "0.460766", "0.45946616", "0.45566878", "0.45514327", "0.45060393", "0.44965526", "0.44458428", "0.44361484", "0.44323373", "0.43953", "0.43682906", "0.43625385", "0.4361285", "0.4352202", "0.4351187", "0.43476397", "0.43358973", "0.4329126", "0.43140528", "0.4308332", "0.42977285", "0.42967382", "0.42804697", "0.4268571", "0.42504492", "0.42290753", "0.42256293", "0.42251998", "0.42182076", "0.4208899", "0.42069322", "0.42042184", "0.42019516", "0.4199266", "0.41895527", "0.4187073", "0.41857016", "0.41709125", "0.41676855", "0.4165849", "0.41649655", "0.41550612", "0.415114", "0.41489825", "0.41444126", "0.41385078", "0.41266316", "0.41230738", "0.41148463", "0.40882123", "0.408781", "0.4086346", "0.4086312", "0.4074669", "0.40743446", "0.40733945", "0.40677947", "0.4065792", "0.40553093", "0.40489283", "0.40487626", "0.4042153", "0.40332013", "0.40268174", "0.402641", "0.40110406", "0.40055352", "0.400437", "0.4000661", "0.39970487", "0.39949164", "0.399144", "0.39902622", "0.3989437", "0.39872473", "0.39824232", "0.39824083", "0.39768413", "0.39734766", "0.39567187", "0.39459592", "0.3943776", "0.39429334" ]
0.8075554
0
NewDOM constructs and returns a pointer to a DOM struct by finding the HTML nodes relevant to an AMP Document or an error if there was a problem. TODO(alin04): I don't think this can EVER return an error. The golang parser creates all these nodes if they're missing.
func NewDOM(n *html.Node) (*DOM, error) { var ok bool d := &DOM{RootNode: n} if d.HTMLNode, ok = htmlnode.FindNode(n, atom.Html); !ok { return d, errors.New("missing <html> node") } if d.HeadNode, ok = htmlnode.FindNode(d.HTMLNode, atom.Head); !ok { return d, errors.New("missing <head> node") } if d.BodyNode, ok = htmlnode.FindNode(d.HTMLNode, atom.Body); !ok { return d, errors.New("missing <body> node") } return d, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewDOM() *DOM {\n\treturn &DOM{\n\t\tGlobalInst: map[string]*Instruction{},\n\t\tScenarios: map[string]*Instruction{},\n\t\tSubScenarios: map[string]*Instruction{},\n\t\tOtherScenarios: map[string]map[string]*Instruction{},\n\t\tOtherSubScenarios: map[string]map[string]*Instruction{},\n\t\tCoveredPaths: map[string]*Path{},\n\t}\n}", "func FromHTMLParseTree(h *html.Node, css cssom.StyleSheet) *W3CNode {\n\tif h == nil {\n\t\tT().Infof(\"Cannot create DOM for null-HTML\")\n\t\treturn nil\n\t}\n\tstyles := douceuradapter.ExtractStyleElements(h)\n\tT().Debugf(\"Extracted %d <style> elements\", len(styles))\n\ts := cssom.NewCSSOM(nil) // nil = no additional properties\n\tfor _, sty := range styles {\n\t\ts.AddStylesForScope(nil, sty, cssom.Script)\n\t}\n\tif css != nil {\n\t\ts.AddStylesForScope(nil, css, cssom.Author)\n\t}\n\tstytree, err := s.Style(h, styledtree.Creator())\n\tif err != nil {\n\t\tT().Errorf(\"Cannot style test document: %s\", err.Error())\n\t\treturn nil\n\t}\n\treturn domify(stytree)\n}", "func NewHTMLParser(t testing.TB, body *bytes.Buffer) *HTMLDoc {\n\tt.Helper()\n\tdoc, err := goquery.NewDocumentFromReader(body)\n\tassert.NoError(t, err)\n\treturn &HTMLDoc{doc: doc}\n}", "func NewDocForTesting(html string) js.Value {\n\tdom := jsdom.New(html, map[string]interface{}{\n\t\t\"runScripts\": \"dangerously\",\n\t\t\"resources\": \"usable\",\n\t})\n\n\t// Create doc, but then wait until loading is complete and constructed\n\t// doc is returned. 
By default, jsdom loads doc asynchronously:\n\t// https://oliverjam.es/blog/frontend-testing-node-jsdom/#waiting-for-external-resources\n\tc := make(chan js.Value)\n\tdom.Get(\"window\").Call(\n\t\t\"addEventListener\", \"load\",\n\t\tjsutil.OneTimeFuncOf(func(this js.Value, args []js.Value) interface{} {\n\t\t\tc <- dom.Get(\"window\").Get(\"document\")\n\t\t\treturn nil\n\t\t}))\n\treturn <-c\n}", "func NewHTML(children ...Element) Element {\n\treturn newWithChildren(\"html\", children)\n}", "func Parse(content, inEncoding, url []byte, options int, outEncoding []byte) (doc *HtmlDocument, err error) {\n\tinEncoding = AppendCStringTerminator(inEncoding)\n\toutEncoding = AppendCStringTerminator(outEncoding)\n\n\tvar docPtr *C.xmlDoc\n\tcontentLen := len(content)\n\n\tif contentLen > 0 {\n\t\tvar contentPtr, urlPtr, encodingPtr unsafe.Pointer\n\n\t\tcontentPtr = unsafe.Pointer(&content[0])\n\t\tif len(url) > 0 {\n\t\t\turl = AppendCStringTerminator(url)\n\t\t\turlPtr = unsafe.Pointer(&url[0])\n\t\t}\n\t\tif len(inEncoding) > 0 {\n\t\t\tencodingPtr = unsafe.Pointer(&inEncoding[0])\n\t\t}\n\n\t\tdocPtr = C.htmlParse(contentPtr, C.int(contentLen), urlPtr, encodingPtr, C.int(options), nil, 0)\n\n\t\tif docPtr == nil {\n\t\t\terr = ERR_FAILED_TO_PARSE_HTML\n\t\t} else {\n\t\t\tdoc = NewDocument(unsafe.Pointer(docPtr), contentLen, inEncoding, outEncoding)\n\t\t}\n\t}\n\tif docPtr == nil {\n\t\tdoc = CreateEmptyDocument(inEncoding, outEncoding)\n\t}\n\treturn\n}", "func ParseHTMLDocument(r io.ReadCloser) (*html.Node, error) {\n\tdoc, err := html.Parse(r)\n\n\tif err != nil {\n\t\tpanic(\"error parsing document\")\n\t}\n\n\treturn doc, nil\n}", "func ParseHTML(url string) *html.Node {\r\n\t_, body, err := fasthttp.Get(nil, url)\r\n\tif err != nil {\r\n\t\tpanic(err)\r\n\t}\r\n\tdocument, err := html.Parse(bytes.NewReader(body))\r\n\tif err != nil {\r\n\t\tpanic(err)\r\n\t}\r\n\treturn document\r\n}", "func GetParseableHTML(url string) (*html.Node, error) {\n\tresp, err := 
http.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\troot, err := html.Parse(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\treturn root, nil\n}", "func NewDomVisit(badFname string) *DomVisit {\n\tvar itm *DomVisit\n\titm = new(DomVisit)\n\titm.sm = new(sync.RWMutex)\n\tdv := make(map[string]struct{})\n\tbd := make(map[string]struct{})\n\titm.wg = new(sync.WaitGroup)\n\titm.re = regexp.MustCompile(\"([a-zA-Z0-9_\\\\-\\\\.]+)(\\\\/[\\\\/\\\\w\\\\.]+)?$\")\n\titm.domainsVisited = &dv\n\titm.badDomains = &bd\n\tif badFname != \"\" {\n\t\tvar err error\n\t\titm.badFile, err = os.OpenFile(\"badf.txt\", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)\n\t\tcheck(err)\n\t\titm.LoadBadFiles(badFname)\n\t}\n\n\treturn itm\n}", "func VerifyDOM(s string) bool { //(body io.ReadCloser) bool {\n\n\tbody := ioutil.NopCloser(strings.NewReader(s)) // r type is io.ReadCloser\n\tdefer body.Close()\n\n\t// Load the HTML document\n\tdoc, err := goquery.NewDocumentFromReader(body)\n\tcheck := false\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn false\n\t}\n\t// Find the review items\n\tdoc.Find(\".dalfox\").Each(func(i int, s *goquery.Selection) {\n\t\tcheck = true\n\t})\n\tif !check {\n\t\tdoc.Find(\"dalfox\").Each(func(i int, s *goquery.Selection) {\n\t\t\t// For each item found, get the band and title\n\t\t\tcheck = true\n\t\t})\n\t}\n\treturn check\n}", "func NewHTML(width, height float64, useWebGL bool) *HTML {\n\treturn &HTML{\n\t\tWidth: width,\n\t\tHeight: height,\n\t\tUseWebGL: useWebGL,\n\t}\n}", "func NewDocument(url *url.URL, node *html.Node, logger log.Logger) *Document {\n\treturn &Document{\n\t\turl: url,\n\t\tnode: node,\n\t\tlogger: logger,\n\t}\n}", "func NewElements() Elements {\n\treturn Elements{}\n}", "func ParseHTML(buf io.Reader) (*Object, error) {\n\tobj := newObject()\n\tisInsideHead := false\n\tisInsideTitle := false\n\tz := html.NewTokenizer(buf)\n\tfor {\n\t\ttt := z.Next()\n\t\tif tt == html.ErrorToken 
{\n\t\t\tif z.Err() == io.EOF {\n\t\t\t\treturn obj, nil\n\t\t\t}\n\n\t\t\treturn nil, z.Err()\n\t\t}\n\n\t\tisStartTagToken := tt == html.StartTagToken\n\t\tif !isInsideHead && !isStartTagToken {\n\t\t\tcontinue\n\t\t}\n\t\tif tt == html.CommentToken || tt == html.DoctypeToken {\n\t\t\tcontinue\n\t\t}\n\n\t\tif isInsideTitle {\n\t\t\tisInsideTitle = false\n\t\t\tif tt == html.TextToken {\n\t\t\t\ttitleText := string(z.Text())\n\t\t\t\tobj.Title = titleText\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tisEndTagToken := tt == html.EndTagToken\n\t\tisSelfClosingTagToken := tt == html.SelfClosingTagToken\n\t\tif isStartTagToken || isEndTagToken || isSelfClosingTagToken {\n\t\t\tname, hasAttr := z.TagName()\n\t\t\tnameAtom := atom.Lookup(name)\n\t\t\tif !isInsideHead {\n\t\t\t\tif nameAtom == atom.Head {\n\t\t\t\t\tif isStartTagToken {\n\t\t\t\t\t\tisInsideHead = true\n\t\t\t\t\t} else {\n\t\t\t\t\t\treturn obj, nil\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif nameAtom == atom.Title && isStartTagToken {\n\t\t\t\tif isStartTagToken {\n\t\t\t\t\tisInsideTitle = true\n\t\t\t\t} else if isEndTagToken {\n\t\t\t\t\tisInsideTitle = false\n\t\t\t\t}\n\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// skip if the current tag doesn't have any attributes or is an end\n\t\t\t// tag token\n\t\t\tif !hasAttr || isEndTagToken {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// base tag\n\t\t\tif nameAtom == atom.Base {\n\t\t\t\tvar key, value []byte\n\t\t\t\tvar keyString string\n\t\t\t\tfor hasAttr {\n\t\t\t\t\tkey, value, hasAttr = z.TagAttr()\n\t\t\t\t\tkeyString = atom.String(key)\n\t\t\t\t\tif keyString == attrHREF {\n\t\t\t\t\t\tif href := string(value); validator.ValidateHREF(href) {\n\t\t\t\t\t\t\tobj.Base = href\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// link tag\n\t\t\tif nameAtom == atom.Link {\n\t\t\t\tvar key, value []byte\n\t\t\t\tvar keyString, relValue string\n\t\t\t\tlink := &Link{}\n\t\t\t\tfor hasAttr {\n\t\t\t\t\tkey, value, hasAttr = 
z.TagAttr()\n\t\t\t\t\tkeyString = atom.String(key)\n\t\t\t\t\tif keyString == attrRel {\n\t\t\t\t\t\trelValue = string(value)\n\t\t\t\t\t} else if keyString == attrHREF {\n\t\t\t\t\t\tif href := string(value); validator.ValidateHREF(href) {\n\t\t\t\t\t\t\tlink.HREF = href\n\t\t\t\t\t\t}\n\t\t\t\t\t} else if keyString == attrType {\n\t\t\t\t\t\t// TODO: validation\n\t\t\t\t\t\tlink.Type = string(value)\n\t\t\t\t\t} else if keyString == attrTitle {\n\t\t\t\t\t\tlink.Title = string(value)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif relValue != \"\" {\n\t\t\t\t\tobj.Links[relValue] = append(obj.Links[relValue], link)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// meta tag\n\t\t\tif nameAtom == atom.Meta {\n\t\t\t\tvar key, value []byte\n\t\t\t\tvar keyString, propertyValue, contentValue string\n\t\t\t\tvar hasCharset bool\n\t\t\t\tfor hasAttr {\n\t\t\t\t\tkey, value, hasAttr = z.TagAttr()\n\t\t\t\t\tkeyString = atom.String(key)\n\t\t\t\t\tif keyString == attrCharset {\n\t\t\t\t\t\t// TODO: validation\n\t\t\t\t\t\tobj.Charset = string(value)\n\t\t\t\t\t\thasCharset = true\n\t\t\t\t\t\tbreak\n\t\t\t\t\t} else if keyString == attrProperty ||\n\t\t\t\t\t\tkeyString == attrName ||\n\t\t\t\t\t\tkeyString == attrHTTPEquiv ||\n\t\t\t\t\t\tkeyString == attrItemProp {\n\t\t\t\t\t\tpropertyValue = string(value)\n\t\t\t\t\t} else if keyString == \"content\" {\n\t\t\t\t\t\tcontentValue = string(value)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif !hasCharset && propertyValue != \"\" {\n\t\t\t\t\tobj.Metas[propertyValue] = contentValue\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func NewHtml(lang string, head *Head, body *Body) *Html {\n\th := new(Html)\n\th.Element = NewElement(\"html\")\n\tif lang != \"\" {\n\t\th.Lang = lang\n\t\th.SetAttribute(\"lang\", h.Lang)\n\t}\n\tif head != nil {\n\t\th.Head = head\n\t\th.AddElement(head.Element)\n\t}\n\tif body != nil {\n\t\th.Body = body\n\t\th.AddElement(body.Element)\n\t}\n\treturn h\n}", "func ParseHTML(resp *http.Response) (*html.Node, error) {\n\tr, err := 
readResponseBody(resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn htmlquery.Parse(r)\n}", "func (c *DOM) GetDocumentWithParams(v *DOMGetDocumentParams) (*DOMNode, error) {\n\tresp, err := gcdmessage.SendCustomReturn(c.target, c.target.GetSendCh(), &gcdmessage.ParamRequest{Id: c.target.GetId(), Method: \"DOM.getDocument\", Params: v})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar chromeData struct {\n\t\tResult struct {\n\t\t\tRoot *DOMNode\n\t\t}\n\t}\n\n\tif resp == nil {\n\t\treturn nil, &gcdmessage.ChromeEmptyResponseErr{}\n\t}\n\n\t// test if error first\n\tcerr := &gcdmessage.ChromeErrorResponse{}\n\tjson.Unmarshal(resp.Data, cerr)\n\tif cerr != nil && cerr.Error != nil {\n\t\treturn nil, &gcdmessage.ChromeRequestErr{Resp: cerr}\n\t}\n\n\tif err := json.Unmarshal(resp.Data, &chromeData); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn chromeData.Result.Root, nil\n}", "func NewElem(template string, elemValue interface{}) HtmlElementer {\n\telemName, ok := guessElemName(elemValue)\n\tif !ok {\n\t\tfmt.Println(\"Failed to guess element name\")\n\t\treturn nil\n\t}\n\n\t// vPointOfElem is reflect.Value of a pointer to the specific element\n\tvPointOfElem := reflect.New(elementTypeRegistry[elemName])\n\n\t// refelct.Value.Interface() convert reflect.Value to interface{}\n\t// then type assertion will work because interface can be either of point to struct or struct value\n\t// After converting, pElem is a point to specific element struct\n\t// e,g: var elem *htmlelem.form = &form{}\n\tpElem, ok:= vPointOfElem.Interface().(HtmlElementer)\n\tif !ok {\n\t\tlog.Println(\"Failed to convert to HtmlElementer interface!\")\n\t\treturn nil\n\t}\n\n\tpElem.Init(template, elemName, elemValue)\n\n\treturn pElem\n}", "func NewDocument() *Element {\n\treturn &Element{\n\t\tType: DocumentType,\n\t}\n}", "func documentElement(doc *html.Node) *html.Node {\n\tnodes := getElementsByTagName(doc, \"html\")\n\n\tif len(nodes) > 0 {\n\t\treturn 
nodes[0]\n\t}\n\n\treturn nil\n}", "func MakeDoc(body []byte) (*goquery.Document, error) {\n\ttext := strings.NewReader(string(body))\n\tdom, err := html.Parse(text)\n\n\tif err != nil {\n\t\treturn nil, errors.New(err)\n\t}\n\n\treturn goquery.NewDocumentFromNode(dom), nil\n}", "func createElement(tagName string) *html.Node {\n\treturn &html.Node{Type: html.ElementNode, Data: tagName}\n}", "func (c Crawler) getHTMLDoc(body io.ReadCloser) (*goquery.Document, error) {\n doc, err := goquery.NewDocumentFromReader(body)\n if err != nil {\n return nil, err\n }\n return doc, nil\n}", "func (p *Page) MustWaitDOMStable() *Page {\n\tp.e(p.WaitDOMStable(time.Second, 0))\n\treturn p\n}", "func getDiv(url string) (*html.Node, error) {\n\tvar b *html.Node\n\tvar f func(*html.Node)\n\n\tpageSource := read(url)\n\ts := strings.NewReader(pageSource)\n\tdoc, err := html.Parse(s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// searches for the body of the wiki page\n\tf = func(n *html.Node) {\n\t\tif n.Type == html.ElementNode && n.Data == \"div\" {\n\t\t\tfor _, attr := range n.Attr {\n\t\t\t\tif attr.Key == \"id\" {\n\t\t\t\t\tif attr.Val == \"mw-content-text\" {\n\t\t\t\t\t\tb = n\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tfor c := n.FirstChild; c != nil; c = c.NextSibling {\n\t\t\tf(c)\n\t\t}\n\t}\n\tf(doc)\n\n\tbody := renderNode(b)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbodys := strings.NewReader(body)\n\tdoc1, err := html.Parse(bodys)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn nil, err\n\t}\n\treturn doc1, nil\n}", "func getHtmlNodes(doc *html.Node) []*html.Node {\n\tif doc.Type == html.ElementNode && doc.Data == \"a\" {\n\t\tlog.Printf(\"tag a %+v \\n\", doc)\n\t\treturn []*html.Node{doc}\n\t}\n\n\tvar nod []*html.Node\n\tfor b := doc.FirstChild; b != nil; b = b.NextSibling {\n\t\tnod = append(nod, getHtmlNodes(b)...)\n\t}\n\treturn nod\n}", "func New(r io.Reader) (*Tree, error) {\n\tn, err := html.Parse(r)\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\tif n == nil {\n\t\treturn nil, fmt.Errorf(\"Error parsing html from reader\")\n\t}\n\treturn &Tree{n}, nil\n}", "func (cstyles *computedStyles) HTMLNode() *html.Node {\n\treturn cstyles.domnode.HTMLNode()\n}", "func (page *Page) Parse(body io.ReadCloser) (e error) {\n\tdoc, err := html.Parse(body)\n\tbody.Close()\n\tif err != nil {\n\t\te = err\n\t} else {\n\t\tpage.Node(doc)\n\t}\n\treturn\n}", "func NewElement(name string, els ...interface{}) *Element {\n\txel := &Element{\n\t\tse: xml.StartElement{\n\t\t\tName: xml.Name{Local: name},\n\t\t},\n\t}\n\tif err := xel.AddChild(els...); err != nil {\n\t\tpanic(err)\n\t}\n\treturn xel\n}", "func NewDangerousInnerHTML(s string) *DangerousInnerHTML {\n\to := object.New()\n\to.Set(\"__html\", s)\n\n\tres := &DangerousInnerHTML{o: o}\n\n\treturn res\n}", "func CreateNode(data string, dataAtom atom.Atom, values ...interface{}) *html.Node {\n\tnode := &html.Node{\n\t\tType: html.ElementNode,\n\t\tData: data,\n\t\tDataAtom: dataAtom,\n\t}\n\tfor _, value := range values {\n\t\tswitch v := value.(type) {\n\t\tcase html.Attribute:\n\t\t\tnode.Attr = append(node.Attr, v)\n\t\tcase *html.Node:\n\t\t\tnode.AppendChild(v)\n\t\tcase []*html.Node:\n\t\t\tfor _, c := range v {\n\t\t\t\tnode.AppendChild(c)\n\t\t\t}\n\t\tcase string:\n\t\t\tnode.AppendChild(&html.Node{Type: html.TextNode, Data: v})\n\t\t}\n\t}\n\treturn node\n}", "func NewAMT(bs cbor.IpldStore, opts ...Option) (*Root, error) {\n\tcfg := defaultConfig()\n\tfor _, opt := range opts {\n\t\tif err := opt(cfg); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn &Root{\n\t\tbitWidth: cfg.bitWidth,\n\t\tstore: bs,\n\t\tnode: new(node),\n\t}, nil\n}", "func (w *W3CNode) HTMLNode() *html.Node {\n\treturn w.stylednode.HTMLNode()\n}", "func Parse(url string, timeout time.Duration) (Article, error) {\n\t// Make sure url is valid\n\tparsedURL, err := nurl.Parse(url)\n\tif err != nil {\n\t\treturn Article{}, err\n\t}\n\n\t// Fetch page from 
URL\n\tclient := &http.Client{Timeout: timeout}\n\tresp, err := client.Get(url)\n\tif err != nil {\n\t\treturn Article{}, err\n\t}\n\tdefer resp.Body.Close()\n\n\tbtHTML, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn Article{}, err\n\t}\n\tstrHTML := string(btHTML)\n\n\t// Replaces 2 or more successive <br> elements with a single <p>.\n\t// Whitespace between <br> elements are ignored. For example:\n\t// <div>foo<br>bar<br> <br><br>abc</div>\n\t// will become:\n\t// <div>foo<br>bar<p>abc</p></div>\n\tstrHTML = replaceBrs.ReplaceAllString(strHTML, \"</p><p>\")\n\tstrHTML = strings.TrimSpace(strHTML)\n\n\t// Check if HTML page is empty\n\tif strHTML == \"\" {\n\t\treturn Article{}, fmt.Errorf(\"HTML is empty\")\n\t}\n\n\t// Create goquery document\n\tdoc, err := goquery.NewDocumentFromReader(strings.NewReader(strHTML))\n\tif err != nil {\n\t\treturn Article{}, err\n\t}\n\n\t// Create new readability\n\tr := readability{\n\t\turl: parsedURL,\n\t\tcandidates: make(map[string]candidateItem),\n\t}\n\n\t// Prepare document and fetch content\n\tr.prepareDocument(doc)\n\tcontentNode := r.getArticleContent(doc)\n\n\t// Get article metadata\n\tmeta := r.getArticleMetadata(doc)\n\tmeta.MinReadTime, meta.MaxReadTime = r.estimateReadTime(contentNode)\n\n\t// Get text and HTML from content\n\ttextContent := \"\"\n\thtmlContent := \"\"\n\tif contentNode != nil {\n\t\t// If we haven't found an excerpt in the article's metadata, use the first paragraph\n\t\tif meta.Excerpt == \"\" {\n\t\t\tp := contentNode.Find(\"p\").First().Text()\n\t\t\tmeta.Excerpt = normalizeText(p)\n\t\t}\n\n\t\t// Get content text and HTML\n\t\ttextContent = r.getTextContent(contentNode)\n\t\thtmlContent = r.getHTMLContent(contentNode)\n\t}\n\n\tarticle := Article{\n\t\tURL: parsedURL.String(),\n\t\tMeta: meta,\n\t\tContent: textContent,\n\t\tRawContent: htmlContent,\n\t}\n\n\treturn article, nil\n}", "func parse(reader io.Reader) []*html.Node {\n\tnodes, err := html.ParseFragment(reader, 
&html.Node{\n\t\tType: html.ElementNode,\n\t\tData: \"div\",\n\t\tDataAtom: atom.Div,\n\t})\n\tmust(err)\n\treturn nodes\n}", "func RequestHTML(method string, url url.URL) (*goquery.Document, error) {\n\theaders := map[string]string{\n\t\t\"accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\",\n\t\t\"user-agent\": \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/83.0.4103.61 Chrome/83.0.4103.61 Safari/537.36\",\n\t}\n\n\tdata, err := Request(method, url, headers)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Load the HTML document\n\tdoc, err := goquery.NewDocumentFromReader(bytes.NewReader(data))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn doc, nil\n}", "func NewHTML(t string) http.Handler {\n\treturn &HTMLhandler{t}\n}", "func NewDiv(children ...Element) Element {\n\treturn newWithChildren(\"div\", children)\n}", "func (c *DOM) GetDocument(depth int, pierce bool) (*DOMNode, error) {\n\tvar v DOMGetDocumentParams\n\tv.Depth = depth\n\tv.Pierce = pierce\n\treturn c.GetDocumentWithParams(&v)\n}", "func NewHTMLRenderer(opts ...html.Option) renderer.NodeRenderer {\n\tr := &HTMLRenderer{\n\t\tConfig: html.NewConfig(),\n\t}\n\tfor _, opt := range opts {\n\t\topt.SetHTMLOption(&r.Config)\n\t}\n\treturn r\n}", "func ParseHTML(thread *starlark.Thread, _ *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {\n\tvar htmlText starlark.String\n\tif err := starlark.UnpackArgs(\"parseHtml\", args, kwargs, \"htmlText\", &htmlText); err != nil {\n\t\treturn nil, err\n\t}\n\n\tcontent, err := AsString(htmlText)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\troot := soup.HTMLParse(string(content))\n\treturn NewSoupNode(&root), nil\n}", "func HTML(props *HTMLProps, children ...Element) *HTMLElem {\n\trProps := &_HTMLProps{\n\t\tBasicHTMLElement: newBasicHTMLElement(),\n\t}\n\n\tif props != nil 
{\n\t\tprops.assign(rProps)\n\t}\n\n\treturn &HTMLElem{\n\t\tElement: createElement(\"html\", rProps, children...),\n\t}\n}", "func NewDocument(chainDoc *ChainDocument) *Document {\n\tcontentGroups := make([]*ContentGroup, 0, len(chainDoc.ContentGroups))\n\tcertificates := make([]*Certificate, 0, len(chainDoc.Certificates))\n\n\tfor i, chainContentGroup := range chainDoc.ContentGroups {\n\t\tcontentGroups = append(contentGroups, NewContentGroup(chainContentGroup, i+1))\n\t}\n\n\tfor i, chainCertificate := range chainDoc.Certificates {\n\t\tcertificates = append(certificates, NewCertificate(chainCertificate, i+1))\n\t}\n\n\treturn &Document{\n\t\tHash: chainDoc.Hash,\n\t\tCreatedDate: ToTime(chainDoc.CreatedDate),\n\t\tCreator: chainDoc.Creator,\n\t\tContentGroups: contentGroups,\n\t\tCertificates: certificates,\n\t\tDType: []string{\"Document\"},\n\t}\n}", "func (self *Response) ResetHtmlParser() *goquery.Document {\n\tr := strings.NewReader(self.body)\n\tvar err error\n\tself.docParser, err = goquery.NewDocumentFromReader(r)\n\tif err != nil {\n\t\treporter.Log.Println(err.Error())\n\t\tpanic(err.Error())\n\t}\n\treturn self.docParser\n}", "func (o *OpenGraph) UnmarshalDOM(src tame.DOMSelection) error {\n\tif src == nil {\n\t\treturn errors.New(\"empty DOM Selection\")\n\t}\n\n\thead := src.Find(\"head\")\n\n\thead.ReadAttr(\"meta[property=\\\"og:type\\\"]\", \"content\", &o.Type)\n\thead.ReadAttr(\"meta[property=\\\"og:title\\\"]\", \"content\", &o.Title)\n\thead.ReadAttr(\"meta[property=\\\"og:locale\\\"]\", \"content\", &o.Locale)\n\thead.ReadAttr(\"meta[property=\\\"og:site_name\\\"]\", \"content\", &o.SiteName)\n\thead.ReadAttr(\"meta[property=\\\"og:description\\\"]\", \"content\", &o.Description)\n\n\tvar u string\n\thead.ReadAttr(\"meta[property=\\\"og:url\\\"]\", \"content\", &u)\n\tif len(u) > 0 {\n\t\to.URL, _ = url.Parse(u)\n\t}\n\n\thead.Find(\"meta[property=\\\"og:image\\\"]\").Each(func(s tame.DOMSelection) {\n\t\tu := s.Attr(\"content\", 
\"\")\n\t\tif len(u) > 0 {\n\t\t\tpu, err := url.Parse(u)\n\t\t\tif err == nil && pu != nil {\n\t\t\t\to.Images = append(o.Images, *pu)\n\t\t\t}\n\t\t}\n\t})\n\n\treturn nil\n}", "func parseHTMLWrapped(r io.Reader) (*Node, error) {\r\n\tnodes := []*Node{}\r\n\tp := newParser(r)\r\n\tvar err error\r\n\tfor {\r\n\t\tn, err := p.parse()\r\n\t\tif err == nil && n != nil {\r\n\t\t\tnodes = append(nodes, n)\r\n\t\t} else {\r\n\t\t\tbreak\r\n\t\t}\r\n\t}\r\n\r\n\treturn NewRootNode(nodes), err\r\n}", "func New(duration time.Duration) *Dam {\n\td := &Dam{\n\t\tstorage: make(map[string]*element),\n\t\ttickerDone: make(chan struct{}),\n\t}\n\tif duration > time.Duration(0) {\n\t\td.ticker = time.NewTicker(duration)\n\t\tgo func() {\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-d.ticker.C:\n\t\t\t\t\td.Purge()\n\t\t\t\tcase <-d.tickerDone:\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\treturn d\n}", "func New() (*HTMLToX, error) {\n\tvar err error\n\n\thtmltox := &HTMLToX{\n\t\tAPI: api.New(),\n\t\tBrowser: chrome.New(&chrome.Flags{\n\t\t\t\"addr\": []interface{}{\"localhost\"},\n\t\t\t\"disable-extensions\": nil,\n\t\t\t\"disable-gpu\": nil,\n\t\t\t\"headless\": nil,\n\t\t\t\"hide-scrollbars\": nil,\n\t\t\t\"no-first-run\": nil,\n\t\t\t\"no-sandbox\": nil,\n\t\t\t\"port\": []interface{}{9222},\n\t\t\t\"remote-debugging-address\": []interface{}{\"0.0.0.0\"},\n\t\t\t\"remote-debugging-port\": []interface{}{9222},\n\t\t}, \"\", \"\", \"\", \"\"),\n\t\tSockets: make(map[string]socket.Socketer),\n\t}\n\n\terr = htmltox.Browser.Launch()\n\tif nil != err {\n\t\tlog.Error(err)\n\t\treturn nil, err\n\t}\n\n\thtmltox.API.Handle(\"GET\", \"/\", htmltox.Usage)\n\thtmltox.API.Handle(\"GET\", \"/test\", htmltox.RenderURL)\n\thtmltox.API.Handle(\"GET\", \"/favicon.ico\", func(response http.ResponseWriter, request *http.Request) {\n\t\tdata, err := ioutil.ReadFile(\"/go/src/github.com/mkenney/docker-htmltox/app/assets/favicon.ico\")\n\t\tif nil != err 
{\n\t\t\tlog.Debugf(err.Error())\n\t\t\treturn\n\t\t}\n\t\theaders := make(map[string]string)\n\t\theaders[\"Content-Type\"] = \"image/vnd.microsoft.icon\"\n\t\thtmltox.API.RespondWithRawBody(\n\t\t\trequest,\n\t\t\tresponse,\n\t\t\t200,\n\t\t\tstring(data),\n\t\t\theaders,\n\t\t)\n\t})\n\n\treturn htmltox, nil\n}", "func (self *Response) GetHtmlParser() *goquery.Document {\n\treturn self.docParser\n}", "func NewDocument(grammar *Grammar) *DocumentNode {\n\treturn &DocumentNode{Tag: \"Root\", TagDefinition: grammar.GetTag(\"#root\")}\n}", "func (i InputInlineQueryResultDocument) construct() InputInlineQueryResultClass { return &i }", "func parseHTML(r io.Reader) (*Node, error) {\r\n\tn, err := newParser(r).parse()\r\n\treturn n, err\r\n}", "func New(r io.Reader) (*WebmKeeper, error) {\n\tp := webmEdtd.NewParser(r)\n\theader := bytes.NewBuffer(make([]byte, 0, 4096))\n\tbody := bytes.NewBuffer(make([]byte, 0, 4096))\n\n\tfor {\n\t\tel, err := next(p)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif el.Name == \"Cluster\" {\n\t\t\tel.WriteTo(body)\n\t\t\tbreak\n\t\t}\n\t\tel.WriteTo(header)\n\t}\n\n\treturn &WebmKeeper{\n\t\tp: p,\n\t\theader: header,\n\t\tbody: body,\n\t\telemBuf: bytes.NewBuffer(make([]byte, 0, 1024)),\n\t}, nil\n}", "func FromHTML(content []byte) string {\n\tif cset := FromBOM(content); cset != \"\" {\n\t\treturn cset\n\t}\n\tif cset := fromHTML(content); cset != \"\" {\n\t\treturn cset\n\t}\n\treturn FromPlain(content)\n}", "func (t *HTMLDom) DomFind(selector string) *goquery.Selection {\n\tif selector == \"\" {\n\t\treturn nil\n\t}\n\treturn t.dom.Find(selector)\n}", "func NewElements(dtype part3.DataType, len int) Elements {\n\tel := Elements{}\n\tswitch dtype {\n\tcase part3.Int32:\n\t\tel.I32 = make([]part3.Int32Element, len)\n\tcase part3.Float32:\n\t\tel.F32 = make([]part3.Float32Element, len)\n\tcase part3.Float64:\n\t\tel.F64 = make([]part3.Float64Element, len)\n\t}\n\tel.Type = dtype\n\treturn el\n}", "func (socket 
*MockSocket) DOM() *socket.DOMProtocol {\n\treturn socket.dom\n}", "func New(elem *jquery.JQuery) *Router {\n\tr := &Router{\n\t\telement: elem,\n\t}\n\telem.SetData(dataKey, r)\n\treturn r\n}", "func collectLinksFromHtml(htmlResp io.Reader) ([]Link, error) {\n\tdoc, err := html.Parse(htmlResp)\n\tif err != nil {\n\t\tlog.Printf(\"error while parsing htmlResp io.Reader in html.Parse(), error: %s\", err.Error())\n\t\treturn nil, err\n\t}\n\t//log.Printf(\"htmlNOde %+v \\n\", doc)\n\tnodes := getHtmlNodes(doc)\n\t//log.Printf(\"nodes %+v \\n\", nodes)\n\n\tvar links []Link\n\n\tfor _, node := range nodes {\n\t\tlinks = append(links, buildLink(node))\n\t}\n\treturn links, nil\n}", "func (htmlDoc *htmlDocument) append(e element) {\n\thtmlDoc.elements = append(htmlDoc.elements, e)\n}", "func New(TemplateDir, ContentType string) HTMLRender {\n\t// if TemplateDir[0] != '/' {\n\t// \tTemplateDir = \"/\" + TemplateDir\n\t// }\n\treturn &htmlRender{\n\t\ttemplateDir: TemplateDir,\n\t\tcontentType: ContentType,\n\t}\n}", "func NewSoupNode(root *soup.Root) starlark.Value {\n\t// Need to clone, since the input value is not immutable. 
Removing the clone operation\n\t// will break things like iteration, wherein a single soup.Root will be mutated by\n\t// each step of the loop body.\n\t// Luckily, the soup library exports the field names of this struct, so we can clone.\n\tclone := &soup.Root{\n\t\tPointer: root.Pointer,\n\t\tNodeValue: root.NodeValue,\n\t\tError: root.Error,\n\t}\n\n\treturn (*SoupNode)(clone)\n}", "func TokenizeHTML(s string) (*Node, error) {\r\n\thtmlContent := strings.NewReader(s)\r\n\tdoc, err := html.Parse(htmlContent)\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\r\n\tchildren := make([]*Node, 0)\r\n\tbody := &Node{\r\n\t\tTag: \"body\",\r\n\t\tText: \"\",\r\n\t\tAttrs: []map[string]string{},\r\n\t\tChildren: children,\r\n\t}\r\n\texcludedTags := map[string]bool{\r\n\t\t\"html\": true, \"head\": true, \"body\": true,\r\n\t}\r\n\r\n\tvar f func(*html.Node, *[]*Node) []*Node\r\n\tf = func(n *html.Node, nodes *[]*Node) []*Node {\r\n\t\tchildNode := &Node{}\r\n\t\tchildNode.Children = make([]*Node, 0)\r\n\r\n\t\tcontinueTokenization := true\r\n\t\tif n.Type == html.DocumentNode {\r\n\t\t\tcontinueTokenization = false\r\n\t\t}\r\n\r\n\t\tif n.Type == html.ElementNode {\r\n\t\t\tif found := excludedTags[n.Data]; found {\r\n\t\t\t\tcontinueTokenization = false\r\n\t\t\t}\r\n\t\t}\r\n\r\n\t\tif continueTokenization {\r\n\t\t\tattrs := make([]map[string]string, 0)\r\n\t\t\tif n.Type == html.ElementNode {\r\n\t\t\t\tfor _, a := range n.Attr {\r\n\t\t\t\t\tattrs = append(attrs, map[string]string{\"key\": a.Key, \"value\": a.Val})\r\n\t\t\t\t}\r\n\t\t\t\tchildNode.Tag = n.Data\r\n\t\t\t} else if n.Type == html.TextNode {\r\n\t\t\t\tchildNode.Text = n.Data\r\n\t\t\t}\r\n\t\t\tchildNode.Attrs = attrs\r\n\t\t}\r\n\r\n\t\tfor child := n.FirstChild; child != nil; child = child.NextSibling {\r\n\t\t\tchildNode.Children = f(child, &childNode.Children)\r\n\t\t}\r\n\r\n\t\tif continueTokenization {\r\n\t\t\t*nodes = append(*nodes, childNode)\r\n\t\t} else {\r\n\t\t\t*nodes = 
childNode.Children\r\n\t\t}\r\n\r\n\t\treturn *nodes\r\n\t}\r\n\r\n\tbody.Children = f(doc, &body.Children)\r\n\r\n\treturn body, nil\r\n}", "func EnableDom(frame *cdp.Frame, timeout time.Duration) error {\n\terr := cdp.NewAction(\n\t\t[]cdp.Event{},\n\t\t[]cdp.Command{\n\t\t\tcdp.Command{ID: frame.RequestID.GetNext(), Method: dom.CommandDOMEnable, Params: &dom.EnableArgs{}, Reply: &dom.EnableReply{}, Timeout: timeout},\n\t\t}).Run(frame)\n\tif err != nil {\n\t\tframe.Browser.Log.Print(err)\n\t}\n\treturn err\n}", "func Parse(inputHTML []byte) renderer.Renderables {\n\trbs := renderer.Renderables{}\n\tdoc := bytes.NewReader(inputHTML)\n\ttokenizer := html.NewTokenizer(doc)\n\n\tfor {\n\t\ttt := tokenizer.Next()\n\n\t\tif tt == html.ErrorToken {\n\t\t\tbreak // End of document\n\t\t}\n\n\t\tif tt == html.SelfClosingTagToken {\n\t\t\tT1 := tokenizer.Token()\n\n\t\t\tif T1.Data == \"hr\" {\n\t\t\t\trbs.Add(renderer.HR{})\n\t\t\t}\n\t\t\tif T1.Data == \"img\" {\n\t\t\t\timg := Image(T1)\n\t\t\t\trbs.Add(img)\n\t\t\t}\n\t\t}\n\n\t\tif tt == html.StartTagToken {\n\t\t\tT1 := tokenizer.Token()\n\n\t\t\tif T1.Data == \"h1\" ||\n\t\t\t\tT1.Data == \"h2\" ||\n\t\t\t\tT1.Data == \"h3\" ||\n\t\t\t\tT1.Data == \"h4\" ||\n\t\t\t\tT1.Data == \"h5\" ||\n\t\t\t\tT1.Data == \"h6\" {\n\t\t\t\theader := Header(tokenizer, T1)\n\t\t\t\trbs.Add(header)\n\t\t\t}\n\t\t\tif T1.Data == \"pre\" {\n\t\t\t\tcb := Codeblock(tokenizer)\n\t\t\t\trbs.Add(cb)\n\t\t\t}\n\t\t\tif T1.Data == \"p\" {\n\t\t\t\tAddParagraph(&rbs, tokenizer)\n\t\t\t}\n\t\t\tif T1.Data == \"blockquote\" {\n\t\t\t\tbq := Blockquote(tokenizer)\n\t\t\t\trbs.Add(bq)\n\t\t\t}\n\t\t\tif T1.Data == \"ol\" || T1.Data == \"ul\" {\n\t\t\t\tls := List(tokenizer, T1)\n\t\t\t\trbs.Add(ls)\n\t\t\t}\n\t\t\tif T1.Data == \"table\" {\n\t\t\t\ttable := Table(tokenizer)\n\t\t\t\trbs.Add(table)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn rbs\n}", "func Document(children ...Element) *CompoundElement { return newCE(\"Document\", children) }", "func 
NewMap(div js.Value, options map[string]interface{}) *Map {\n\tif div == js.Null() {\n\t\tpanic(\"leaflet: cannot use null map div\")\n\t}\n\tif nodeName := div.Get(\"nodeName\").String(); nodeName != \"DIV\" {\n\t\tpanic(\"leaflet: map div nodeName should be DIV but is \" + nodeName)\n\t}\n\tInitialize()\n\treturn &Map{\n\t\tValue: L.Call(\"map\", div, options),\n\t}\n}", "func (c *DOM) GetOuterHTML(nodeId int, backendNodeId int, objectId string) (string, error) {\n\tvar v DOMGetOuterHTMLParams\n\tv.NodeId = nodeId\n\tv.BackendNodeId = backendNodeId\n\tv.ObjectId = objectId\n\treturn c.GetOuterHTMLWithParams(&v)\n}", "func newDam() *dam {\n\treturn &dam{\n\t\tlock: sync.RWMutex{},\n\t\tbarrier: make(chan error),\n\t}\n}", "func NewDefault() *sanitizer {\n\ts := New()\n\ts.StripHtml = false\n\ts.Elements = acceptableTagsList\n\ts.Attributes = acceptableAttributesList\n\ts.URISchemes = acceptableUriSchemes\n\ts.StrictMode()\n\treturn s\n}", "func NewEntries(url string) ([]*Entry, error) {\n\thtml, err := getHtml(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn entries(goquery.NewDocumentFromNode(html))\n}", "func NewHtmlStorage() htmlStorage {\n\tstorage := htmlStorage{Path: filepath.Join(utils.GetAppPath(), DOCS_FOLDER)}\n\tfmt.Println(\"Create storage folder by path: \", storage.Path)\n\n\tif err := os.MkdirAll(storage.Path, os.ModePerm); err != nil {\n\t\tlog.Fatalf(\"Failed to create storage for docs. 
Cause: '%s'\\n\", err.Error())\n\t}\n\treturn storage\n}", "func HTMLParser(response string) *html.Node {\n\tdoc, err := html.Parse(strings.NewReader(response))\n\tif err != nil {\n\t\tfmt.Println(\"Can't parse the html5 utf-8 encoded response\")\n\t\tos.Exit(1)\n\t}\n\treturn doc\n\n}", "func Parse(r io.Reader) (Nodes, error) {\n\tn, err := html.Parse(r)\n\treturn Nodes{&Node{n}}, err\n}", "func UrlToHTMLNode(url string) (*html.Node, error) {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbody, err := html.Parse(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn body, nil\n}", "func (c *DOM) GetOuterHTMLWithParams(v *DOMGetOuterHTMLParams) (string, error) {\n\tresp, err := gcdmessage.SendCustomReturn(c.target, c.target.GetSendCh(), &gcdmessage.ParamRequest{Id: c.target.GetId(), Method: \"DOM.getOuterHTML\", Params: v})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvar chromeData struct {\n\t\tResult struct {\n\t\t\tOuterHTML string\n\t\t}\n\t}\n\n\tif resp == nil {\n\t\treturn \"\", &gcdmessage.ChromeEmptyResponseErr{}\n\t}\n\n\t// test if error first\n\tcerr := &gcdmessage.ChromeErrorResponse{}\n\tjson.Unmarshal(resp.Data, cerr)\n\tif cerr != nil && cerr.Error != nil {\n\t\treturn \"\", &gcdmessage.ChromeRequestErr{Resp: cerr}\n\t}\n\n\tif err := json.Unmarshal(resp.Data, &chromeData); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn chromeData.Result.OuterHTML, nil\n}", "func Article(props *ArticleProps, children ...Element) *ArticleElem {\n\trProps := &_ArticleProps{\n\t\tBasicHTMLElement: newBasicHTMLElement(),\n\t}\n\n\tif props != nil {\n\t\tprops.assign(rProps)\n\t}\n\n\treturn &ArticleElem{\n\t\tElement: createElement(\"article\", rProps, children...),\n\t}\n}", "func NewDocument() *Document {\n\treturn &Document{documents: make(map[string]flare.Document)}\n}", "func Parse(r io.Reader) (doc Document) {\n\tn, err := html.Parse(r)\n\tif err != nil {\n\t\tdoc.setError(fmt.Errorf(\"Error parsing html: 
%w\", err))\n\t\treturn\n\t}\n\n\tdoc = findForms(n)\n\treturn\n}", "func New() *EAD3 {\n\tobj := new(EAD3)\n\tobj.XMLNameSpace = \"http://ead3.archivists.org/schema/undeprecated/\"\n\treturn obj\n}", "func (p *Page) MustElementFromNode(node *proto.DOMNode) *Element {\n\tel, err := p.ElementFromNode(node)\n\tp.e(err)\n\treturn el\n}", "func New() *Parser {\n\treturn &Parser{\n\t\tWords: make(map[string]*wordRef),\n\t}\n}", "func newHTMLTag(ln *line, rslt *result, src *source, parent element, opts *Options) (*htmlTag, error) {\n\tif len(ln.tokens) < 1 {\n\t\treturn nil, fmt.Errorf(\"an HTML tag is not specified [file: %s][line: %d]\", ln.fileName(), ln.no)\n\t}\n\n\ts := ln.tokens[0]\n\n\ttagName := extractTagName(s)\n\n\tid, err := extractID(s, ln)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tclasses := extractClasses(s)\n\n\te := &htmlTag{\n\t\telementBase: newElementBase(ln, rslt, src, parent, opts),\n\t\ttagName: tagName,\n\t\tid: id,\n\t\tclasses: classes,\n\t\tcontainPlainText: strings.HasSuffix(s, dot),\n\t\tinsertBr: strings.HasSuffix(s, doubleDot),\n\t\tattributes: make([]htmlAttribute, 0, 2),\n\t}\n\n\tif err := e.setAttributes(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn e, nil\n}", "func (i InputInlineQueryResultArticle) construct() InputInlineQueryResultClass { return &i }", "func (*XMLDocument) CreateDocumentFragment() (w *window.DocumentFragment) {\n\tmacro.Rewrite(\"$_.createDocumentFragment()\")\n\treturn w\n}", "func New(cleanImg bool) Parser {\n\treturn &parser{\n\t\tcleanImg: cleanImg,\n\t}\n}", "func New(cleanImg bool) Parser {\n\treturn &parser{\n\t\tcleanImg: cleanImg,\n\t}\n}", "func NewElement(id any, label string) Element {\n\treturn Element{id, label}\n}", "func NewElement(v interface{}) *Element {\n\treturn new(Element).InitElement(v)\n}", "func NewGetDOMStorageItemsArgs(storageID StorageID) *GetDOMStorageItemsArgs {\n\targs := new(GetDOMStorageItemsArgs)\n\targs.StorageID = storageID\n\treturn args\n}", "func 
ParseHTMLFromSource(r io.Reader) ([]HTMLhrefEntries, error) {\n\thtmlReader := html.NewTokenizer(r)\n\tvar found = []HTMLhrefEntries{}\n\tfound, err := findHrefs(found, \"\", htmlReader)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn found, nil\n}", "func New(table *postgres.TableElem) Manager {\n\treturn Manager{TableElem: table}\n}", "func NewAirmet() (airmet *Airmet, err error) {\n\tairmet = &Airmet{}\n\tdoc, err := goquery.NewDocument(\"http://www.bom.gov.au/aviation/warnings/airmet/\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tairmet.Message = doc.Find(\"span.info\").First().Text()\n\n\tif airmet.Message == \"\" {\n\t\tairmet.Message, err = doc.Find(\"p.product\").Last().Html()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tairmet.Message = strings.Replace(airmet.Message, \"<br/>\", \"\\n\", -1)\n\n\t// TODO: find specific \"RMK: GAF\" line and only do Contains in it\n\tairmet.RemarkedGAFs = []string{}\n\tfor _, area := range gaf.Areas {\n\t\tif strings.Contains(airmet.Message, area) {\n\t\t\tairmet.RemarkedGAFs = append(airmet.RemarkedGAFs, area)\n\t\t}\n\t}\n\n\treturn airmet, nil\n}", "func (p *Page) parse() error {\n\tp.once.Do(func() {\n\t\tif p.root, p.err = html.Parse(p.body); p.err != nil {\n\t\t\tp.err = fmt.Errorf(\"ant: parse html %q - %w\", p.URL, p.err)\n\t\t}\n\t\tp.close()\n\t})\n\treturn p.err\n}", "func NewElementFromElement(elem XElement) *Element {\n\te := &Element{}\n\te.copyFrom(elem)\n\treturn e\n}", "func New(chromePerf anomalies.Store) (*store, error) {\n\tcache, err := lru.New(cacheSize)\n\tif err != nil {\n\t\treturn nil, skerr.Wrapf(err, \"Failed to create anomaly store cache.\")\n\t}\n\n\t// cleanup the lru cache periodically.\n\tgo func() {\n\t\tfor range time.Tick(cacheCleanupPeriod) {\n\t\t\tcleanupCache(cache)\n\t\t}\n\t}()\n\n\tret := &store{\n\t\tcache: cache,\n\t\tnumEntriesInCache: metrics2.GetInt64Metric(\"anomaly_store_num_entries_in_cache\"),\n\t\tChromePerf: chromePerf,\n\t}\n\treturn ret, 
nil\n}", "func New(parser ParserProvider) *Root {\n\treturn &Root{\n\t\tParser: parser,\n\t}\n}", "func prepDocument(doc *goquery.Document) {\n\t// Remove all style tags in head\n\tdoc.Find(\"style\").Remove()\n\n\t// Replace all br\n\treplaceBrs(doc)\n\n\t// Replace font tags to span\n\tdoc.Find(\"font\").Each(func(_ int, font *goquery.Selection) {\n\t\thtml, _ := font.Html()\n\t\tfont.ReplaceWithHtml(\"<span>\" + html + \"</span>\")\n\t})\n}" ]
[ "0.64554495", "0.5551131", "0.5162261", "0.49324796", "0.4877913", "0.4703432", "0.46724957", "0.45684707", "0.45434424", "0.45177507", "0.44259483", "0.43745458", "0.43526706", "0.4270929", "0.42546213", "0.4241416", "0.41487038", "0.41184837", "0.40987206", "0.4083889", "0.40686446", "0.40649992", "0.40451846", "0.40033248", "0.40016308", "0.39999148", "0.39896816", "0.39567524", "0.39453995", "0.3922557", "0.38965523", "0.3890851", "0.38594034", "0.38432163", "0.3843077", "0.3838738", "0.38293973", "0.38221407", "0.38069952", "0.38024545", "0.37893185", "0.37868083", "0.37203917", "0.37114882", "0.37099153", "0.369033", "0.36858726", "0.36777633", "0.36741933", "0.36589846", "0.363708", "0.36360985", "0.36255637", "0.36108714", "0.36035728", "0.35885683", "0.35841218", "0.3579099", "0.35711527", "0.3562122", "0.35610563", "0.35534444", "0.3546591", "0.35417342", "0.35336676", "0.35204402", "0.35194206", "0.35001484", "0.3497797", "0.34907582", "0.3473438", "0.34729084", "0.34722245", "0.34711817", "0.3470237", "0.34685442", "0.3465566", "0.34579253", "0.3450296", "0.34485194", "0.34418082", "0.34385222", "0.34357256", "0.34326932", "0.34284192", "0.34234506", "0.34231248", "0.34208453", "0.34208453", "0.3419927", "0.34195995", "0.34179038", "0.3416707", "0.34070435", "0.33968168", "0.33955306", "0.33752596", "0.33727267", "0.33661076", "0.33658785" ]
0.6929188
0
InitStudentsSubscriptionsHandler initialize studentsSubscriptions router
func InitStudentsSubscriptionsHandler(r *atreugo.Router, s *service.Service) { r.GET("/", getAllStudentsSubscriptions(s)) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *StanServer) initSubscriptions() error {\n\n\t// Do not create internal subscriptions in clustered mode,\n\t// the leader will when it gets elected.\n\tif !s.isClustered {\n\t\tcreateSubOnClientPublish := true\n\n\t\tif s.partitions != nil {\n\t\t\t// Receive published messages from clients, but only on the list\n\t\t\t// of static channels.\n\t\t\tif err := s.partitions.initSubscriptions(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t// Since we create a subscription per channel, do not create\n\t\t\t// the internal subscription on the > wildcard\n\t\t\tcreateSubOnClientPublish = false\n\t\t}\n\n\t\tif err := s.initInternalSubs(createSubOnClientPublish); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\ts.log.Debugf(\"Discover subject: %s\", s.info.Discovery)\n\t// For partitions, we actually print the list of channels\n\t// in the startup banner, so we don't need to repeat them here.\n\tif s.partitions != nil {\n\t\ts.log.Debugf(\"Publish subjects root: %s\", s.info.Publish)\n\t} else {\n\t\ts.log.Debugf(\"Publish subject: %s.>\", s.info.Publish)\n\t}\n\ts.log.Debugf(\"Subscribe subject: %s\", s.info.Subscribe)\n\ts.log.Debugf(\"Subscription Close subject: %s\", s.info.SubClose)\n\ts.log.Debugf(\"Unsubscribe subject: %s\", s.info.Unsubscribe)\n\ts.log.Debugf(\"Close subject: %s\", s.info.Close)\n\treturn nil\n}", "func NewSubscriptionHandler(store common.SubscriptionStore) func(http.ResponseWriter, *http.Request) {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tswitch r.Method {\n\t\tcase http.MethodGet:\n\t\t\t{\n\t\t\t\tsubs := store.GetAll()\n\t\t\t\tbytes, err := json.Marshal(subs)\n\n\t\t\t\tif err != nil {\n\t\t\t\t\twriteError(err, w)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tw.WriteHeader(200)\n\t\t\t\tw.Write(bytes)\n\t\t\t}\n\t\tdefault:\n\t\t\t{\n\t\t\t\tw.WriteHeader(http.StatusMethodNotAllowed)\n\t\t\t\tw.Write([]byte(\"not implemented\"))\n\t\t\t}\n\t\t}\n\t}\n}", "func (s *Subscription) Init(options 
...func(*Subscription)) error {\n\tfor _, option := range options {\n\t\toption(s)\n\t}\n\n\tif s.client == nil {\n\t\treturn errors.New(\"invalid client\")\n\t}\n\n\tif s.resourceRepository == nil {\n\t\treturn errors.New(\"invalid resource repository\")\n\t}\n\n\ts.collection = \"subscriptions\"\n\ts.collectionTrigger = \"subscriptionTriggers\"\n\ts.database = s.client.database\n\n\treturn s.ensureIndex()\n}", "func (b *EventStreamBroker) UpdateSubscriptionsHandler(w http.ResponseWriter, r *http.Request) {\n\n\tif r.Method != \"POST\" {\n\t\thttp.Error(w, \"Only POST method allowed\", http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\n\th := w.Header()\n\th.Set(\"Cache-Control\", \"no-cache\")\n\th.Set(\"Connection\", \"keep-alive\")\n\th.Set(\"Access-Control-Allow-Origin\", \"*\")\n\n\t// Incoming request data\n\tvar reqData updateSubscriptionsData\n\n\t// Decode JSON body\n\tdec := json.NewDecoder(r.Body)\n\tif err := dec.Decode(&reqData); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t// If the ID isn't provided, that means it is a new client\n\t// So generate an ID and create a new client.\n\tif reqData.SessID == \"\" {\n\t\thttp.Error(w, \"Session ID is required 'session_id'\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tb.mu.RLock()\n\tclient, ok := b.clients[reqData.SessID]\n\tb.mu.RUnlock()\n\tif !ok {\n\t\thttp.Error(w, \"Invalid session ID\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tctx := r.Context()\n\n\tvar wg sync.WaitGroup\n\n\tfor _, topic := range reqData.Add {\n\t\twg.Add(1)\n\t\tgo func(t string) {\n\t\t\tif err := b.subscriptionBroker.SubscribeClient(client, t); err != nil {\n\t\t\t\tlog.Println(\"Error:\", err)\n\n\t\t\t\td, _ := json.Marshal(map[string]interface{}{\n\t\t\t\t\t\"error\": map[string]string{\n\t\t\t\t\t\t\"code\": \"subscription-failure\",\n\t\t\t\t\t\t\"message\": fmt.Sprintf(\"Cannot subscribe to topic %v\", t),\n\t\t\t\t\t},\n\t\t\t\t})\n\t\t\t\tclient.writeChannel <- 
d\n\t\t\t}\n\t\t\twg.Done()\n\t\t}(topic)\n\t}\n\n\tfor _, topic := range reqData.Remove {\n\t\twg.Add(1)\n\t\tgo func(t string) {\n\t\t\tb.subscriptionBroker.UnsubscribeClient(ctx, client, t)\n\t\t\twg.Done()\n\t\t}(topic)\n\t}\n\n\twg.Wait()\n\n\tclient.mu.RLock()\n\tlog.Printf(\"Client '%v' subscriptions updated, total topics subscribed: %v \\n\", client.sessID, len(client.topics))\n\tclient.mu.RUnlock()\n\n\t// Return the ID of the client.\n\tenc := json.NewEncoder(w)\n\tif err := enc.Encode(map[string]string{\"session_id\": reqData.SessID}); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n}", "func (r *Routes) Subscribe(c *gin.Context) {\n\tc.Header(\"Content-Type\", \"text/event-stream\")\n\n\tticker := time.NewTicker(15 * time.Second)\n\tfor {\n\t\tselect {\n\t\tcase <-c.Request.Context().Done():\n\t\t\t// release resources\n\t\t\tbreak\n\t\tcase data := <-GetSubscriptionService().Receive():\n\t\t\tsse.Encode(c.Writer, sse.Event{\n\t\t\t\tEvent: \"message\",\n\t\t\t\tData: data,\n\t\t\t})\n\n\t\t\tc.Writer.Flush()\n\t\tcase <-ticker.C:\n\t\t\tsse.Encode(c.Writer, sse.Event{\n\t\t\t\tEvent: \"heartbeat\",\n\t\t\t\tData: \"Alive\",\n\t\t\t})\n\n\t\t\tc.Writer.Flush()\n\t\t}\n\t}\n}", "func initPrivRoutes(r *middleware.Router, q *data.Queue) {\n\tr.Request(\"auth.jwt\", initJWTAuthHandler())\n\tr.Request(\"echo\", middleware.Echo)\n\tr.Request(\"msg.send\", initSendMsgHandler(q))\n}", "func NewSubscriptions(client *gosip.SPClient, endpoint string, config *RequestConfig) *Subscriptions {\n\treturn &Subscriptions{\n\t\tclient: client,\n\t\tendpoint: endpoint,\n\t\tconfig: config,\n\t}\n}", "func Init(r chi.Router) {\n\n\tstore = api.Store\n\n\tr.Method(http.MethodGet, \"/\", api.Handler(getAllIncidentsHandler))\n\tr.Method(http.MethodPost, \"/\", api.Handler(createIncidentHandler))\n\tr.With(middleware.IncidentRequired).\n\t\tRoute(\"/{incidentID:[0-9]+}\", incidentIDSubRoutes)\n}", "func (ec *EngineCommunication) 
initReverseCommandSubscription() {\n\tconfig.Log.Debug(\"Subscribing to \" + naming.ReverseCommand + \" of \" + naming.Name(ec.Index))\n\tec.ReverseCommandSubscriber = communication.Subscribe(\n\t\tnaming.Topic(ec.Index, naming.ReverseCommand),\n\t\tnaming.Subscriber(ec.Index, naming.ReverseCommand),\n\t\tfunc (client mqtt.Client, msg mqtt.Message) {\n\t\t\treverseCommandRequest := reverseCommandRequest{}\n\t\t\terr := json.Unmarshal(msg.Payload(), &reverseCommandRequest)\n\t\t\tif err != nil {\n\t\t\t\tformatted := fmt.Sprintf(\"couldn't unmarshal reverse command request: %v\", err)\n\t\t\t\tconfig.Log.Debug(formatted)\n\t\t\t}\n\t\t\tec.ReverseCommandRequests <- reverseCommandRequest\n\t\t})\n}", "func (s *Server) Init() {\n\n\tcustomerAuthMd := middleware.Authenticate(s.customersSvc.IDByToken)\n\tcustomersSubrouter := s.mux.PathPrefix(\"/api/customers\").Subrouter()\n\n\tcustomersSubrouter.Use(customerAuthMd)\n\tcustomersSubrouter.HandleFunc(\"\", s.handleCustomerRegistration).Methods(POST)\n\tcustomersSubrouter.HandleFunc(\"/token\", s.handleCustomerGetToken).Methods(POST)\n\tcustomersSubrouter.HandleFunc(\"/products\", s.handleCustomerGetProducts).Methods(GET)\n\tcustomersSubrouter.HandleFunc(\"/purchases\", s.handleCustomerGetPurchases).Methods(GET)\n\n\tmanagersAuthenticateMd := middleware.Authenticate(s.managerSvc.IDByToken)\n\tmanagersSubRouter := s.mux.PathPrefix(\"/api/managers\").Subrouter()\n\tmanagersSubRouter.Use(managersAuthenticateMd)\n\n\tmanagersSubRouter.HandleFunc(\"\", s.handleManagerRegistration).Methods(POST)\n\tmanagersSubRouter.HandleFunc(\"/token\", s.handleManagerGetToken).Methods(POST)\n\tmanagersSubRouter.HandleFunc(\"/sales\", s.handleManagerGetSales).Methods(GET)\n\tmanagersSubRouter.HandleFunc(\"/sales\", s.handleManagerMakeSales).Methods(POST)\n\tmanagersSubRouter.HandleFunc(\"/products\", s.handleManagerGetProducts).Methods(GET)\n\tmanagersSubRouter.HandleFunc(\"/products\", 
s.handleManagerChangeProducts).Methods(POST)\n\tmanagersSubRouter.HandleFunc(\"/products/{id}\", s.handleManagerRemoveProductByID).Methods(DELETE)\n\tmanagersSubRouter.HandleFunc(\"/customers\", s.handleManagerGetCustomers).Methods(GET)\n\tmanagersSubRouter.HandleFunc(\"/customers\", s.handleManagerChangeCustomer).Methods(POST)\n\tmanagersSubRouter.HandleFunc(\"/customers/{id}\", s.handleManagerRemoveCustomerByID).Methods(DELETE)\n\n}", "func (p SubscriberProfile) Init(ex *amqpdriver.Exchange) *Subscribers {\n\tsubscribers := make(Subscribers)\n\n\toptions := []amqptransport.SubscriberOption{\n\t\tamqptransport.SubscriberErrorLogger(p.Logger),\n\t}\n\n\tsubscribers[\"catalogItem\"] = &SubscriberQueue{\n\t\tSubscriber: amqptransport.NewSubscriber(\n\t\t\tp.Endpoint.CatalogItemEndpoint,\n\t\t\tDecodeAMQPCatalogItemRequest,\n\t\t\tEncodeAMQPCatalogItemResponse,\n\t\t\toptions...,\n\t\t),\n\t\tQueue: &amqpdriver.Queue{\n\t\t\tName: \"domain-catalog-item\",\n\t\t\tBindingKey: \"catalog-item\",\n\t\t\tExchange: *ex,\n\t\t}}\n\n\tsubscribers[\"allItems\"] = &SubscriberQueue{\n\t\tSubscriber: amqptransport.NewSubscriber(\n\t\t\tp.Endpoint.AllItemsEndpoint,\n\t\t\tDecodeAMQPCatalogItemRequest,\n\t\t\tEncodeAMQPAllItemsResponse,\n\t\t\toptions...,\n\t\t),\n\t\tQueue: &amqpdriver.Queue{\n\t\t\tName: \"domain-catalog-allitems\",\n\t\t\tBindingKey: \"catalog-allitems\",\n\t\t\tExchange: *ex,\n\t\t}}\n\n\tsubscribers[\"listingCategoryItems\"] = &SubscriberQueue{\n\t\tSubscriber: amqptransport.NewSubscriber(\n\t\t\tp.Endpoint.ListingCategoriesEndpoint,\n\t\t\tDecodeAMQPCatalogItemRequest,\n\t\t\tEncodeAMQPListinCategoryItemsResponse,\n\t\t\toptions...,\n\t\t),\n\t\tQueue: &amqpdriver.Queue{\n\t\t\tName: \"domain-catalog-listingcategoryitems\",\n\t\t\tBindingKey: \"catalog-listingcategoryitems\",\n\t\t\tExchange: *ex,\n\t\t}}\n\n\tsubscribers[\"geoItem\"] = &SubscriberQueue{\n\t\tSubscriber: 
amqptransport.NewSubscriber(\n\t\t\tp.Endpoint.GeoItemEndpoint,\n\t\t\tDecodeAMQPGeoItemRequest,\n\t\t\tEncodeAMQPGeoItemResponse,\n\t\t\toptions...,\n\t\t),\n\t\tQueue: &amqpdriver.Queue{\n\t\t\tName: \"domain-catalog-geoitem\",\n\t\t\tBindingKey: \"catalog-geoitem\",\n\t\t\tExchange: *ex,\n\t\t}}\n\n\treturn &subscribers\n}", "func (ss *SNSServer) PrepareAndStart() {\n\n\tss.Subscribe()\n}", "func init() {\n\tgather.Register(sqsRegName, &sqsCreator{})\n}", "func startSubscription(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tprotocol := vars[\"protocol\"]\n\tendpointType := vars[\"endpointType\"]\n\tvar keys []string\n\terr := json.NewDecoder(r.Body).Decode(&keys)\n\tif err != nil {\n\t\tsendResponse(w, http.StatusBadRequest, err.Error())\n\t\treturn\n\t}\n\tvar subscriptionID string\n\tif protocol == \"http\" {\n\t\tsubscriptionID, err = subscribeHTTP(keys, endpointType)\n\t} else if protocol == \"grpc\" {\n\t\tsubscriptionID, err = subscribeGRPC(keys, endpointType)\n\t} else {\n\t\terr = fmt.Errorf(\"unknown protocol in Subscribe call: %s\", protocol)\n\t}\n\n\tif err != nil {\n\t\tsendResponse(w, http.StatusInternalServerError, err.Error())\n\t\treturn\n\t}\n\tsendResponse(w, http.StatusOK, subscriptionID)\n}", "func InitRoutes(router *gin.RouterGroup) {\n\trouter.GET(\":type/:id\", handler.GetIdentifiers)\n}", "func init() {\n\tbus := core.Bus\n\tbus.Subscribe(\"main:loadconfig\", func(data string) {\n\t\tfmt.Println(\"user:Subscribe(main:loadconfig)\",data)\n\t})\n}", "func initPubRoutes(r *middleware.Router, db *data.DB, pass string) {\n\tr.Request(\"auth.google\", initGoogleAuthHandler(db, pass))\n}", "func HandleSubscribedPublishers(env *config.Env) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tuidStr := r.Header.Get(\"UID\")\n\t\tif uidStr == \"\" {\n\t\t\thttp.Error(w, \"A UID header's missing\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\tuid, err := strconv.ParseUint(uidStr, 10, 
64)\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"An unexpected error occured. Please try again later.\", http.StatusInternalServerError)\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t}\n\n\t\tpublisherIds, err := env.DB.SubscribedPublishers(r.Context(), uid)\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"An unexpected error has occured while retrieving the subscribed publishers\", http.StatusInternalServerError)\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t}\n\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tjson.NewEncoder(w).Encode(publisherIds)\n\t}\n}", "func Init() {\n\t// r := router.Get()\n\t// r.GET(\"/publishOffers\", func(ctx *gin.Context) {\n\t// \tctx.JSON(200, offersController.PublishOffers(ctx))\n\t// })\n}", "func NewSubscriptionsHandler(prefix string, postfixOpts string) (*EventStreamBroker, error) {\n\n\tif prefix == \"\" {\n\t\treturn nil, errors.New(\"Prefix cannot be empty\")\n\t}\n\n\treturn &EventStreamBroker{\n\t\tsubscriptionBroker: &subscriptionsBroker{\n\t\t\tPrefix: prefix,\n\t\t\tPostfixOptions: postfixOpts,\n\t\t\tsubs: make(map[string]*subscription),\n\t\t},\n\t\tclients: make(map[string]*client),\n\t}, nil\n}", "func InitializeRouter() *mux.Router {\n\twire.Build(http.NewRouter, http.NewSWAPIHandlerSet, CachedServiceSWAPIService, ServiceSWAPIService, client.New, ProvideClientOptions)\n\treturn &mux.Router{}\n}", "func (p *SimpleProxy) init() error {\n\treturn p.registerSubscribers()\n}", "func init() {\n\tregister(\"POST\", \"/user/register\", controllers.Register, nil)\n\tregister(\"POST\", \"/user/login\", controllers.Login, nil)\n\tregister(\"GET\", \"/user/info\", controllers.UserInfo, auth.TokenMiddleware)\n\tregister(\"POST\", \"/user/logout\", controllers.Logout, auth.TokenMiddleware)\n\tregister(\"POST\", \"/user/shift\", controllers.CreateUserSchedule, nil)\n\tregister(\"POST\", \"/admin/month\", controllers.SetMonth, nil)\n}", "func init() {\n\tauth.Register(\"entitlement\", auth.InitFunc(newAccessController))\n}", 
"func subscriptionFilter(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {\n\t\tcontactID := middleware.GetSubscriptionId(request)\n\t\tuserLogin := middleware.GetLogin(request)\n\t\tsubscriptionData, err := controller.CheckUserPermissionsForSubscription(database, contactID, userLogin)\n\t\tif err != nil {\n\t\t\trender.Render(writer, request, err)\n\t\t\treturn\n\t\t}\n\t\tctx := context.WithValue(request.Context(), subscriptionKey, subscriptionData)\n\t\tnext.ServeHTTP(writer, request.WithContext(ctx))\n\t})\n}", "func InitRoutes() *mux.Router {\n\trouter := mux.NewRouter()\n\trouter = SetFinancialTransationsRoutes(router)\n\trouter = SetLastPurchasesRoutes(router)\n\trouter = SetConsultedCPFRoutes(router)\n\treturn router\n}", "func init() {\n\t// system.Router.HandleFunc(\"/app/get/list/{crud}\", HandleListGeneric)\n}", "func InitRouter() http.Handler {\n\tr := chi.NewRouter()\n\n\tr.Route(\"/api/v2\", func(r chi.Router) {\n\t\tr.Mount(\"/educational-certificate\", educert.Router())\n\t\tr.Mount(\"/previous-employment\", prevemployment.Router())\n\t})\n\n\treturn r\n}", "func (s *Server) initializeRoutes() {\n\ts.Mux.HandleFunc(\"/countries\", s.countries)\n\ts.Mux.HandleFunc(\"/countries/\", s.countryById)\n}", "func (gatewayContext *GatewayContext) updateSubscriberClients() {\n\tif gatewayContext.gateway.Spec.Subscribers == nil {\n\t\treturn\n\t}\n\n\tif gatewayContext.httpSubscribers == nil {\n\t\tgatewayContext.httpSubscribers = make(map[string]cloudevents.Client)\n\t}\n\tif gatewayContext.natsSubscribers == nil {\n\t\tgatewayContext.natsSubscribers = make(map[string]cloudevents.Client)\n\t}\n\n\t// http subscribers\n\tfor _, subscriber := range gatewayContext.gateway.Spec.Subscribers.HTTP {\n\t\tif _, ok := gatewayContext.httpSubscribers[subscriber]; !ok {\n\t\t\tt, err := 
cloudevents.NewHTTPTransport(\n\t\t\t\tcloudevents.WithTarget(subscriber),\n\t\t\t\tcloudevents.WithEncoding(cloudevents.HTTPBinaryV03),\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\tgatewayContext.logger.WithError(err).WithField(\"subscriber\", subscriber).Warnln(\"failed to create a transport\")\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tclient, err := cloudevents.NewClient(t)\n\t\t\tif err != nil {\n\t\t\t\tgatewayContext.logger.WithError(err).WithField(\"subscriber\", subscriber).Warnln(\"failed to create a client\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tgatewayContext.logger.WithField(\"subscriber\", subscriber).Infoln(\"added a client for the subscriber\")\n\t\t\tgatewayContext.httpSubscribers[subscriber] = client\n\t\t}\n\t}\n\n\t// nats subscribers\n\tfor _, subscriber := range gatewayContext.gateway.Spec.Subscribers.NATS {\n\t\tif _, ok := gatewayContext.natsSubscribers[subscriber.Name]; !ok {\n\t\t\tt, err := cloudeventsnats.New(subscriber.ServerURL, subscriber.Subject)\n\t\t\tif err != nil {\n\t\t\t\tgatewayContext.logger.WithError(err).WithField(\"subscriber\", subscriber).Warnln(\"failed to create a transport\")\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tclient, err := cloudevents.NewClient(t)\n\t\t\tif err != nil {\n\t\t\t\tgatewayContext.logger.WithError(err).WithField(\"subscriber\", subscriber).Warnln(\"failed to create a client\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tgatewayContext.logger.WithField(\"subscriber\", subscriber).Infoln(\"added a client for the subscriber\")\n\t\t\tgatewayContext.natsSubscribers[subscriber.Name] = client\n\t\t}\n\t}\n}", "func (ctx *HandlerContext) SubscriptionHandler(w http.ResponseWriter, r *http.Request) {\n\tif r.Method == \"POST\" {\n\t\t// requires auth header to get device trying to subscribe\n\t\tsessionState := &SessionState{}\n\t\t_, err := sessions.GetState(r, ctx.SigningKey, ctx.SessStore, sessionState)\n\t\tif err != nil {\n\t\t\thttp.Error(w, fmt.Sprintf(\"problem with session %v\", err), 
http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\n\t\tdevice, err := ctx.deviceStore.GetByID(sessionState.Device.ID)\n\t\tif err != nil {\n\t\t\thttp.Error(w, fmt.Sprintf(\"device not found: %v\", err), http.StatusNotFound)\n\t\t\treturn\n\t\t}\n\t\tsub := &webpush.Subscription{}\n\t\tif err := json.NewDecoder(r.Body).Decode(sub); err != nil {\n\t\t\thttp.Error(w, fmt.Sprintf(\"error decoding JSON: %v\", err), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tdevice.Subscription = sub\n\t\tif err = ctx.deviceStore.Update(device.ID, device); err != nil {\n\t\t\thttp.Error(w, fmt.Sprintf(\"error updating device: %v\", err), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\t// user subsribed successfully\n\t\trespond(w, nil, http.StatusCreated, ctx.PubVapid)\n\t} else {\n\t\thttp.Error(w, \"Method Not Allowed\", 405)\n\t}\n\n}", "func initApiHandlers(router *itineris.ApiRouter) {\n\trouter.SetHandler(\"pgsqlListDepartments\", apiListDepartments)\n\trouter.SetHandler(\"pgsqlCreateDepartment\", apiCreateDepartment)\n\trouter.SetHandler(\"pgsqlGetDepartment\", apiGetDepartment)\n\trouter.SetHandler(\"pgsqlUpdateDepartment\", apiUpdateDepartment)\n\trouter.SetHandler(\"pgsqlDeleteDepartment\", apiDeleteDepartment)\n}", "func InitDatastoreSubscription() error {\n\td, err := NewDatastoreSubscription(datastore.GlobalConfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsetGlobalSubscription(d)\n\treturn nil\n}", "func InitRolesHandler(r *atreugo.Router, s *service.Service) {\n\tr.GET(\"/\", getAllRoles(s))\n}", "func (self *CentralBooking) InstallHandlers(router *mux.Router) {\n router.\n Methods(\"POST\").\n Path(\"/register/instance\").\n HandlerFunc(self.RegisterInstance)\n\n // apeing vault\n router.\n Methods(\"GET\").\n Path(\"/sys/health\").\n HandlerFunc(self.CheckHealth)\n}", "func (a *App) InitRoutes() {\n\ta.Router = mux.NewRouter()\n\n\tsrv := handler.NewDefaultServer(generated.NewExecutableSchema(generated.Config{Resolvers: &graph.Resolver{Service: 
a.Service}}))\n\ta.Router.Handle(\"/playground\", playground.Handler(\"GoNeo4jGql GraphQL playground\", \"/movies\"))\n\ta.Router.Handle(\"/movies\", srv)\n}", "func (h *UserHandlers) initRouter() {\n\tr := chi.NewRouter()\n\t// sub := chi.NewRouter()\n\t// r.Mount(\"/\", sub)\n\n\tr.Post(\"/\", h.Create)\n\tr.Post(\"/login\", h.Login)\n\th.Router = r\n}", "func registerSTSRouter(router *mux.Router) {\n\t// Initialize STS.\n\tsts := &stsAPIHandlers{}\n\n\t// STS Router\n\tstsRouter := router.NewRoute().PathPrefix(\"/\").Subrouter()\n\n\t// AssumeRoleWithClientGrants\n\tstsRouter.Methods(\"POST\").HandlerFunc(httpTraceAll(sts.AssumeRoleWithClientGrants)).\n\t\tQueries(\"Action\", \"AssumeRoleWithClientGrants\").\n\t\tQueries(\"Version\", stsAPIVersion).\n\t\tQueries(\"Token\", \"{Token:.*}\")\n\n\t// AssumeRoleWithWebIdentity\n\tstsRouter.Methods(\"POST\").HandlerFunc(httpTraceAll(sts.AssumeRoleWithWebIdentity)).\n\t\tQueries(\"Action\", \"AssumeRoleWithWebIdentity\").\n\t\tQueries(\"Version\", stsAPIVersion).\n\t\tQueries(\"WebIdentityToken\", \"{Token:.*}\")\n\n}", "func TestSubscriptionsTestSuite(t *testing.T) {\n\tsuite.Run(t, new(SubscriptionsTestSuite))\n}", "func InitRouter(r *gin.RouterGroup, svc accounts.Service) *gin.RouterGroup {\n\n\thandler := NewHandler(svc)\n\n\taccounts := r.Group(\"/accounts\")\n\t// Public\n\taccounts.PUT(\"/register\", handler.Singup)\n\taccounts.POST(\"/login\", AuthMiddleware(svc).LoginHandler)\n\t// Protected\n\taccounts.GET(\"/me\", AuthMiddleware(svc).MiddlewareFunc(), handler.Get)\n\taccounts.GET(\"/refresh_token\", AuthMiddleware(svc).RefreshHandler)\n\n\treturn r\n}", "func InitRoutes(router *httprouter.Router) {\n\t// standard\n\trouter.GET(\"/ping\", ping)\n\trouter.MethodNotAllowed = http.HandlerFunc(notfound)\n\n\t// bid\n\thandlerBid := &bid.HTTPBidHandler{\n\t\tBController: bcon.NewBidController(brepo.NewBidRedis()),\n\t}\n\trouter.GET(\"/get-product\", handlerBid.GetProductHandler)\n\trouter.POST(\"/bid-product\", 
handlerBid.ProductBidHandler)\n}", "func init() {\n\thttp.HandleFunc(\"/notify\", errorAdapter(notifyHandler))\n\thttp.HandleFunc(\"/processnotification\", notifyProcessorHandler)\n}", "func InitGetRoutes(e *echo.Echo) {\n\te.GET(\"user/all\", controllers.GetAllUser)\n\tr := e.Group(\"/restricted\")\n\n\t// Configure middleware with the custom claims type\n\tconfig := middleware.JWTConfig{\n\t\tClaims: &controllers.JwtCustomClaims{},\n\t\tSigningKey: []byte(\"bomba\"),\n\t}\n\tr.Use(middleware.JWTWithConfig(config))\n\tr.GET(\"/user/duration\", controllers.GetDurataionUsers)\n\te.GET(\"user/duration\", controllers.GetDurataionUsers)\n}", "func Initialize() *chi.Mux {\n\tmongoStruct := &db.MongoStruct{}\n\tappointmentsController := controller.AppointmentsController{DB: mongoStruct}\n\tmuxRouter := chi.NewRouter()\n\n\tcors := cors.New(cors.Options{\n\t\tAllowedOrigins: []string{\"*\"},\n\t\tAllowedMethods: []string{\"GET\"},\n\t\tAllowCredentials: true,\n\t})\n\n\tmuxRouter.Use(cors.Handler)\n\tmuxRouter.Use(middleware.RequestID)\n\tmuxRouter.Use(middleware.RealIP)\n\tmuxRouter.Use(middleware.Logger)\n\tmuxRouter.Use(middleware.Recoverer)\n\tmuxRouter.Use(middleware.Timeout(200 * time.Second))\n\n\tmuxRouter.Get(\"/appointment/{id}\", appointmentsController.GetAppointment)\n\tmuxRouter.Post(\"/appointment/\", appointmentsController.CreateAppointment)\n\tmuxRouter.Patch(\"/appointment/{id}\", appointmentsController.UpdateAppointmentStatus)\n\tmuxRouter.Delete(\"/appointment/{id}\", appointmentsController.DeleteAppointment)\n\tmuxRouter.Get(\"/appointments/range/\", appointmentsController.GetAppointmentsWithinDateRange)\n\n\treturn muxRouter\n}", "func InitializeRoutes() {\n\thttp.HandleFunc(\"/webrtc/offer\", createWebRTCOffer)\n}", "func SetUpRouteHandlers() *mux.Router {\n\trouter := mux.NewRouter()\n\trouter.Handle(\"/metrics\", promhttp.Handler())\n\trouter.HandleFunc(\"/snippets\", GetSnippetsEndpoint).Methods(\"GET\")\n\trouter.HandleFunc(\"/snippet/{id}\", 
GetSnippetEndpoint).Methods(\"GET\")\n\trouter.HandleFunc(\"/snippet/\", CreateSnippetEndpoint).Methods(\"POST\")\n\trouter.HandleFunc(\"/snippet/{id}\", DeleteSnippetEndpoint).Methods(\"DELETE\")\n\trouter.HandleFunc(\"/status\", StatusHandler).Methods(\"GET\")\n\treturn router\n}", "func (m *GraphBaseServiceClient) Subscriptions()(*idb8230b65f4a369c23b4d9b41ebe568c657c92f8f77fe36d16d64528b3a317a3.SubscriptionsRequestBuilder) {\n return idb8230b65f4a369c23b4d9b41ebe568c657c92f8f77fe36d16d64528b3a317a3.NewSubscriptionsRequestBuilderInternal(m.pathParameters, m.requestAdapter);\n}", "func (m *GraphBaseServiceClient) Subscriptions()(*idb8230b65f4a369c23b4d9b41ebe568c657c92f8f77fe36d16d64528b3a317a3.SubscriptionsRequestBuilder) {\n return idb8230b65f4a369c23b4d9b41ebe568c657c92f8f77fe36d16d64528b3a317a3.NewSubscriptionsRequestBuilderInternal(m.pathParameters, m.requestAdapter);\n}", "func init() {\n\tevent = &pubSubEvent{ctx: context.Background()}\n\tevent.initClientAndTopic()\n}", "func InitRoutes(router *mux.Router) {\n\t//ping\n\trouter.HandleFunc(\"/ping\", ping).Methods(\"GET\")\n\n\t// modules routes\n\t// add new currency\n\trouter.HandleFunc(\"/v1/currency/addremove\", currency.AddRemoveCurrency).Methods(\"POST\")\n\t// get all currency\n\trouter.HandleFunc(\"/v1/currency/get\", currency.GetCurrency).Methods(\"GET\")\n\t// add new currency rates\n\trouter.HandleFunc(\"/v1/currency_rates/add\", currency.AddCurrencyRates).Methods(\"POST\")\n\t// get specific currency rates\n\trouter.HandleFunc(\"/v1/currency_rates/get/{date}\", currency.GetCurrencyRates).Methods(\"GET\")\n\t// get trend\n\trouter.HandleFunc(\"/v1/currency_rates/get/trend\", currency.GetCurrencyRatesTrend).Methods(\"POST\")\n\n\t// go templating routes\n\n\t// not found\n\trouter.MethodNotAllowedHandler = http.HandlerFunc(notfound)\n}", "func InitService() {\n\t// Create store connection.\n\tstream, err := store.NewStream(context.Background(), EventStream)\n\tif err != nil 
{\n\t\tlog.Fatal(err)\n\t}\n\thttp.HandleFunc(\"/events\", eventsHandler(*stream))\n}", "func InitHandler(router gin.IRouter, uc auction.Usecase) {\n\thandler := &Handler{uc}\n\trouter.GET(\"/list\", handler.listEndpoints)\n\troute := router.Group(\"/auction\")\n\troute.POST(\"/\", handler.create)\n\troute.POST(\"/bidder\", handler.registerBidder)\n\troute.GET(\"/bidder\", handler.getAllBidders)\n}", "func ServiceRoutes() *mux.Router {\n\tinitialize()\n\trouter := mux.NewRouter()\n\n\trouter.HandleFunc(SubscribeTopicRoute, controller.SubscribeTopic)\n\trouter.HandleFunc(PublishToTopicRoute, controller.PublishToTopic).Methods(\"POST\")\n\n\treturn router\n}", "func setupWebSocketRoutes() {\n\tpool := webSoc.NewPool()\n\tgo pool.Start()\n\n\thttp.HandleFunc(\"/\", func(w http.ResponseWriter, r *http.Request) {\n\t\tif _, err := w.Write([]byte(\"welcome\")); err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t})\n\n\thttp.HandleFunc(\"/ws\", func(w http.ResponseWriter, r *http.Request) {\n\t\treadWS(pool, w, r)\n\t})\n\n\thttp.HandleFunc(\"/write\", func(w http.ResponseWriter, r *http.Request) {\n\t\tclientID := \"132412\"\n\t\twriteWS(pool, w, r, clientID)\n\t})\n}", "func InitRouter() *mux.Router {\r\n\trouter := mux.NewRouter().StrictSlash(false)\r\n\r\n\t//dan sekarang kita membutuhkan sebuah fungsi untuk membuat EndPoint\r\n\trouter = setItemRouter(router)\r\n\treturn router\r\n}", "func getNodeSubscriptions(ctx echo.Context) error {\n\tglog.Infof(\"calling getNodeSubscriptions from %s\", ctx.Request().RemoteAddr)\n\n\tnodeName := ctx.Param(\"nodeName\")\n\tif nodeName == \"\" {\n\t\treturn ctx.JSON(http.StatusBadRequest,\n\t\t\t&response{\n\t\t\t\tSuccess: false,\n\t\t\t\tMessage: \"Invalid parameter\",\n\t\t\t})\n\t}\n\n\tconfig := ctx.(*apiContext).config\n\thosts := config.MustString(\"condutor\", \"mongo\")\n\tsession, err := mgo.Dial(hosts)\n\tif err != nil {\n\t\tglog.Errorf(\"getNodeSubscriptions:%v\", err)\n\t\treturn 
ctx.JSON(http.StatusInternalServerError,\n\t\t\t&response{\n\t\t\t\tSuccess: false,\n\t\t\t\tMessage: err.Error(),\n\t\t\t})\n\t}\n\tc := session.DB(\"iothub\").C(\"subscriptions\")\n\tdefer session.Close()\n\n\tsubs := []collector.Subscription{}\n\tif err := c.Find(bson.M{\"NodeName\": nodeName}).Limit(100).Iter().All(&subs); err != nil {\n\t\tglog.Errorf(\"getNodeSubscriptions:%v\", err)\n\t\treturn ctx.JSON(http.StatusNotFound,\n\t\t\t&response{\n\t\t\t\tSuccess: false,\n\t\t\t\tMessage: err.Error(),\n\t\t\t})\n\t}\n\treturn ctx.JSON(http.StatusOK, &response{\n\t\tSuccess: true,\n\t\tResult: subs,\n\t})\n}", "func subscribe(t *testing.T, wsc *client.WSClient, eventid string) {\n\tif err := wsc.Subscribe(eventid); err != nil {\n\t\tpanic(err)\n\t}\n}", "func InitializeAdmin(router *httptreemux.TreeMux) {\n\tif configuration.Config.SAMLCert != \"\" {\n\t\tkeyPair, err := tls.LoadX509KeyPair(configuration.Config.SAMLCert,\n\t\t\tconfiguration.Config.SAMLKey)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tkeyPair.Leaf, err = x509.ParseCertificate(keyPair.Certificate[0])\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\trootURL, err := url.Parse(configuration.Config.HTTPSUrl)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tvar metadata *saml.EntityDescriptor\n\t\tif strings.HasPrefix(configuration.Config.SAMLIDPUrl, \"https://\") {\n\t\t\tidpMetadataURL, err := url.Parse(configuration.Config.SAMLIDPUrl)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tmetadata, err = samlsp.FetchMetadata(context.TODO(), http.DefaultClient, *idpMetadataURL)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t} else if strings.HasPrefix(configuration.Config.SAMLIDPUrl, \"/\") {\n\t\t\tdata, err := ioutil.ReadFile(configuration.Config.SAMLIDPUrl)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tmetadata, err = samlsp.ParseMetadata(data)\n\t\t}\n\t\tsamlSP, err := samlsp.New(samlsp.Options{\n\t\t\tURL: *rootURL,\n\t\t\tKey: 
keyPair.PrivateKey.(*rsa.PrivateKey),\n\t\t\tCertificate: keyPair.Leaf,\n\t\t\tIDPMetadata: metadata,\n\t\t\t//\t\t\tCookieMaxAge: 12 * time.Hour, // consider moving this to the configuration\n\t\t})\n\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Error initializing saml: %s\", err)\n\t\t}\n\n\t\tlog.Println(\"setting up /saml/ handler\")\n\t\trouter.GET(\"/saml/*all\", httptreemux.HandlerFunc(func(w http.ResponseWriter, r *http.Request, _ map[string]string) {\n\t\t\tsamlSP.ServeHTTP(w, r)\n\t\t}))\n\t\trouter.POST(\"/saml/*all\", httptreemux.HandlerFunc(func(w http.ResponseWriter, r *http.Request, _ map[string]string) {\n\t\t\tsamlSP.ServeHTTP(w, r)\n\t\t}))\n\t\tsessionHandler = &authentication.SAMLSession{Middleware: samlSP}\n\t} else {\n\t\tsessionHandler = &authentication.UsernamePasswordSession{}\n\t}\n\trouter.GET(\"/admin\", sessionHandler.RequireSession(adminHandler))\n\n\t// For admin panel\n\trouter.GET(\"/admin/login\", getLoginHandler)\n\trouter.POST(\"/admin/login\", postLoginHandler)\n\trouter.GET(\"/admin/register\", getRegistrationHandler)\n\trouter.POST(\"/admin/register\", postRegistrationHandler)\n\trouter.GET(\"/admin/logout\", logoutHandler)\n\trouter.GET(\"/admin/*filepath\", sessionHandler.RequireSession(adminFileHandler))\n\n\t// For admin API (no trailing slash)\n\t// Posts\n\trouter.GET(\"/admin/api/posts/:number\", sessionHandler.RequireSession(apiPostsHandler))\n\t// Post\n\trouter.GET(\"/admin/api/post/:id\", sessionHandler.RequireSession(getAPIPostHandler))\n\trouter.POST(\"/admin/api/post\", sessionHandler.RequireSession(postAPIPostHandler))\n\trouter.PATCH(\"/admin/api/post\", sessionHandler.RequireSession(patchAPIPostHandler))\n\trouter.DELETE(\"/admin/api/post/:id\", sessionHandler.RequireSession(deleteAPIPostHandler))\n\t// Upload\n\trouter.POST(\"/admin/api/upload\", sessionHandler.RequireSession(apiUploadHandler))\n\t// Images\n\trouter.GET(\"/admin/api/images/:number\", 
sessionHandler.RequireSession(apiImagesHandler))\n\trouter.DELETE(\"/admin/api/image\", sessionHandler.RequireSession(deleteAPIImageHandler))\n\t// Blog\n\trouter.GET(\"/admin/api/blog\", sessionHandler.RequireSession(getAPIBlogHandler))\n\trouter.PATCH(\"/admin/api/blog\", sessionHandler.RequireSession(patchAPIBlogHandler))\n\t// User\n\trouter.GET(\"/admin/api/user/:id\", sessionHandler.RequireSession(getAPIUserHandler))\n\trouter.PATCH(\"/admin/api/user\", sessionHandler.RequireSession(patchAPIUserHandler))\n\t// User id\n\trouter.GET(\"/admin/api/userid\", sessionHandler.RequireSession(getAPIUserIDHandler))\n}", "func Subscribe(name string, paths [][]string) *Handle {\n\tif len(paths) == 0 {\n\t\tlog.Printf(\"module doesn't have paths for Subscribe.\")\n\t\treturn nil\n\t}\n\n\tstreamOnce.Do(func() {\n\t\tconn = startConnection()\n\t})\n\n\tsubMutex.Lock()\n\tdefer subMutex.Unlock()\n\tlog.Printf(\"subscriberId: %d\\n\", subscriberId)\n\n\tif subscriberId == math.MaxInt32 {\n\t\tlog.Printf(\"Can't create subscriber anymore.\\n\")\n\t\treturn nil\n\t}\n\n\tname = fmt.Sprintf(\"%v-%v\", name, subscriberId)\n\thandle := &Handle{\n\t\tsubscriberId: subscriberId,\n\t\tpaths: paths,\n\t\tname: name,\n\t\tconn: conn,\n\n\t\tConfigMessage: make(chan *ConfigMessage),\n\t\tRc: make(chan bool),\n\t}\n\tconn.handles[subscriberId] = handle\n\n\t// set a message to send openconfig\n\tsMsg.confReq.Type = pb.ConfigType_SUBSCRIBE\n\tsMsg.confReq.Module = name\n\n\t// subscribe paths to the server\n\tfor _, path := range paths {\n\n\t\tsMsg.confReq.Path = path\n\t\tsc <- sMsg\n\t\terr := <-sMsg.err\n\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Sending subscription message faild: %v\", err)\n\t\t\tconn.stream.CloseSend()\n\t\t\tdefer delete(conn.handles, subscriberId)\n\t\t\treturn nil\n\t\t}\n\t}\n\tsMsg.confReq = &pb.ConfigRequest{}\n\n\tlog.Printf(\"Subscribe success\\n\")\n\tsubscriberId++\n\treturn handle\n}", "func init() 
{\n\trouter().GET(\n\t\t\"/hello/:userName\",\n\t\tvalidateUserName,\n\t\tsayHello,\n\t)\n}", "func SubListAll(w http.ResponseWriter, r *http.Request) {\n\n\tvar err error\n\tvar strPageSize string\n\tvar pageSize int\n\tvar res subscriptions.PaginatedSubscriptions\n\n\t// Init output\n\toutput := []byte(\"\")\n\n\t// Add content type header to the response\n\tcontentType := \"application/json\"\n\tcharset := \"utf-8\"\n\tw.Header().Add(\"Content-Type\", fmt.Sprintf(\"%s; charset=%s\", contentType, charset))\n\n\t// Grab context references\n\trefStr := gorillaContext.Get(r, \"str\").(stores.Store)\n\tprojectUUID := gorillaContext.Get(r, \"auth_project_uuid\").(string)\n\troles := gorillaContext.Get(r, \"auth_roles\").([]string)\n\n\turlValues := r.URL.Query()\n\tpageToken := urlValues.Get(\"pageToken\")\n\tstrPageSize = urlValues.Get(\"pageSize\")\n\n\t// if this route is used by a user who only has a consumer role\n\t// return all subscriptions that he has access to\n\tuserUUID := \"\"\n\tif !auth.IsProjectAdmin(roles) && !auth.IsServiceAdmin(roles) && auth.IsConsumer(roles) {\n\t\tuserUUID = gorillaContext.Get(r, \"auth_user_uuid\").(string)\n\t}\n\n\tif strPageSize != \"\" {\n\t\tif pageSize, err = strconv.Atoi(strPageSize); err != nil {\n\t\t\tlog.Errorf(\"Pagesize %v produced an error while being converted to int: %v\", strPageSize, err.Error())\n\t\t\terr := APIErrorInvalidData(\"Invalid page size\")\n\t\t\trespondErr(w, err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tif res, err = subscriptions.Find(projectUUID, userUUID, \"\", pageToken, int32(pageSize), refStr); err != nil {\n\t\terr := APIErrorInvalidData(\"Invalid page token\")\n\t\trespondErr(w, err)\n\t\treturn\n\t}\n\n\t// Output result to JSON\n\tresJSON, err := res.ExportJSON()\n\tif err != nil {\n\t\terr := APIErrExportJSON()\n\t\trespondErr(w, err)\n\t\treturn\n\t}\n\n\t// Write Response\n\toutput = []byte(resJSON)\n\trespondOK(w, output)\n\n}", "func InitGetRoutes(e *echo.Echo) {\n\te.GET(\"news/data\", 
controllers.GetNewsData)\n}", "func Prepare() {\n\tsch, e := graphql.NewSchema(*Schema)\n\tif e != nil {\n\t\tlogger.Panic(\"graphql.NewSchema\",\n\t\t\tzap.Error(e),\n\t\t\tzap.Any(\"schema\", Schema),\n\t\t)\n\t}\n\tSchema = nil\n\n\tlogrus.SetLevel(logrus.PanicLevel)\n\tsubManager = gqlsub.NewManager(context.Background(), &sch, subHandlers)\n\thttp.Handle(\"/subscriptions\", graphqlws.NewHandler(graphqlws.HandlerConfig{\n\t\tSubscriptionManager: subManager,\n\t}))\n\n\thttp.Handle(\"/\", handler.New(&handler.Config{\n\t\tSchema: &sch,\n\t\tPretty: true,\n\t\tPlaygroundConfig: handler.NewDefaultPlaygroundConfig(),\n\t}))\n}", "func (s *BasetelephoneListener) EnterSubscriber(ctx *SubscriberContext) {}", "func InitHandler(router *mux.Router, chain *alice.Chain) error {\n\trouter.Path(\"/healthz\").Handler(chain.ThenFunc(healthEndpoint)).Methods(\"GET\")\n\treturn nil\n}", "func (s *Server) InitRouter() {\n\ts.Router = mux.NewRouter().StrictSlash(true)\n\tfor _, route := range s.GetRoutes() {\n\t\tvar handler http.Handler\n\t\thandler = route.HandlerFunc\n\t\thandler = s.Header(handler)\n\t\tif route.Auth {\n\t\t\thandler = s.AuthHandler(handler, &dauth.Perm{\n\t\t\t\tService: route.Service,\n\t\t\t\tName: route.Name,\n\t\t\t})\n\t\t}\n\n\t\ts.Router.\n\t\t\tMethods(route.Method).\n\t\t\tPath(route.Path).\n\t\t\tName(route.Name).\n\t\t\tHandler(handler)\n\t}\n\n\ts.Router.NotFoundHandler = http.HandlerFunc(s.NotFoundHandler)\n}", "func NewSubscriptionHandler(conn *graphqlws.Conn, dialer Dialer) SubscriptionHandler {\n\thandler := &redisSubscriptionHandler{\n\t\tconn: conn,\n\t\tdialer: dialer,\n\t}\n\treturn handler\n}", "func initRoutes() {\n\tif webMux.routesSetup {\n\t\treturn\n\t}\n\tvar wildcardOrigin bool\n\tvar c *cors.Cors\n\tauthorizationOn := (len(tc.Auth.ProxyAddress) != 0)\n\tif len(corsDomains) > 0 {\n\t\tcopts := cors.Options{\n\t\t\tAllowedMethods: []string{\"GET\", \"POST\", \"DELETE\", \"HEAD\"},\n\t\t}\n\t\tif authorizationOn 
{\n\t\t\tcopts.AllowOriginFunc = corsValidator\n\t\t\tcopts.AllowedHeaders = []string{\"Authorization\", \"authorization\"}\n\t\t\tcopts.AllowCredentials = true\n\t\t\tc = cors.New(copts)\n\t\t} else {\n\t\t\tvar allowed []string\n\t\t\tfor domain := range corsDomains {\n\t\t\t\tif domain == \"*\" {\n\t\t\t\t\tdvid.Infof(\"setting allowed origins to wildcard *\\n\")\n\t\t\t\t\twildcardOrigin = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tallowed = append(allowed, domain)\n\t\t\t}\n\t\t\tif !wildcardOrigin {\n\t\t\t\tcopts.AllowedOrigins = allowed\n\t\t\t\tdvid.Infof(\"setting allowed origins to %v\\n\", allowed)\n\t\t\t\tc = cors.New(copts)\n\t\t\t}\n\t\t}\n\t}\n\n\twebMuxMu.Lock()\n\tsilentMux := web.New()\n\twebMux.Handle(\"/api/load\", silentMux)\n\twebMux.Handle(\"/api/heartbeat\", silentMux)\n\twebMux.Handle(\"/api/user-latencies\", silentMux)\n\tif c != nil {\n\t\tsilentMux.Use(c.Handler)\n\t} else if wildcardOrigin {\n\t\tsilentMux.Use(wildcardAccessHandler)\n\t}\n\tsilentMux.Use(latencyHandler)\n\tsilentMux.Get(\"/api/load\", loadHandler)\n\tsilentMux.Get(\"/api/heartbeat\", heartbeatHandler)\n\tsilentMux.Get(\"/api/user-latencies\", latenciesHandler)\n\n\tmainMux := web.New()\n\twebMux.Handle(\"/*\", mainMux)\n\tmainMux.Use(middleware.Logger)\n\tmainMux.Use(middleware.AutomaticOptions)\n\tmainMux.Use(httpAvailHandler)\n\tmainMux.Use(recoverHandler)\n\tmainMux.Use(adminPrivHandler)\n\tif c != nil {\n\t\tmainMux.Use(c.Handler)\n\t} else if wildcardOrigin {\n\t\tmainMux.Use(wildcardAccessHandler)\n\t}\n\n\tmainMux.Get(\"/interface\", interfaceHandler)\n\tmainMux.Get(\"/interface/version\", versionHandler)\n\n\tmainMux.Get(\"/api/help\", helpHandler)\n\tmainMux.Get(\"/api/help/\", helpHandler)\n\tmainMux.Get(\"/api/help/:typename\", typehelpHandler)\n\n\tmainMux.Get(\"/api/storage\", serverStorageHandler)\n\n\t// -- server API\n\n\tserverMux := web.New()\n\tmainMux.Handle(\"/api/server/:action\", 
serverMux)\n\tserverMux.Use(activityLogHandler)\n\tserverMux.Get(\"/api/server/info\", serverInfoHandler)\n\tserverMux.Get(\"/api/server/info/\", serverInfoHandler)\n\tserverMux.Get(\"/api/server/note\", serverNoteHandler)\n\tserverMux.Get(\"/api/server/note/\", serverNoteHandler)\n\tserverMux.Get(\"/api/server/config\", serverConfigHandler)\n\tserverMux.Get(\"/api/server/config/\", serverConfigHandler)\n\tserverMux.Get(\"/api/server/types\", serverTypesHandler)\n\tserverMux.Get(\"/api/server/types/\", serverTypesHandler)\n\tserverMux.Get(\"/api/server/compiled-types\", serverCompiledTypesHandler)\n\tserverMux.Get(\"/api/server/compiled-types/\", serverCompiledTypesHandler)\n\tserverMux.Get(\"/api/server/groupcache\", serverGroupcacheHandler)\n\tserverMux.Get(\"/api/server/groupcache/\", serverGroupcacheHandler)\n\tserverMux.Get(\"/api/server/blobstore/:ref\", blobstoreHandler)\n\tserverMux.Get(\"/api/server/token\", serverTokenHandler)\n\tserverMux.Get(\"/api/server/token/\", serverTokenHandler)\n\n\tserverMux.Post(\"/api/server/settings\", serverSettingsHandler)\n\tserverMux.Post(\"/api/server/reload-auth\", serverReloadAuthHandler)\n\tserverMux.Post(\"/api/server/reload-auth/\", serverReloadAuthHandler)\n\tserverMux.Post(\"/api/server/reload-blocklist\", serverReloadBlocklistHandler)\n\tserverMux.Post(\"/api/server/reload-blocklist/\", serverReloadBlocklistHandler)\n\n\t// -- repos API\n\n\tmainMux.Post(\"/api/repos\", reposPostHandler)\n\tmainMux.Get(\"/api/repos/info\", reposInfoHandler)\n\n\t// -- repo API\n\n\trepoRawMux := web.New()\n\tmainMux.Handle(\"/api/repo/:uuid\", repoRawMux)\n\trepoRawMux.Use(activityLogHandler)\n\trepoRawMux.Use(repoRawSelector)\n\trepoRawMux.Head(\"/api/repo/:uuid\", repoHeadHandler)\n\n\trepoMux := web.New()\n\tmainMux.Handle(\"/api/repo/:uuid/:action\", repoMux)\n\tmainMux.Handle(\"/api/repo/:uuid/:action/:name\", repoMux)\n\trepoMux.Use(repoRawSelector)\n\tif authorizationOn 
{\n\t\trepoMux.Use(isAuthorized)\n\t}\n\trepoMux.Use(mutationsHandler)\n\trepoMux.Use(activityLogHandler)\n\trepoMux.Use(repoSelector)\n\trepoMux.Get(\"/api/repo/:uuid/info\", repoInfoHandler)\n\trepoMux.Post(\"/api/repo/:uuid/info\", repoPostInfoHandler)\n\trepoMux.Post(\"/api/repo/:uuid/instance\", repoNewDataHandler)\n\trepoMux.Get(\"/api/repo/:uuid/branch-versions/:name\", repoBranchVersionsHandler)\n\trepoMux.Get(\"/api/repo/:uuid/log\", getRepoLogHandler)\n\trepoMux.Post(\"/api/repo/:uuid/log\", postRepoLogHandler)\n\trepoMux.Post(\"/api/repo/:uuid/merge\", repoMergeHandler)\n\trepoMux.Post(\"/api/repo/:uuid/resolve\", repoResolveHandler)\n\n\tnodeMux := web.New()\n\tmainMux.Handle(\"/api/node/:uuid\", nodeMux)\n\tmainMux.Handle(\"/api/node/:uuid/:action\", nodeMux)\n\tnodeMux.Use(repoRawSelector)\n\tif authorizationOn {\n\t\tnodeMux.Use(isAuthorized)\n\t}\n\tnodeMux.Use(mutationsHandler)\n\tnodeMux.Use(activityLogHandler)\n\tnodeMux.Use(nodeSelector)\n\tnodeMux.Get(\"/api/node/:uuid/note\", getNodeNoteHandler)\n\tnodeMux.Post(\"/api/node/:uuid/note\", postNodeNoteHandler)\n\tnodeMux.Get(\"/api/node/:uuid/log\", getNodeLogHandler)\n\tnodeMux.Post(\"/api/node/:uuid/log\", postNodeLogHandler)\n\tnodeMux.Get(\"/api/node/:uuid/commit\", repoCommitStateHandler)\n\tnodeMux.Get(\"/api/node/:uuid/status\", repoCommitStateHandler)\n\tnodeMux.Post(\"/api/node/:uuid/commit\", repoCommitHandler)\n\tnodeMux.Post(\"/api/node/:uuid/branch\", repoBranchHandler)\n\tnodeMux.Post(\"/api/node/:uuid/newversion\", repoNewVersionHandler)\n\tnodeMux.Post(\"/api/node/:uuid/tag\", repoTagHandler)\n\n\tinstanceMux := web.New()\n\tmainMux.Handle(\"/api/node/:uuid/:dataname/:keyword\", instanceMux)\n\tmainMux.Handle(\"/api/node/:uuid/:dataname/:keyword/*\", instanceMux)\n\tinstanceMux.Use(repoRawSelector)\n\tif authorizationOn 
{\n\t\tinstanceMux.Use(isAuthorized)\n\t}\n\tinstanceMux.Use(mutationsHandler)\n\tinstanceMux.Use(instanceSelector)\n\tinstanceMux.NotFound(notFound)\n\n\tmainMux.Get(\"/*\", mainHandler)\n\n\twebMux.routesSetup = true\n\twebMuxMu.Unlock()\n}", "func initMultitenantDatabases(apiRouter *mux.Router, context *Context) {\n\taddContext := func(handler contextHandlerFunc) *contextHandler {\n\t\treturn newContextHandler(context, handler)\n\t}\n\n\tMultitenantDatabasesRouter := apiRouter.PathPrefix(\"/multitenant_databases\").Subrouter()\n\tMultitenantDatabasesRouter.Handle(\"\", addContext(handleGetMultitenantDatabases)).Methods(\"GET\")\n\n\tMultitenantDatabaseRouter := apiRouter.PathPrefix(\"/multitenant_database/{multitenant_database:[A-Za-z0-9]{26}}\").Subrouter()\n\tMultitenantDatabaseRouter.Handle(\"\", addContext(handleGetMultitenantDatabase)).Methods(\"GET\")\n\tMultitenantDatabaseRouter.Handle(\"\", addContext(handleUpdateMultitenantDatabase)).Methods(\"PUT\")\n\tMultitenantDatabaseRouter.Handle(\"\", addContext(handleDeleteMultitenantDatabase)).Methods(\"DELETE\")\n}", "func InitRoutes() *mux.Router {\n\trouter := mux.NewRouter()\n\t// router = SetPackagesRoutes(router)\n\t// // router = SetWordsRoutes(router)\n\t// // router = SetUsersRoutes(router)\n\t// router = SetResultsRoutes(router)\n\treturn router.StrictSlash(false)\n}", "func (_m *Callbacks) SubscriptionCreated(id *fftypes.UUID) {\n\t_m.Called(id)\n}", "func getSubscriptions(request router.Request) (int, []byte) {\n\n\tquery := datastore.NewQuery(SUBSCRIPTION_KEY).Filter(\"Project =\", request.GetPathParams()[\"project_id\"])\n\tsubscriptions := make([]Subscription, 0)\n\t_, err := query.GetAll(request.GetContext(), &subscriptions)\n\n\tif err != nil {\n\t\tlog.Errorf(request.GetContext(), \"Error retriving Subscriptions: %v\", err)\n\t\treturn http.StatusInternalServerError, []byte(err.Error())\n\t}\n\n\tsubscriptionBytes, err := json.MarshalIndent(subscriptions, \"\", \"\t\")\n\n\tif err != nil 
{\n\t\tlog.Errorf(request.GetContext(), \"Error retriving Subscriptions: %v\", err)\n\t\treturn http.StatusInternalServerError, []byte(err.Error())\n\t}\n\n\treturn http.StatusOK, subscriptionBytes\n\n}", "func InitializeRouter(s *config.Server) {\n\tr := &Router{\n\t\tRouter: mux.NewRouter(),\n\t\tServer: s,\n\t}\n\n\tinitializeUserRoutes(r)\n\n\tlog.Fatal(http.ListenAndServe(\":8080\", r.Router))\n}", "func (a *App) getSubscribersOnline(w http.ResponseWriter, r *http.Request) {\n\n\tsubs, err := models.GetSubscribersOnline(a.jsonrpcHTTPAddr, a.httpClient)\n\tif err != nil {\n\t\trespond.ERROR(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\n\trespond.JSON(w, http.StatusOK, subs)\n\treturn\n}", "func NewSubscriptionHandler() *SubscriptionHandler {\n\treturn &SubscriptionHandler{\n\t\tentitiesChangedSubscriptions: make(map[uuid.UUID]EntitiesChangedFn),\n\t}\n}", "func (h *handler) Subscribe(c *session.Client, topics *[]string) {\n\tif c == nil {\n\t\th.logger.Error(LogErrFailedSubscribe + (ErrClientNotInitialized).Error())\n\t\treturn\n\t}\n\th.logger.Info(fmt.Sprintf(LogInfoSubscribed, c.ID, strings.Join(*topics, \",\")))\n}", "func (env *Env) SubscribeWSS(c *gin.Context) {\n\tdevice := c.Param(\"device\")\n\tchannel := c.Param(\"channel\")\n\tgroup := c.Param(\"group\")\n\ttopic := group + \".\" + device + \".\" + channel\n\tlog.Info(\"Subscribing to topic: \", topic)\n\tsubscriber, err := env.natsConn.GetSubscriber(topic)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\tc.AbortWithError(404, err)\n\t\treturn\n\t}\n\tlog.Info(\"Got subscriber from NATS Connection\")\n\tqueue := make(chan []byte, queuelen)\n\terrs := make(chan error, 1)\n\tsubscriber.newClients <- queue //Add our new client to the recipient list\n\tclientGone := c.Writer.CloseNotify()\n\tconn, err := websocket.Upgrade(c.Writer, c.Request, nil, readBuffer, writeBuffer)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\tc.AbortWithError(404, err)\n\t\treturn\n\t}\n\tfor {\n\t\tselect {\n\t\tcase 
<-clientGone:\n\t\t\tsubscriber.defunctClients <- queue //Remove our client from the client list\n\t\t\tconn.Close()\n\t\t\treturn\n\t\tcase message := <-queue:\n\t\t\tconn.WriteMessage(websocket.TextMessage, json.RawMessage(message))\n\t\tcase <-errs:\n\t\t\tsubscriber.defunctClients <- queue //Remove our client from the client list\n\t\t\tconn.Close()\n\t\t\treturn\n\t\tcase <-subscriber.errors:\n\t\t\tconn.Close()\n\t\t\treturn\n\t\t}\n\t}\n}", "func (client *Client) RegisterSubscription() error {\n\tif client.events_http == nil {\n\t\tif ip_address, err := getInterfaceAddress(client.config.EventsInterface); err != nil {\n\t\t\treturn errors.New(fmt.Sprintf(\"Unable to get the ip address from the interface: %s, error: %s\",\n\t\t\t\tclient.config.EventsInterface, err))\n\t\t} else {\n\t\t\t// step: set the ip address\n\t\t\tclient.ipaddress = ip_address\n\t\t\tbinding := fmt.Sprintf(\"%s:%d\", ip_address, client.config.EventsPort)\n\t\t\t// step: register the handler\n\t\t\thttp.HandleFunc(DEFAULT_EVENTS_URL, client.HandleMarathonEvent)\n\t\t\t// step: create the http server\n\t\t\tclient.events_http = &http.Server{\n\t\t\t\tAddr: binding,\n\t\t\t\tHandler: nil,\n\t\t\t\tReadTimeout: 10 * time.Second,\n\t\t\t\tWriteTimeout: 10 * time.Second,\n\t\t\t\tMaxHeaderBytes: 1 << 20,\n\t\t\t}\n\t\t\tclient.log(\"RegisterSubscription() Attempting to listen on binding: %s\", binding)\n\n\t\t\t// @todo need to add a timeout value here\n\t\t\tlistener, err := net.Listen(\"tcp\", binding)\n\t\t\tif err != nil {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tclient.log(\"RegisterSubscription() Starting to listen on http events service\")\n\t\t\tgo func() {\n\t\t\t\tfor {\n\t\t\t\t\tclient.events_http.Serve(listener)\n\t\t\t\t\tclient.log(\"RegisterSubscription() Exitted the http events service\")\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t}\n\n\t// step: get the callback url\n\tcallback := client.SubscriptionURL()\n\n\t// step: check if the callback is 
registered\n\tclient.log(\"RegisterSubscription() Checking if we already have a subscription for callback %s\", callback)\n\tif found, err := client.HasSubscription(callback); err != nil {\n\t\treturn err\n\t} else if !found {\n\t\tclient.log(\"RegisterSubscription() Registering a subscription with Marathon: callback: %s\", callback)\n\t\t// step: we need to register our self\n\t\turi := fmt.Sprintf(\"%s?callbackUrl=%s\", MARATHON_API_SUBSCRIPTION, callback)\n\t\tif err := client.apiPost(uri, \"\", nil); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tclient.log(\"RegisterSubscription() A subscription already exists for this callback: %s\", callback)\n\t}\n\treturn nil\n}", "func InitializeRoutes(router *mux.Router) {\n\n\t//Employees routes\n\trouter.HandleFunc(\"/emp\", SetMiddlewareJSON(c.CreateEmp)).Methods(\"POST\")\n\trouter.HandleFunc(\"/emp\", SetMiddlewareJSON(c.GetAllEmp)).Methods(\"GET\")\n\trouter.HandleFunc(\"/emp/{id}\", SetMiddlewareJSON(c.GetEmp)).Methods(\"GET\")\n\trouter.HandleFunc(\"/emp/{id}\", SetMiddlewareJSON(c.UpdateEmp)).Methods(\"PUT\")\n\trouter.HandleFunc(\"/emp/{id}\", SetMiddlewareJSON(c.DeleteEmp)).Methods(\"DELETE\")\n}", "func (app *Application) Subscribe(store *todo.Store) {\n\tstore.Register(app.subscriber)\n}", "func (h *notificationHandler) handleRegistrations(w http.ResponseWriter, r *http.Request) {\n\tvar sub webpush.Subscription\n\tif err := json.NewDecoder(r.Body).Decode(&sub); err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\th.mutex.Lock()\n\tdefer h.mutex.Unlock()\n\n\tif h.subscriptions == nil {\n\t\th.subscriptions = make(map[string]webpush.Subscription)\n\t}\n\th.subscriptions[sub.Endpoint] = sub\n}", "func InitRouter() {\n\tr := mux.NewRouter()\n\n\tr.HandleFunc(\"/login\", service.Session{}.Login)\n\tr.HandleFunc(\"/logmein\", service.Session{}.LogMeIn).Methods(\"GET\")\n\tr.HandleFunc(\"/logout\", service.Session{}.Logout).Methods(\"GET\")\n\n\tr.HandleFunc(\"/faq\", func(w 
http.ResponseWriter, r *http.Request) {\n\t\thc := &kinli.HttpContext{W: w, R: r}\n\t\tpage := kinli.NewPage(hc, \"Frequently Asked Questions\", \"\", \"\", nil)\n\t\tkinli.DisplayPage(w, \"faq\", page)\n\t}).Methods(\"GET\")\n\n\tr.HandleFunc(\"/example\", func(w http.ResponseWriter, r *http.Request) {\n\t\thc := &kinli.HttpContext{W: w, R: r}\n\t\tpage := kinli.NewPage(hc, \"Example Form\", \"\", \"\", nil)\n\t\tkinli.DisplayPage(w, \"example\", page)\n\t}).Methods(\"GET\")\n\n\tr.HandleFunc(\"/\", func(w http.ResponseWriter, r *http.Request) {\n\t\thc := &kinli.HttpContext{W: w, R: r}\n\t\tpage := kinli.NewPage(hc, \"hello page\", \"\", \"\", nil)\n\t\tkinli.DisplayPage(w, \"home\", page)\n\t}).Methods(\"GET\")\n\n\tr.HandleFunc(\"/{uid}\", service.FormSubmissionRequest).Methods(\"POST\")\n\n\tr.NotFoundHandler = http.HandlerFunc(notFound)\n\n\tinitStatic(r)\n\n\tsrv := &http.Server{\n\t\tHandler: r,\n\t\tAddr: common.Config.LocalServer,\n\t\tWriteTimeout: 60 * time.Second,\n\t\tReadTimeout: 60 * time.Second,\n\t}\n\tlog.Println(\"Starting server on\", common.Config.LocalServer)\n\tlog.Fatal(srv.ListenAndServe())\n\n}", "func InitRoutes(router *mux.Router) *mux.Router {\n\tlogger = log.Logger(\"apps.app\")\n\tappRouter := router.PathPrefix(\"/v1/cloud\").Subrouter()\n\tappRouter.Use(format.FormatResponseMiddleware)\n\tappRouter.HandleFunc(\"/region\", getRegion).Methods(\"GET\")\n\tappRouter.HandleFunc(\"/region\", createRegion).Methods(\"POST\")\n\tappRouter.HandleFunc(\"/sync\", syncHost).Methods(\"POST\")\n\treturn router\n}", "func registerFollowerSubscriptions(srv *server) {\n\tsrv.pubSub.Reset()\n\tsrv.pubSub.Subscribe(events.RASTypeAny, srv.evtLogger)\n\tsrv.pubSub.Subscribe(events.RASTypeStateChange, srv.evtForwarder)\n}", "func handleRequests() {\n //============ creates a new instance of a mux router ===================\n Router:= mux.NewRouter().StrictSlash(true)\n // replace http.HandleFunc with myRouter.HandleFunc\n Router.HandleFunc(\"/\", 
homePage)\n Router.HandleFunc(\"/addStudent\", JSONHeaders(student.AddStudent)).Methods(\"POST\")\n Router.HandleFunc(\"/updateStudent/{id}\", JSONHeaders(student.UpdateStudent)).Methods(\"PUT\")\n Router.HandleFunc(\"/deleteStudent/{id}\", JSONHeaders(student.DeleteStudent)).Methods(\"DELETE\")\n Router.HandleFunc(\"/allStudent\", JSONHeaders(student.GetAllStudents)).Methods(\"GET\")\n Router.HandleFunc(\"/getSingleStudent/{id}\", JSONHeaders(student.GetStudentByID)).Methods(\"GET\")\n\n log.Fatal(http.ListenAndServe(\":10000\", Router))\n}", "func (b *base) initRouter(services []Service) {\n\trouter := &router{b, services}\n\trouter.init()\n\tb.router = router\n}", "func setRoutes() {\n\t// Set routes\n\trouter := mux.NewRouter().StrictSlash(true)\n\trouter.HandleFunc(\"/ws\", handleConnections)\n\trouter.HandleFunc(\"/api/messages\", getMessages).Methods(\"GET\")\n\trouter.HandleFunc(\"/api/messages\", sendMessage).Methods(\"POST\")\n\n\t// Handling the static page in SPA\n\tspa := spaHandler{staticPath: \"public\", indexPath: \"index.html\"} // Set default page folder\n\trouter.PathPrefix(\"/\").Handler(spa)\n\n\tlog.Println(\"http server started on port 8000\")\n\tlog.Fatal(http.ListenAndServe(\":8000\", router))\n}", "func (opcuaExport *OpcuaExport) Subscribe() {\n\tglog.Infof(\"-- Initializing message bus context\")\n\tdefer opcuaExport.configMgr.Destroy()\n\n\tnumOfSubscriber, _ := opcuaExport.configMgr.GetNumSubscribers()\n\tfor i := 0; i < numOfSubscriber; i++ {\n\t\tsubctx, err := opcuaExport.configMgr.GetSubscriberByIndex(i)\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"Failed to get subscriber context : %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\tsubTopics, err := subctx.GetTopics()\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"Failed to fetch topics : %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\tconfig, err := subctx.GetMsgbusConfig()\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"Failed to fetch msgbus config : %v\", err)\n\t\t\treturn\n\t\t}\n\t\tgo worker(opcuaExport, 
config, subTopics[0])\n\t\tsubctx.Destroy()\n\t}\n\t\n}", "func (b *EventStreamBroker) EventStreamHandler(w http.ResponseWriter, r *http.Request) {\n\n\t// Make request SSE compatible.\n\th := w.Header()\n\th.Set(\"Content-Type\", \"text/event-stream\")\n\th.Set(\"Cache-Control\", \"no-cache\")\n\th.Set(\"Connection\", \"keep-alive\")\n\th.Set(\"Access-Control-Allow-Origin\", \"*\")\n\n\t// Make sure that the writer supports flushing.\n\tflusher, ok := w.(http.Flusher)\n\tif !ok {\n\t\thttp.Error(w, \"Streaming unsupported!\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\ttopics := strings.Split(r.URL.Query().Get(\"topics\"), \",\")\n\tif len(topics) == 0 {\n\t\thttp.Error(w, \"Atleast one topic is required.\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// Create a new client\n\tclient := &client{\n\t\tsessID: uuid.New().String(),\n\t\twriteChannel: make(chan []byte, 10),\n\t\ttopics: make(map[string]bool),\n\t}\n\n\tb.mu.Lock()\n\tb.clients[client.sessID] = client\n\tb.mu.Unlock()\n\n\tb.mu.RLock()\n\tlog.Printf(\"New client with sessID '%v', total clients: %v \\n\", client.sessID, len(b.clients))\n\tb.mu.RUnlock()\n\n\t// Write the session ID as the first message for using with 'UpdateSubscriptionsHandler'\n\td, _ := json.Marshal(map[string]interface{}{\"topic\": \"/httpsub/config\", \"payload\": map[string]string{\"session_id\": client.sessID}})\n\tfmt.Fprintf(w, \"data: %s\\n\\n\", d)\n\tflusher.Flush()\n\n\tctx := r.Context()\n\n\tfor _, topic := range topics {\n\t\tif err := b.subscriptionBroker.SubscribeClient(client, topic); err != nil {\n\t\t\tlog.Println(\"Error:\", err)\n\n\t\t\td, _ := json.Marshal(map[string]interface{}{\n\t\t\t\t\"error\": map[string]string{\n\t\t\t\t\t\"code\": \"subscription-failure\",\n\t\t\t\t\t\"message\": fmt.Sprintf(\"Cannot subscribe to topic %v\", topic),\n\t\t\t\t},\n\t\t\t})\n\t\t\tfmt.Fprintf(w, \"data: %s\\n\\n\", d)\n\t\t\tflusher.Flush()\n\t\t}\n\t}\n\n\tgo func() {\n\t\t<-ctx.Done()\n\n\t\tvar wg 
sync.WaitGroup\n\n\t\tfor topic := range client.topics {\n\t\t\twg.Add(1)\n\t\t\tgo func(t string) {\n\t\t\t\tb.subscriptionBroker.UnsubscribeClient(ctx, client, t)\n\t\t\t\twg.Done()\n\t\t\t}(topic)\n\t\t}\n\n\t\twg.Wait()\n\n\t\tb.mu.Lock()\n\t\tdelete(b.clients, client.sessID)\n\t\tb.mu.Unlock()\n\n\t\tb.mu.RLock()\n\t\tlog.Printf(\"Client removed '%v', total clients: %v \\n\", client.sessID, len(b.clients))\n\t\tb.mu.RUnlock()\n\t}()\n\n\tfor {\n\t\tfmt.Fprintf(w, \"data: %s\\n\\n\", <-client.writeChannel)\n\t\tflusher.Flush()\n\t}\n\n}", "func RegisterHandlers(router EchoRouter, si ServerInterface) {\n\n\twrapper := ServerInterfaceWrapper{\n\t\tHandler: si,\n\t}\n\n\trouter.GET(\"/v1/api/claims\", wrapper.GetClaims)\n\trouter.POST(\"/v1/api/claims\", wrapper.CreateClaim)\n\trouter.GET(\"/v1/api/claims/find\", wrapper.FindClaimByName)\n\trouter.DELETE(\"/v1/api/claims/:id\", wrapper.DeleteClaim)\n\trouter.GET(\"/v1/api/claims/:id\", wrapper.GetClaim)\n\trouter.PUT(\"/v1/api/claims/:id\", wrapper.UpdateClaim)\n\trouter.GET(\"/v1/api/scopes\", wrapper.GetScopes)\n\trouter.POST(\"/v1/api/scopes\", wrapper.CreateScope)\n\trouter.GET(\"/v1/api/scopes/find\", wrapper.FindScopeByName)\n\trouter.DELETE(\"/v1/api/scopes/:id\", wrapper.DeleteScope)\n\trouter.GET(\"/v1/api/scopes/:id\", wrapper.GetScope)\n\trouter.PUT(\"/v1/api/scopes/:id\", wrapper.UpdateScope)\n\trouter.POST(\"/v1/api/scopes/:id/claim\", wrapper.AddClaimToScope)\n\trouter.DELETE(\"/v1/api/scopes/:id/claim/:claimId\", wrapper.RemoveClaimFromScope)\n\trouter.GET(\"/v1/api/secretchannels\", wrapper.GetSecretChannels)\n\trouter.POST(\"/v1/api/secretchannels\", wrapper.CreateSecretChannel)\n\trouter.GET(\"/v1/api/secretchannels/find/algouse\", wrapper.FindSecretChannelByAlgouse)\n\trouter.GET(\"/v1/api/secretchannels/find/name\", wrapper.FindSecretChannelByName)\n\trouter.DELETE(\"/v1/api/secretchannels/:id\", wrapper.DeleteSecretChannel)\n\trouter.GET(\"/v1/api/secretchannels/:id\", 
wrapper.GetSecretChannel)\n\trouter.POST(\"/v1/api/secretchannels/:id\", wrapper.RenewSecretChannel)\n\trouter.GET(\"/v1/api/serviceproviders\", wrapper.GetServiceProviders)\n\trouter.POST(\"/v1/api/serviceproviders\", wrapper.CreateServiceProvider)\n\trouter.GET(\"/v1/api/serviceproviders/find\", wrapper.FindServiceProvider)\n\trouter.DELETE(\"/v1/api/serviceproviders/:id\", wrapper.DeleteServiceProvider)\n\trouter.GET(\"/v1/api/serviceproviders/:id\", wrapper.GetServiceProvider)\n\trouter.PATCH(\"/v1/api/serviceproviders/:id\", wrapper.PatchServiceProvider)\n\trouter.PUT(\"/v1/api/serviceproviders/:id\", wrapper.UpdateServiceProvider)\n\trouter.GET(\"/v1/api/serviceproviders/:id/credentials\", wrapper.GetCredentials)\n\trouter.POST(\"/v1/api/serviceproviders/:id/credentials\", wrapper.GenerateCredentials)\n\trouter.POST(\"/v1/api/serviceproviders/:id/status\", wrapper.UpdateServiceProviderStatus)\n\trouter.GET(\"/v1/api/users\", wrapper.GetUsers)\n\trouter.POST(\"/v1/api/users\", wrapper.CreateUser)\n\trouter.GET(\"/v1/api/users/find\", wrapper.FindUser)\n\trouter.POST(\"/v1/api/users/recover/password\", wrapper.InitiatePasswordRecovery)\n\trouter.PUT(\"/v1/api/users/recover/password\", wrapper.ResetUserPassword)\n\trouter.DELETE(\"/v1/api/users/:id\", wrapper.DeleteUser)\n\trouter.GET(\"/v1/api/users/:id\", wrapper.GetUser)\n\trouter.PUT(\"/v1/api/users/:id\", wrapper.UpdateUser)\n\trouter.POST(\"/v1/api/users/:id/password\", wrapper.ChangeUserPassword)\n\trouter.POST(\"/v1/api/users/:id/status\", wrapper.UpdateUserStatus)\n\n}", "func init() {\n //make user\n UsersMux.HandleFunc(\"/users/make\", make)\n //get user info\n UsersMux.HandleFunc(\"/users/activate\", activate)\n //login user\n UsersMux.HandleFunc(\"/users/login\", login)\n}", "func init() {\n\t_ = router.Register(\"httprouter\", New)\n}", "func ExampleRDS_DescribeEventSubscriptions_shared00() {\n\tsvc := rds.New(session.New())\n\tinput := &rds.DescribeEventSubscriptionsInput{}\n\n\tresult, err := 
svc.DescribeEventSubscriptions(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase rds.ErrCodeSubscriptionNotFoundFault:\n\t\t\t\tfmt.Println(rds.ErrCodeSubscriptionNotFoundFault, aerr.Error())\n\t\t\tdefault:\n\t\t\t\tfmt.Println(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t\t// Message from an error.\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tfmt.Println(result)\n}", "func searchSubscriptions(writer http.ResponseWriter, request *http.Request) {\n\tcontactValue := request.FormValue(\"contact\")\n\tsubscriptions, err := controller.GetSubscriptionsByContactValue(database, contactValue)\n\tif err != nil {\n\t\t_ = render.Render(writer, request, err)\n\t\treturn\n\t}\n\tif err := render.Render(writer, request, subscriptions); err != nil {\n\t\t_ = render.Render(writer, request, api.ErrorRender(err))\n\t\treturn\n\t}\n}", "func registerRoutes(jwtMiddleware *jwtmiddleware.JWTMiddleware) *mux.Router {\n\tr := mux.NewRouter()\n\n\tr.Handle(\"/healthcheck\", http.HandlerFunc(healthCheck)).Methods(\"GET\")\n\n\tr.Handle(\"/message\", http.HandlerFunc(message)).Methods(\"POST\")\n\tr.Handle(\"/message/{id}\", http.HandlerFunc(messageDelete)).Methods(\"DELETE\")\n\tr.Handle(\"/publish\", http.HandlerFunc(publish)).Methods(\"POST\")\n\n\tmsgRouter := mux.NewRouter().PathPrefix(\"/message\").Subrouter()\n\tpubRouter := mux.NewRouter().PathPrefix(\"/publish\").Subrouter()\n\n\tr.PathPrefix(\"/message\").Handler(negroni.New(\n\t\tnegroni.HandlerFunc(jwtMiddleware.HandlerWithNext),\n\t\tnegroni.Wrap(msgRouter),\n\t))\n\n\tr.PathPrefix(\"/publish\").Handler(negroni.New(\n\t\tnegroni.HandlerFunc(jwtMiddleware.HandlerWithNext),\n\t\tnegroni.Wrap(pubRouter),\n\t))\n\n\t// GET - handles upgrading http/https connections to ws/wss.\n\t// the JWT middleware is expecting an access_token\n\t// query parameter within the 
request\n\tr.Handle(\"/ws\", negroni.New(\n\t\tnegroni.HandlerFunc(jwtMiddleware.HandlerWithNext),\n\t\tnegroni.HandlerFunc(AddUserID),\n\t\tnegroni.Wrap(broker),\n\t))\n\n\treturn r\n}", "func (c *CartServer) InitializeHandler() http.Handler {\n\trouter := mux.NewRouter()\n\n\trouter.HandleFunc(\"/v1/item\", c.handler.AddItem()).Methods(\"POST\")\n\trouter.HandleFunc(\"/v1/item\", c.handler.RemoveItem()).Methods(\"DELETE\")\n\trouter.HandleFunc(\"/v1/cart/totalCost\", c.handler.TotalCost()).Methods(\"GET\")\n\trouter.HandleFunc(\"/v1/cart/items\", c.handler.GetItems()).Methods(\"GET\")\n\treturn router\n}", "func (s *subscriberdbServicer) ListSubscribers(ctx context.Context, req *lte_protos.ListSubscribersRequest) (*lte_protos.ListSubscribersResponse, error) {\n\tgateway := protos.GetClientGateway(ctx)\n\tif gateway == nil {\n\t\treturn nil, status.Errorf(codes.PermissionDenied, \"missing gateway identity\")\n\t}\n\tif !gateway.Registered() {\n\t\treturn nil, status.Errorf(codes.PermissionDenied, \"gateway is not registered\")\n\t}\n\tnetworkID := gateway.NetworkId\n\n\tapnsByName, apnResourcesByAPN, err := loadAPNs(gateway)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsubProtos, nextToken, err := subscriberdb.LoadSubProtosPage(req.PageSize, req.PageToken, networkID, apnsByName, apnResourcesByAPN)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tflatDigest := &lte_protos.Digest{Md5Base64Digest: \"\"}\n\tperSubDigests := []*lte_protos.SubscriberDigestWithID{}\n\t// The digests are sent back during the request for the first page of subscriber data\n\tif req.PageToken == \"\" {\n\t\tflatDigest, _ = s.getDigestInfo(&lte_protos.Digest{Md5Base64Digest: \"\"}, networkID)\n\t\tperSubDigests, err = s.perSubDigestStore.GetDigest(networkID)\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"Failed to get per-sub digests from store for network %+v: %+v\", networkID, err)\n\t\t}\n\t}\n\n\tlistRes := &lte_protos.ListSubscribersResponse{\n\t\tSubscribers: 
subProtos,\n\t\tNextPageToken: nextToken,\n\t\tFlatDigest: flatDigest,\n\t\tPerSubDigests: perSubDigests,\n\t}\n\treturn listRes, nil\n}", "func initRouter() *mux.Router {\n\tr := mux.NewRouter()\n\tr.HandleFunc(\"/pokemans\", GetPokemans).Methods(\"GET\")\n\n\treturn r\n\n}", "func ensureSubscribe(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tstart := time.Now()\n\t\tvar channelUUID string\n\t\tvar terminal string\n\n\t\tif channelUUID = r.FormValue(\"channel_uuid\"); len(strings.TrimSpace(channelUUID)) == 0 {\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\tlog.Printf(\"<< %s %s %v\", r.Method, r.URL.Path, time.Since(start))\n\t\t\treturn\n\t\t}\n\n\t\tif terminal = r.FormValue(\"terminal_uuid\"); len(strings.TrimSpace(terminal)) == 0 {\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\tlog.Printf(\"<< %s %s %v\", r.Method, r.URL.Path, time.Since(start))\n\t\t\treturn\n\t\t}\n\n\t\tif _, ok := orderClient.channels[channelUUID]; !ok {\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\tlog.Printf(\"<< %s %s %v\", r.Method, r.URL.Path, time.Since(start))\n\t\t\treturn\n\t\t}\n\n\t\t//fazer outras verificacoes para garantir EC, etc\n\t\tchannel := orderClient.channels[channelUUID]\n\t\tchannel.Terminals[terminal] = &Terminal{\n\t\t\tUUID: terminal,\n\t\t\tSub: orderClient.redisClient.Subscribe(channelUUID),\n\t\t}\n\n\t\tnext.ServeHTTP(w, r)\n\t\tlog.Printf(\"<< %s %s %v\", r.Method, r.URL.Path, time.Since(start))\n\t})\n}", "func init() {\n\trefreshPolicies()\n}" ]
[ "0.6481434", "0.59553516", "0.54364514", "0.5402741", "0.53732777", "0.5366589", "0.53303486", "0.53290343", "0.5289545", "0.5288495", "0.52735746", "0.5230477", "0.5226967", "0.5205135", "0.5198127", "0.5193747", "0.5155629", "0.5143773", "0.5122913", "0.5113694", "0.51102215", "0.51069415", "0.50873786", "0.5085441", "0.5078671", "0.5066163", "0.5055772", "0.50490314", "0.50225717", "0.5019285", "0.50179976", "0.50030965", "0.50001067", "0.49812475", "0.4974981", "0.49697334", "0.49679396", "0.49637955", "0.49501058", "0.4946727", "0.4935893", "0.49355745", "0.4925152", "0.49117476", "0.4894216", "0.48931965", "0.48730806", "0.48730806", "0.48716858", "0.48706806", "0.48678392", "0.4858433", "0.48529196", "0.4849373", "0.48360565", "0.48294038", "0.48261252", "0.48244885", "0.48233888", "0.4812778", "0.48093268", "0.48067755", "0.48024672", "0.4802287", "0.4798418", "0.47975293", "0.47954416", "0.47949892", "0.4793115", "0.47921622", "0.47911733", "0.47890246", "0.4785105", "0.47842428", "0.4782731", "0.4780428", "0.4777032", "0.47763652", "0.47707382", "0.47674748", "0.4761048", "0.47558355", "0.47520158", "0.47414175", "0.4737062", "0.47284505", "0.4727539", "0.47202513", "0.4714879", "0.47140446", "0.46999988", "0.46969438", "0.46948913", "0.4692955", "0.46825594", "0.46824116", "0.46821895", "0.4681199", "0.46783072", "0.4678205" ]
0.8779293
0
convert entities to a response
func getLocaleUpdate(localeId string) (map[string]interface{}, error) { resp := map[string]interface{}{} entities, err := model.GetEntitiesAtLocale(localeId) if err != nil { return resp, fmt.Errorf("error retrieving entities to broadcast for locale %v: %v", localeId, err) } resp["entities"] = entities if err != nil { return resp, fmt.Errorf("error marshalling response: %v", err) } return resp, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r Response) AsEntities() (*Entities, bool) {\n\treturn nil, false\n}", "func (rb ResponseBase) AsEntities() (*Entities, bool) {\n\treturn nil, false\n}", "func (r Response) WriteEntity(value interface{}) Response {\n\tif \"\" == r.accept || \"*/*\" == r.accept {\n\t\tfor _, each := range r.produces {\n\t\t\tif MIME_JSON == each {\n\t\t\t\tr.WriteAsJson(value)\n\t\t\t\treturn r\n\t\t\t}\n\t\t\tif MIME_XML == each {\n\t\t\t\tr.WriteAsXml(value)\n\t\t\t\treturn r\n\t\t\t}\n\t\t}\n\t} else { // Accept header specified ; scan for each element in Route.Produces\n\t\tfor _, each := range r.produces {\n\t\t\tif strings.Index(r.accept, each) != -1 {\n\t\t\t\tif MIME_JSON == each {\n\t\t\t\t\tr.WriteAsJson(value)\n\t\t\t\t\treturn r\n\t\t\t\t}\n\t\t\t\tif MIME_XML == each {\n\t\t\t\t\tr.WriteAsXml(value)\n\t\t\t\t\treturn r\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif DefaultResponseMimeType == MIME_JSON {\n\t\tr.WriteAsJson(value)\n\t} else if DefaultResponseMimeType == MIME_XML {\n\t\tr.WriteAsXml(value)\n\t} else {\n\t\tr.WriteHeader(http.StatusNotAcceptable)\n\t\tr.Write([]byte(\"406: Not Acceptable\"))\n\t}\n\treturn r\n}", "func encodeGetByMultiCriteriaResponse(ctx context.Context, w http1.ResponseWriter, response interface{}) (err error) {\n\tif f, ok := response.(endpoint.Failure); ok && f.Failed() != nil {\n\t\tErrorEncoder(ctx, f.Failed(), w)\n\t\treturn nil\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\terr = json.NewEncoder(w).Encode(response)\n\treturn\n}", "func (e Entities) AsResponse() (*Response, bool) {\n\treturn nil, false\n}", "func DefaultEntityHandler(rsp http.ResponseWriter, req *Request, status int, content interface{}) error {\n switch e := content.(type) {\n \n case nil:\n rsp.WriteHeader(status)\n \n case Entity:\n rsp.Header().Add(\"Content-Type\", e.ContentType())\n rsp.WriteHeader(status)\n \n n, err := io.Copy(rsp, e)\n if err != nil {\n return fmt.Errorf(\"Could not write entity: %v\\nIn response to: %v 
%v\\nEntity: %d bytes written\", err, req.Method, req.URL, n)\n }\n \n case json.RawMessage:\n rsp.Header().Add(\"Content-Type\", \"application/json\")\n rsp.WriteHeader(status)\n \n _, err := rsp.Write([]byte(e))\n if err != nil {\n return fmt.Errorf(\"Could not write entity: %v\\nIn response to: %v %v\\nEntity: %d bytes\", err, req.Method, req.URL, len(e))\n }\n \n default:\n rsp.Header().Add(\"Content-Type\", \"application/json\")\n rsp.WriteHeader(status)\n \n data, err := json.Marshal(content)\n if err != nil {\n return fmt.Errorf(\"Could not marshal entity: %v\\nIn response to: %v %v\", err, req.Method, req.URL)\n }\n \n _, err = rsp.Write(data)\n if err != nil {\n return fmt.Errorf(\"Could not write entity: %v\\nIn response to: %v %v\\nEntity: %d bytes\", err, req.Method, req.URL, len(data))\n }\n \n }\n return nil\n}", "func (sr SearchResponse) AsEntities() (*Entities, bool) {\n\treturn nil, false\n}", "func encodeGetByCreteriaResponse(ctx context.Context, w http1.ResponseWriter, response interface{}) (err error) {\n\tif f, ok := response.(endpoint.Failure); ok && f.Failed() != nil {\n\t\tErrorEncoder(ctx, f.Failed(), w)\n\t\treturn nil\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\terr = json.NewEncoder(w).Encode(response)\n\treturn\n}", "func (analyzer *Analyzer) Entities(flavor, payload string, options url.Values) (*EntitiesResponse, error) {\n\tif !entryPoints.hasFlavor(\"entities\", flavor) {\n\t\treturn nil, errors.New(fmt.Sprintf(\"entities info for %s not available\", flavor))\n\t}\n\n\toptions.Add(flavor, payload)\n\turl := entryPoints.urlFor(analyzer.baseUrl, \"entities\", flavor)\n\tdata, err := analyzer.analyze(url, options, nil)\n\n\tif err != nil {\n\t\treturn nil, err\n\t} else {\n\t\tresponse := new(EntitiesResponse)\n\t\terr := json.Unmarshal(data, &response)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\tif response.Status != \"OK\" {\n\t\t\t\treturn nil, 
errors.New(response.StatusInfo)\n\t\t\t} else {\n\t\t\t\treturn response, nil\n\t\t\t}\n\t\t}\n\t}\n}", "func UnprocessabeEntityResponse(w http.ResponseWriter, ers validation.Errors) error {\n\tw.WriteHeader(http.StatusUnprocessableEntity)\n\n\tver := validationResponse{\n\t\tMessage: ers.Error(),\n\t\tErrors: ers,\n\t}\n\n\tdata, err := ver.MarshalJSON()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"marshal json\")\n\t}\n\tif _, err := w.Write(data); err != nil {\n\t\treturn errors.Wrap(err, \"write response\")\n\t}\n\n\treturn nil\n}", "func (s *Service) sendEntity(rsp http.ResponseWriter, req *Request, status int, headers map[string]string, content interface{}) {\n \n if headers != nil {\n for k, v := range headers {\n rsp.Header().Add(k, v)\n }\n }\n if ua := s.userAgent; ua != \"\" {\n rsp.Header().Add(\"User-Agent\", ua)\n }\n \n var err error\n if s.entityHandler != nil {\n err = s.entityHandler(rsp, req, status, content)\n }else{\n err = DefaultEntityHandler(rsp, req, status, content)\n }\n if err != nil {\n alt.Errorf(\"%s: %v\", s.name, err)\n return\n }\n \n}", "func encodeGetDealByStateResponse(ctx context.Context, w http1.ResponseWriter, response interface{}) (err error) {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\terr = json.NewEncoder(w).Encode(response)\n\treturn\n}", "func (o ObjectAccessControlResponseOutput) Entity() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ObjectAccessControlResponse) string { return v.Entity }).(pulumi.StringOutput)\n}", "func encodeGetUserDealByStateResponse(ctx context.Context, w http1.ResponseWriter, response interface{}) (err error) {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\terr = json.NewEncoder(w).Encode(response)\n\treturn\n}", "func (r *Resolver) Entity() generated.EntityResolver { return &entityResolver{r} }", "func (r *Resolver) Entity() generated.EntityResolver { return &entityResolver{r} }", "func (e Entities) AsBasicResponse() 
(BasicResponse, bool) {\n\treturn &e, true\n}", "func (client *IncidentsClient) listEntitiesHandleResponse(resp *http.Response) (IncidentsClientListEntitiesResponse, error) {\n\tresult := IncidentsClientListEntitiesResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.IncidentEntitiesResponse); err != nil {\n\t\treturn IncidentsClientListEntitiesResponse{}, err\n\t}\n\treturn result, nil\n}", "func encodeGetDealByDIDResponse(ctx context.Context, w http1.ResponseWriter, response interface{}) (err error) {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\terr = json.NewEncoder(w).Encode(response)\n\treturn\n}", "func (o BucketAccessControlResponseOutput) Entity() pulumi.StringOutput {\n\treturn o.ApplyT(func(v BucketAccessControlResponse) string { return v.Entity }).(pulumi.StringOutput)\n}", "func (r *Resolver) Entity() EntityResolver { return &entityResolver{r} }", "func allArticles(w http.ResponseWriter, r *http.Request) {\n w.Header().Set(\"Content-Type\", \"application/json\")\n json.NewEncoder(w).Encode(articles)\n}", "func (o BucketOwnerResponseOutput) Entity() pulumi.StringOutput {\n\treturn o.ApplyT(func(v BucketOwnerResponse) string { return v.Entity }).(pulumi.StringOutput)\n}", "func responseList (w http.ResponseWriter, response *model.Response, invoices *model.InvoicesResponse) {\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\tvar err error\n\tif response.Code < 0{\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\terr = json.NewEncoder(w).Encode(&response)\n\t}else{\n\t\tw.WriteHeader(http.StatusOK)\n\t\terr = json.NewEncoder(w).Encode(&invoices)\n\t}\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func (client ModelClient) ListEntitiesResponder(resp *http.Response) (result ListEntityExtractor, err error) {\n\terr = 
autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result.Value),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (e Entities) AsResponseBase() (*ResponseBase, bool) {\n\treturn nil, false\n}", "func (r renderer) Entity(out *bytes.Buffer, entity []byte) {\n\tout.Write(entity)\n}", "func (o BucketObjectOwnerResponseOutput) Entity() pulumi.StringOutput {\n\treturn o.ApplyT(func(v BucketObjectOwnerResponse) string { return v.Entity }).(pulumi.StringOutput)\n}", "func encodeGetUserResponse(ctx context.Context, w http1.ResponseWriter, response interface{}) (err error) {\n\tif f, ok := response.(endpoint.Failure); ok && f.Failed() != nil {\n\t\tErrorEncoder(ctx, f.Failed(), w)\n\t\treturn nil\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\terr = json.NewEncoder(w).Encode(response)\n\treturn\n}", "func ToJSON(entity Entity) *bytes.Buffer {\n\treturn common.ToJSON(entity)\n}", "func encodeGetAllIndustriesResponse(_ context.Context, response interface{}) (interface{}, error) {\n\tres := response.(endpoints.GetAllIndustriesResponse)\n\terr := getError(res.Err)\n\tif err == nil {\n\t\tvar industries []*pb.Industry\n\t\tfor _, industry := range res.Industries {\n\t\t\tindustries = append(industries, industry.ToProto())\n\t\t}\n\t\treturn &pb.GetAllIndustriesResponse{Industries: industries}, nil\n\t}\n\treturn nil, err\n}", "func (e Entities) MarshalJSON() ([]byte, error) {\n\te.Type = TypeEntities\n\tobjectMap := make(map[string]interface{})\n\tif e.QueryScenario != \"\" {\n\t\tobjectMap[\"queryScenario\"] = e.QueryScenario\n\t}\n\tif e.Value != nil {\n\t\tobjectMap[\"value\"] = e.Value\n\t}\n\tif e.QueryContext != nil {\n\t\tobjectMap[\"queryContext\"] = e.QueryContext\n\t}\n\tif e.ContractualRules != nil {\n\t\tobjectMap[\"contractualRules\"] = e.ContractualRules\n\t}\n\tif e.WebSearchURL != nil 
{\n\t\tobjectMap[\"webSearchUrl\"] = e.WebSearchURL\n\t}\n\tif e.ID != nil {\n\t\tobjectMap[\"id\"] = e.ID\n\t}\n\tif e.Type != \"\" {\n\t\tobjectMap[\"_type\"] = e.Type\n\t}\n\treturn json.Marshal(objectMap)\n}", "func encodeResponse(ctx context.Context, w http.ResponseWriter, response interface{}) error {\n\t// Set JSON type\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\n\t// Check error\n\tif e, ok := response.(errorer); ok {\n\t\t// This is a errorer class, now check for error\n\t\tif err := e.error(); err != nil {\n\t\t\tencodeError(ctx, e.error(), w)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t// cast to dataHolder to get Data, otherwise just encode the resposne\n\tif holder, ok := response.(dataHolder); ok {\n\t\treturn json.NewEncoder(w).Encode(holder.getData())\n\t} else {\n\t\treturn json.NewEncoder(w).Encode(response)\n\t}\n}", "func (a Answer) AsEntities() (*Entities, bool) {\n\treturn nil, false\n}", "func toForecastResponse(domainFC *domain.Forecast) *models.Forecast {\n\tmodel := &models.Forecast{\n\t\tTemperature: formatTemp(domainFC.Temperature),\n\t\tWind: formatWind(domainFC.WindSpeed, domainFC.WindDegree),\n\t\tPressure: formatPressure(domainFC.Pressure),\n\t\tHumidity: formatHumidity(domainFC.Humidity),\n\t\tSunrise: formatSunriseOrSunset(domainFC.Sunrise),\n\t\tSunset: formatSunriseOrSunset(domainFC.Sunset),\n\t\tDate: time.Unix(int64(domainFC.DateTime), 0).Format(\"2006-02-01 15:04:05\"),\n\t}\n\treturn model\n}", "func encodeGetByIDResponse(ctx context.Context, w http1.ResponseWriter, response interface{}) (err error) {\n\tif f, ok := response.(endpoint.Failure); ok && f.Failed() != nil {\n\t\tErrorEncoder(ctx, f.Failed(), w)\n\t\treturn nil\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\terr = json.NewEncoder(w).Encode(response)\n\treturn\n}", "func encodeGetByIDResponse(ctx context.Context, w http1.ResponseWriter, response interface{}) (err error) {\n\tif f, ok := 
response.(endpoint.Failure); ok && f.Failed() != nil {\n\t\tErrorEncoder(ctx, f.Failed(), w)\n\t\treturn nil\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\terr = json.NewEncoder(w).Encode(response)\n\treturn\n}", "func Response(c *gin.Context, status int, body interface{}) {\n\taccType := c.GetHeader(\"Accept\")\n\tif accType == \"application/xml\" {\n\t\tc.XML(status, body)\n\t\treturn\n\t}\n\tc.JSON(status, body)\n}", "func encodeCreatePostResponse(ctx context.Context, w http.ResponseWriter, response interface{}) (err error) {\n\tif f, ok := response.(endpoint.Failure); ok && f.Failed() != nil {\n\t\tErrorEncoder(ctx, f.Failed(), w)\n\t\treturn nil\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\terr = json.NewEncoder(w).Encode(response)\n\tif err != nil {\n\t\tlogrus.Warn(err.Error())\n\t}\n\treturn\n}", "func encodePostAcceptDealResponse(ctx context.Context, w http1.ResponseWriter, response interface{}) (err error) {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\terr = json.NewEncoder(w).Encode(response)\n\treturn\n}", "func (s *SaleResponse) FormatResponse() *g.Response {\n\tresponse := new(g.Response)\n\tresponse.Acquirer = Name\n\n\tif s.OrderResult != nil {\n\t\tresponse.Id = s.OrderResult.OrderReference\n\t\tresponse.AuthorizationCode = s.OrderResult.OrderKey\n\t}\n\n\t// If CreditCard\n\tif len(s.CreditCardTransactionResultCollection) > 0 {\n\t\ttransaction := s.CreditCardTransactionResultCollection[0]\n\n\t\tresponse.Amount = transaction.AmountInCents\n\t\t//response.CreditCard = &g.CreditCard{}\n\t\tresponse.NSU = transaction.UniqueSequentialNumber\n\t\tresponse.TID = transaction.TransactionIdentifier\n\t}\n\n\t// If BankingBillet\n\tif len(s.BoletoTransactionResultCollection) > 0 {\n\t\ttransaction := s.BoletoTransactionResultCollection[0]\n\n\t\tresponse.Amount = transaction.AmountInCents\n\t\tresponse.BarCode = transaction.Barcode\n\t\tresponse.BoletoUrl = 
transaction.BoletoUrl\n\t}\n\n\treturn response\n}", "func encodeGetTagResponse(ctx context.Context, w http.ResponseWriter, response interface{}) (err error) {\n\tif f, ok := response.(endpoint.Failure); ok && f.Failed() != nil {\n\t\tErrorEncoder(ctx, f.Failed(), w)\n\t\treturn nil\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\terr = json.NewEncoder(w).Encode(response)\n\tif err != nil {\n\t\t\tlogrus.Warn(err.Error())\n\t\t}\n\treturn\n}", "func encodeResponse(w http.ResponseWriter, resp interface{}) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tjson.NewEncoder(w).Encode(resp)\n}", "func (i Intangible) AsEntities() (*Entities, bool) {\n\treturn nil, false\n}", "func encodePutDealStateResponse(ctx context.Context, w http1.ResponseWriter, response interface{}) (err error) {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\terr = json.NewEncoder(w).Encode(response)\n\treturn\n}", "func encodeGetAllJobPostsResponse(_ context.Context, response interface{}) (interface{}, error) {\n\tres := response.(endpoints.GetAllJobPostsResponse)\n\terr := getError(res.Err)\n\tif err == nil {\n\t\tvar jobPosts []*pb.JobPost\n\t\tfor _, jobPost := range res.JobPosts {\n\t\t\tjobPosts = append(jobPosts, jobPost.ToProto())\n\t\t}\n\t\treturn &pb.GetAllJobPostsResponse{JobPosts: jobPosts}, nil\n\t}\n\treturn nil, err\n}", "func Utf8ToEntity(entity string) string {\n\tif rune, size := utf8.DecodeRuneInString(entity); size != 0 {\n\t\treturn fmt.Sprintf(\"&#%d;\", rune)\n\t}\n\treturn entity\n}", "func getEntity(w http.ResponseWriter, r *http.Request, entity interface{}) {\n\tvars := mux.Vars(r)\n\tid := vars[\"id\"]\n\tif id == \"\" {\n\t\twriteErrorResponse(w, http.StatusBadRequest, \"id not provided\")\n\t\treturn\n\t}\n\n\tdata, err := doGetEntity(id, entity)\n\tif err != nil {\n\t\twriteErrorResponse(w, http.StatusInternalServerError, err.Error())\n\t\treturn\n\t}\n\n\tif len(data) == 0 
{\n\t\twriteErrorResponse(w, http.StatusNotFound, \"id not found\")\n\t\treturn\n\t}\n\n\twriteJSONResponse(w, http.StatusOK, entity)\n}", "func FormatResponse(transaccion *models.Transaccion) TransaccionResponse {\n\ttResponse := TransaccionResponse{}\n\ttResponse.ID = transaccion.ID\n\ttResponse.Fecha = transaccion.Fecha\n\ttResponse.Monto = transaccion.Monto\n\tif len(transaccion.NumeroTarjetaOrigen) == 16 {\n\t\ttResponse.NumeroTarjetaOrigen = utils.HideCard(transaccion.NumeroTarjetaOrigen)\n\t}\n\ttResponse.NumeroTarjetaDestino = utils.HideCard(transaccion.NumeroTarjetaDestino)\n\tif transaccion.Estado == 1{\n\t\ttResponse.Estado=\"Movimiento exitoso\"\n\t} else {\n\t\ttResponse.Estado=\"Failed\"\n\t}\n\n\tif transaccion.IDTipoTransaccion == 1 {\n\t\ttResponse.TipoTransaccion = \"DEPOSITO\"\n\t} else if transaccion.IDTipoTransaccion == 2 {\n\t\ttResponse.TipoTransaccion = \"TRANSFERENCIA\"\n\t}\n\n\ttitular,_:= models.GetClienteByNumeroTarjeta(transaccion.NumeroTarjetaOrigen)\n\ttResponse.TitularTarjeta = titular.Nombre\n\tif titular.ApellidoPaterno != \"\" {\n\t\ttResponse.TitularTarjeta+= \" \"+titular.ApellidoPaterno\n\t}\n\n\treturn tResponse\n}", "func (r Restaurant) AsEntities() (*Entities, bool) {\n\treturn nil, false\n}", "func (x respExt) ConvertExt(v interface{}) interface{} {\n\tresponse := v.(Response)\n\n\t// Assemble the \"over-the-wire\" response dictionary\n\twire := wireFormat{\n\t\t[]byte(\"id\"),\n\t\tuint64(response.ID),\n\t}\n\tif response.Result != nil {\n\t\twire = append(wire, []byte(\"result\"), response.Result)\n\t}\n\tif response.Error != \"\" {\n\t\terrorDict := make(map[string]string)\n\t\terrorDict[\"message\"] = response.Error\n\t\twire = append(wire, []byte(\"error\"), errorDict)\n\t}\n\n\t// Convert it to CBOR bytes\n\tvar resp []byte\n\tencoder := codec.NewEncoderBytes(&resp, x.cbor)\n\tencoder.MustEncode(wire)\n\treturn resp\n}", "func outputIdentities(o *edgeOptions, children []*gabs.Container, pagingInfo *paging) error 
{\n\tif o.OutputJSONResponse {\n\t\treturn nil\n\t}\n\n\tfor _, entity := range children {\n\t\tid, _ := entity.Path(\"id\").Data().(string)\n\t\tname, _ := entity.Path(\"name\").Data().(string)\n\t\ttypeName, _ := entity.Path(\"type.name\").Data().(string)\n\t\troleAttributes := entity.Path(\"roleAttributes\").String()\n\t\tif _, err := fmt.Fprintf(o.Out, \"id: %v name: %v type: %v role attributes: %v\\n\", id, name, typeName, roleAttributes); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tpagingInfo.output(o)\n\n\treturn nil\n}", "func encodeGetPostResponse(ctx context.Context, w http.ResponseWriter, response interface{}) (err error) {\n\tif f, ok := response.(endpoint.Failure); ok && f.Failed() != nil {\n\t\tErrorEncoder(ctx, f.Failed(), w)\n\t\treturn nil\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\terr = json.NewEncoder(w).Encode(response)\n\tif err != nil {\n\t\t\tlogrus.Warn(err.Error())\n\t\t}\n\treturn\n}", "func encodeResponse(ctx context.Context, w http.ResponseWriter, response interface{}) (err error) {\n\tresp := response.(*common.XmidtResponse)\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.Header().Set(common.HeaderWPATID, ctx.Value(common.ContextKeyRequestTID).(string))\n\tcommon.ForwardHeadersByPrefix(\"\", resp.ForwardedHeaders, w.Header())\n\n\tw.WriteHeader(resp.Code)\n\t_, err = w.Write(resp.Body)\n\treturn\n}", "func encodeCreateTagResponse(ctx context.Context, w http.ResponseWriter, response interface{}) (err error) {\n\tif f, ok := response.(endpoint.Failure); ok && f.Failed() != nil {\n\t\tErrorEncoder(ctx, f.Failed(), w)\n\t\treturn nil\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\terr = json.NewEncoder(w).Encode(response)\n\tif err != nil {\n\t\t\tlogrus.Warn(err.Error())\n\t\t}\n\treturn\n}", "func (am *analysisMapper) toEntity(dto analysisDTO) entity.AnalysisResults {\n\treturn entity.AnalysisResults{\n\t\tID: 
uuid.MustParse(dto.ID),\n\t\tDateCreated: dto.CreatedAt,\n\t\tProjectName: dto.ProjectRef,\n\t\tProjectID: uuid.MustParse(dto.ProjectID),\n\t\tPipelineMiners: dto.Miners,\n\t\tPipelineSplitters: dto.Splitters,\n\t\tPipelineExpanders: dto.Expanders,\n\t\tFilesTotal: int(dto.Files.Total),\n\t\tFilesValid: int(dto.Files.Valid),\n\t\tFilesError: int(dto.Files.Failed),\n\t\tFilesErrorSamples: dto.Files.ErrorSamples,\n\t\tIdentifiersTotal: int(dto.Identifiers.Total),\n\t\tIdentifiersValid: int(dto.Identifiers.Valid),\n\t\tIdentifiersError: int(dto.Identifiers.Failed),\n\t\tIdentifiersErrorSamples: dto.Identifiers.ErrorSamples,\n\t}\n}", "func (client MultipleResponsesClient) Get200ModelA200NoneResponder(resp *http.Response) (result A, err error) { \n err = autorest.Respond(\n resp,\n client.ByInspecting(),\n azure.WithErrorUnlessStatusCode(http.StatusOK),\n autorest.ByUnmarshallingJSON(&result),\n autorest.ByClosing())\n result.Response = autorest.Response{Response: resp}\n return\n}", "func encodeDeleteResponse(ctx context.Context, w http1.ResponseWriter, response interface{}) (err error) {\n\tif f, ok := response.(endpoint.Failure); ok && f.Failed() != nil {\n\t\tErrorEncoder(ctx, f.Failed(), w)\n\t\treturn nil\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\terr = json.NewEncoder(w).Encode(response)\n\treturn\n}", "func encodeDeleteResponse(ctx context.Context, w http1.ResponseWriter, response interface{}) (err error) {\n\tif f, ok := response.(endpoint.Failure); ok && f.Failed() != nil {\n\t\tErrorEncoder(ctx, f.Failed(), w)\n\t\treturn nil\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\terr = json.NewEncoder(w).Encode(response)\n\treturn\n}", "func encodeDeletePostResponse(ctx context.Context, w http.ResponseWriter, response interface{}) (err error) {\n\tif f, ok := response.(endpoint.Failure); ok && f.Failed() != nil {\n\t\tErrorEncoder(ctx, f.Failed(), w)\n\t\treturn 
nil\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\terr = json.NewEncoder(w).Encode(response)\n\tif err != nil {\n\t\t\tlogrus.Warn(err.Error())\n\t\t}\n\treturn\n}", "func (client *GroupClient) getEntityTagHandleResponse(resp *http.Response) (GroupGetEntityTagResponse, error) {\n\tresult := GroupGetEntityTagResponse{RawResponse: resp}\n\tif val := resp.Header.Get(\"ETag\"); val != \"\" {\n\t\tresult.ETag = &val\n\t}\n\tif resp.StatusCode >= 200 && resp.StatusCode < 300 {\n\t\tresult.Success = true\n\t}\n\treturn result, nil\n}", "func (client MultipleResponsesClient) Get200ModelA400NoneResponder(resp *http.Response) (result A, err error) { \n err = autorest.Respond(\n resp,\n client.ByInspecting(),\n azure.WithErrorUnlessStatusCode(http.StatusOK),\n autorest.ByUnmarshallingJSON(&result),\n autorest.ByClosing())\n result.Response = autorest.Response{Response: resp}\n return\n}", "func (h Hotel) AsEntities() (*Entities, bool) {\n\treturn nil, false\n}", "func encodeUpdatePostResponse(ctx context.Context, w http.ResponseWriter, response interface{}) (err error) {\n\tif f, ok := response.(endpoint.Failure); ok && f.Failed() != nil {\n\t\tErrorEncoder(ctx, f.Failed(), w)\n\t\treturn nil\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\terr = json.NewEncoder(w).Encode(response)\n\tif err != nil {\n\t\t\tlogrus.Warn(err.Error())\n\t\t}\n\treturn\n}", "func encodeCreateUserResponse(ctx context.Context, w http1.ResponseWriter, response interface{}) (err error) {\n\tif f, ok := response.(endpoint.Failure); ok && f.Failed() != nil {\n\t\tErrorEncoder(ctx, f.Failed(), w)\n\t\treturn nil\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\terr = json.NewEncoder(w).Encode(response)\n\treturn\n}", "func NewResponseEntity() *ResponseEntity {\n\tthis := ResponseEntity{}\n\treturn &this\n}", "func encodeGenericResponse(ctx context.Context, w http.ResponseWriter, response interface{}) error 
{\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\treturn json.NewEncoder(w).Encode(response)\n}", "func encodeCreateCompanyResponse(_ context.Context, response interface{}) (interface{}, error) {\n\tres := response.(endpoints.CreateCompanyResponse)\n\terr := getError(res.Err)\n\tif err == nil {\n\t\treturn res.Company.ToProto(), nil\n\t}\n\treturn nil, err\n}", "func (e Entities) AsBasicResponseBase() (BasicResponseBase, bool) {\n\treturn &e, true\n}", "func encodeGetAllKeyPersonsResponse(_ context.Context, response interface{}) (interface{}, error) {\n\tres := response.(endpoints.GetAllKeyPersonsResponse)\n\terr := getError(res.Err)\n\tif err == nil {\n\t\tvar keyPersons []*pb.KeyPerson\n\t\tfor _, keyPerson := range res.KeyPersons {\n\t\t\tkeyPersons = append(keyPersons, keyPerson.ToProto())\n\t\t}\n\t\treturn &pb.GetAllKeyPersonsResponse{KeyPersons: keyPersons}, nil\n\t}\n\treturn nil, err\n}", "func (s *serviceOutlite) EntityResults() (*[]models.ModelOutlet, schemas.SchemaDatabaseError) {\n\tres, err := s.outlet.EntityResults()\n\treturn res, err\n}", "func (client *APIClient) getEntityTagHandleResponse(resp *http.Response) (APIClientGetEntityTagResponse, error) {\n\tresult := APIClientGetEntityTagResponse{}\n\tif val := resp.Header.Get(\"ETag\"); val != \"\" {\n\t\tresult.ETag = &val\n\t}\n\tresult.Success = resp.StatusCode >= 200 && resp.StatusCode < 300\n\treturn result, nil\n}", "func ToEntity(om *OrganizationMongo) *Organization {\n\tID := \"\"\n\tif !om.ID.IsZero() {\n\t\tID = om.ID.Hex()\n\t}\n\tIDsCategory := make([]string, 0)\n\tif om.IDsCategory != nil && len(om.IDsCategory) > 0 {\n\t\tfor _, id := range om.IDsCategory {\n\t\t\tIDsCategory = append(IDsCategory, id.Hex())\n\t\t}\n\t}\n\n\treturn &Organization{\n\t\tID: ID,\n\t\tName: om.Name,\n\t\tEmail: om.Email,\n\t\tPhone: om.Phone,\n\t\tStatus: om.Status,\n\t\tIDsCategory: IDsCategory,\n\t}\n}", "func (a Airport) AsEntities() (*Entities, bool) {\n\treturn nil, false\n}", 
"func (c *converter) ToEntity(in *model.FormationTemplate) (*Entity, error) {\n\tif in == nil {\n\t\treturn nil, nil\n\t}\n\tmarshalledApplicationTypes, err := json.Marshal(in.ApplicationTypes)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"while marshalling application types\")\n\t}\n\tmarshalledRuntimeTypes, err := json.Marshal(in.RuntimeTypes)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"while marshalling application types\")\n\t}\n\tmarshalledLeadingProductIDs, err := json.Marshal(in.LeadingProductIDs)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"while marshalling leading product IDs\")\n\t}\n\n\truntimeArtifactKind := repo.NewNullableString(nil)\n\tif in.RuntimeArtifactKind != nil {\n\t\tkind := string(*in.RuntimeArtifactKind)\n\t\truntimeArtifactKind = repo.NewNullableString(&kind)\n\t}\n\n\treturn &Entity{\n\t\tID: in.ID,\n\t\tName: in.Name,\n\t\tApplicationTypes: string(marshalledApplicationTypes),\n\t\tRuntimeTypes: repo.NewValidNullableString(string(marshalledRuntimeTypes)),\n\t\tRuntimeTypeDisplayName: repo.NewNullableString(in.RuntimeTypeDisplayName),\n\t\tRuntimeArtifactKind: runtimeArtifactKind,\n\t\tLeadingProductIDs: repo.NewValidNullableString(string(marshalledLeadingProductIDs)),\n\t\tTenantID: repo.NewNullableString(in.TenantID),\n\t\tSupportsReset: in.SupportsReset,\n\t}, nil\n}", "func encodeGetResponse(ctx context.Context, w http1.ResponseWriter, response interface{}) (err error) {\n\tif f, ok := response.(endpoint.Failure); ok && f.Failed() != nil {\n\t\tErrorEncoder(ctx, f.Failed(), w)\n\t\treturn nil\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\terr = json.NewEncoder(w).Encode(response)\n\treturn\n}", "func encodeGetResponse(ctx context.Context, w http1.ResponseWriter, response interface{}) (err error) {\n\tif f, ok := response.(endpoint.Failure); ok && f.Failed() != nil {\n\t\tErrorEncoder(ctx, f.Failed(), w)\n\t\treturn nil\n\t}\n\tw.Header().Set(\"Content-Type\", 
\"application/json; charset=utf-8\")\n\terr = json.NewEncoder(w).Encode(response)\n\treturn\n}", "func (t *SimpleChaincode) updateEntities(stub shim.ChaincodeStubInterface, args []string) pb.Response {\n\tlogger.Debug(\"Enter updateInvoices\")\n\tdefer logger.Debug(\"Exited updateInvoices\")\n\tvar entity []EntityMaster\n\n\terr := json.Unmarshal([]byte(args[0]), &entity)\n\tif err != nil {\n\t\tlogger.Error(\"Error unmarshing invoice json:\", err)\n\t\treturn shim.Error(err.Error())\n\t}\n\n\tfor _, v := range entity {\n\t\tcn, err := getCN(stub)\n\t\tif err != nil{\n\t\t\tlogger.Debug(err)\n\t\t\treturn shim.Error(err.Error())\n\t\t}\n\t\tvar attr []string = []string{cn}\n\n\t\tpks, err := buildPK(stub, \"EntityMaster\", attr)\n\t\tif err != nil{\n\t\t\tlogger.Debug(err.Error())\n\t\t\treturn shim.Error(err.Error())\n\t\t}\n\t\t//logger.Debugf(\"Adding: %-v\", v)\n\t\tpk := pks\n\n\t\tvBytes, err := json.Marshal(v)\n\n\t\tif err != nil {\n\t\t\tlogger.Debug(\"error marshaling\", err)\n\t\t\treturn shim.Error(err.Error())\n\t\t}\n\t\tstub.PutState(pk, vBytes)\n\t}\n\n\treturn shim.Success(nil)\n}", "func (s *CategoriesSerializer) Response() CategoriesResponse{\n\t// get value of categories key from context\n\t// cast it to array of Category Model\n\tcategories := s.C.MustGet(\"categories\").([]Category)\n\n\t// create response schema\n\tresponse := CategoriesResponse{}\n\tfor _, c := range categories {\n\t\tserializer := CategorySerializer{s.C , c}\n\t\t// each category will be serialized by its own serializer\n\t\tresponse.Categories = append(response.Categories, serializer.Response())\n\t}\n\treturn response\n}", "func (client *SubscriptionClient) getEntityTagHandleResponse(resp *http.Response) (SubscriptionClientGetEntityTagResponse, error) {\n\tresult := SubscriptionClientGetEntityTagResponse{}\n\tif val := resp.Header.Get(\"ETag\"); val != \"\" {\n\t\tresult.ETag = &val\n\t}\n\tresult.Success = resp.StatusCode >= 200 && resp.StatusCode < 300\n\treturn result, 
nil\n}", "func encodeBulkCreateJobPostResponse(_ context.Context, response interface{}) (interface{}, error) {\n\tres := response.(endpoints.BulkCreateJobPostResponse)\n\terr := getError(res.Err)\n\tif err == nil {\n\t\tvar jobPosts []*pb.JobPost\n\t\tfor _, jobPost := range res.JobPosts {\n\t\t\tjobPosts = append(jobPosts, jobPost.ToProto())\n\t\t}\n\t\treturn &pb.BulkCreateJobPostResponse{JobPosts: jobPosts}, nil\n\t}\n\treturn nil, err\n}", "func (mt MovieTheater) AsEntities() (*Entities, bool) {\n\treturn nil, false\n}", "func (client ModelClient) ListHierarchicalEntitiesResponder(resp *http.Response) (result ListHierarchicalEntityExtractor, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result.Value),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func encodeGetEventsResponse(_ context.Context, r interface{}) (interface{}, error) {\r\n\treturn nil, errors.New(\"'Events' Encoder is not impelemented\")\r\n}", "func (r Response) IsEntity() bool {\n\treturn r.isType(TypeEntity)\n}", "func (e entityYAMLAccess) Write(resp *restful.Response, status int, v interface{}) error {\n\treturn writeYAML(resp, status, e.ContentType, v)\n}", "func (fe FoodEstablishment) AsEntities() (*Entities, bool) {\n\treturn nil, false\n}", "func (client ModelClient) GetRegexEntityEntityInfoResponder(resp *http.Response) (result RegexEntityExtractor, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func EntityToUtf8(entity string) string {\n\tvar ok bool\n\tif ok = reg_entnamed.MatchString(entity); ok {\n\t\treturn namedEntityToUtf8(entity[1 : len(entity)-1])\n\t}\n\n\tif ok = 
reg_entnumeric.MatchString(entity); !ok {\n\t\treturn \"&amp;\" + entity[2:len(entity)-1] + \";\"\n\t}\n\n\tvar err os.Error\n\tvar num int\n\n\tentity = entity[2 : len(entity)-1]\n\tif num, err = strconv.Atoi(entity); err != nil {\n\t\treturn \"&amp;#\" + entity + \";\"\n\t}\n\n\tvar arr [4]byte\n\tif size := utf8.EncodeRune(arr[:], num); size == 0 {\n\t\treturn \"&amp;#\" + entity + \";\"\n\t}\n\n\treturn string(arr[:])\n}", "func (client *PortalConfigClient) getEntityTagHandleResponse(resp *http.Response) (PortalConfigClientGetEntityTagResponse, error) {\n\tresult := PortalConfigClientGetEntityTagResponse{}\n\tif val := resp.Header.Get(\"ETag\"); val != \"\" {\n\t\tresult.ETag = &val\n\t}\n\tresult.Success = resp.StatusCode >= 200 && resp.StatusCode < 300\n\treturn result, nil\n}", "func (t Thing) AsEntities() (*Entities, bool) {\n\treturn nil, false\n}", "func (sra SearchResultsAnswer) AsEntities() (*Entities, bool) {\n\treturn nil, false\n}", "func encodeResponse(ctx context.Context, w http.ResponseWriter, response interface{}) error {\n\tif e, ok := response.(errorer); ok && e.error() != nil {\n\t\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\t\tw.WriteHeader(codeFrom(e.error()))\n\t\treturn marshalStructWithError(response, w)\n\t}\n\n\t// Used for pagination\n\tif e, ok := response.(counter); ok {\n\t\tw.Header().Set(\"X-Total-Count\", strconv.Itoa(e.count()))\n\t}\n\n\t// Don't overwrite a header (i.e. 
called from encodeTextResponse)\n\tif v := w.Header().Get(\"Content-Type\"); v == \"\" {\n\t\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\t\t// Only write json body if we're setting response as json\n\t\treturn json.NewEncoder(w).Encode(response)\n\t}\n\treturn nil\n}", "func (ft *DTDFormatter) RenderEntity(b DTD.IDTDBlock) string {\n\tvar m string\n\tvar eType string\n\tvar exportedStr string\n\tvar url string\n\n\textra := b.GetExtra()\n\n\tif extra.IsParameter {\n\t\tm = \" % \"\n\t} else {\n\t\tm = \" \"\n\t}\n\n\tif extra.IsPublic {\n\t\teType += \" PUBLIC \"\n\t}\n\tif extra.IsSystem {\n\t\teType += \" SYSTEM \"\n\t}\n\n\tif extra.IsExported {\n\t\texportedStr = join(\"\\n%\", b.GetName(), \";\")\n\t}\n\n\tif extra.Url != \"\" {\n\t\turl = renderQuoted(extra.Url)\n\t}\n\n\treturn join(\"<!ENTITY\", m, b.GetName(), \" \", eType, \"\\\"\\n\", ft.delimitter, b.GetValue(), \"\\n\\\"\", url, \">\", exportedStr)\n}", "func (handler *sLegacyHandlers) emitLegacyResponse(ctx *gin.Context, resp *http.Response) {\n\tif resp == nil {\n\t\tctx.JSON(http.StatusInternalServerError, gin.H{\n\t\t\t\"error\": \"Internal request error\",\n\t\t})\n\t\treturn\n\t}\n\n\tif resp.StatusCode >= 500 && resp.StatusCode <= 599 {\n\t\tctx.JSON(http.StatusServiceUnavailable, gin.H{\n\t\t\t\"error\": \"Legacy server error\",\n\t\t})\n\t\treturn\n\t}\n\n\tif resp.StatusCode >= 400 && resp.StatusCode <= 499 {\n\t\tctx.JSON(http.StatusBadRequest, gin.H{\n\t\t\t\"error\": \"Legacy server bad request\",\n\t\t})\n\t\treturn\n\t}\n\n\tctx.JSON(http.StatusOK, resp.Body)\n}", "func ResponseWriter(customResponse model.CustomResponse, transform string, cc *model.CustomContext) error {\n\tvar statusCode int\n\tstatusCode = customResponse.StatusCode\n\n\tresponseBody := customResponse.Body\n\tresponseHeader := customResponse.Header\n\n\tif customResponse.Error != nil {\n\t\tlog.Error(\"response error : \", customResponse.Error.Error())\n\t\tresponseBody[\"error\"] = 
customResponse.Error.Error()\n\t}\n\n\tSetHeaderResponse(responseHeader, cc)\n\tif statusCode == 0 {\n\t\tlog.Warn(\"Status Code is not defined, set Status code to 4000\")\n\t\tstatusCode = 400\n\t}\n\n\tswitch strings.ToLower(transform) {\n\tcase strings.ToLower(\"ToJson\"):\n\t\treturn cc.JSON(statusCode, responseBody)\n\tcase strings.ToLower(\"ToXml\"):\n\n\t\tresByte, err := service.ToXml(responseBody)\n\n\t\tif err != nil {\n\t\t\tlog.Error(err.Error())\n\t\t\tres := make(map[string]interface{})\n\t\t\tres[\"message\"] = err.Error()\n\t\t\treturn cc.XML(500, res)\n\t\t}\n\t\treturn cc.XMLBlob(statusCode, resByte)\n\tdefault:\n\t\treturn cc.JSON(statusCode, responseBody)\n\t}\n}", "func (i Identifiable) AsEntities() (*Entities, bool) {\n\treturn nil, false\n}", "func encodeUpdateTagResponse(ctx context.Context, w http.ResponseWriter, response interface{}) (err error) {\n\tif f, ok := response.(endpoint.Failure); ok && f.Failed() != nil {\n\t\tErrorEncoder(ctx, f.Failed(), w)\n\t\treturn nil\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\terr = json.NewEncoder(w).Encode(response)\n\tif err != nil {\n\t\t\tlogrus.Warn(err.Error())\n\t\t}\n\treturn\n}", "func (a *App) GetEntities(w http.ResponseWriter, r *http.Request) {\n\tcount := DefaultEntityListSize\n\tcStr := r.URL.Query().Get(\"count\")\n\tif cStr != \"\" {\n\t\tcount, _ = strconv.Atoi(cStr)\n\t\tif count < 1 {\n\t\t\tcount = DefaultEntityListSize\n\t\t}\n\t}\n\n\tfilter := r.URL.Query().Get(\"filter\")\n\tentities, err := getEntities(a.DB, count, filter)\n\n\tif err != nil {\n\t\trespondWithError(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\n\trespondWithJSON(w, http.StatusOK, entities)\n}", "func actionEntityToModel(actions []Entity.ResourceActionEntity) []models.ResourceActionV1Model {\n\n\tresourceActionModels := []models.ResourceActionV1Model{}\n\n\tfor _, action := range actions {\n\t\tresourceActionModel := 
models.ResourceActionV1Model{\n\t\t\tResourceUuid: action.ResourceUuid,\n\t\t\tResourceType: action.ResourceType,\n\t\t\tSource: action.Source,\n\t\t\tActionType: action.ActionType,\n\t\t\tVersion: action.Version,\n\t\t\tTarget: action.Target,\n\t\t\tStatus: action.Status,\n\t\t\tStatusUpdatedOn: action.StatusUpdatedOn,\n\t\t}\n\n\t\tresourceActionModels = append(resourceActionModels, resourceActionModel)\n\t}\n\n\treturn resourceActionModels\n}", "func (client *ProductPolicyClient) getEntityTagHandleResponse(resp *http.Response) (ProductPolicyClientGetEntityTagResponse, error) {\n\tresult := ProductPolicyClientGetEntityTagResponse{}\n\tif val := resp.Header.Get(\"ETag\"); val != \"\" {\n\t\tresult.ETag = &val\n\t}\n\tresult.Success = resp.StatusCode >= 200 && resp.StatusCode < 300\n\treturn result, nil\n}" ]
[ "0.6419725", "0.6268671", "0.6093652", "0.60556287", "0.6050197", "0.5891716", "0.58705986", "0.5824377", "0.57977885", "0.57548875", "0.57182443", "0.5651574", "0.5629503", "0.5601155", "0.5591449", "0.5591449", "0.5590175", "0.5575622", "0.551894", "0.55042535", "0.5479917", "0.5468321", "0.54293805", "0.54274017", "0.54217994", "0.53784424", "0.53391397", "0.5318103", "0.53163856", "0.5316191", "0.530797", "0.5269427", "0.526228", "0.52611816", "0.525636", "0.5237438", "0.5237438", "0.5234867", "0.52332056", "0.5203819", "0.51884574", "0.51833606", "0.5177019", "0.51768214", "0.5151578", "0.51501113", "0.5149873", "0.51328045", "0.51258796", "0.51232123", "0.51133317", "0.5099644", "0.5097989", "0.50882703", "0.50853145", "0.5049829", "0.50307125", "0.50273335", "0.50273335", "0.5026564", "0.5021683", "0.5012546", "0.5011622", "0.5011597", "0.5006942", "0.5002598", "0.50015295", "0.49934664", "0.4989149", "0.4987601", "0.49764815", "0.4975493", "0.49701345", "0.49696964", "0.49609107", "0.49585724", "0.49585724", "0.49513036", "0.4949193", "0.49456996", "0.49445114", "0.4930143", "0.49199226", "0.4904919", "0.49043357", "0.4901791", "0.48961568", "0.48954484", "0.48933622", "0.48856804", "0.4884446", "0.48794603", "0.48769605", "0.48732898", "0.4872412", "0.48688805", "0.48672605", "0.48638612", "0.48608148", "0.48581827", "0.48549253" ]
0.0
-1
/ Notes: Only allow entities manipulate locales they exist in
func broadcastLocale(localeId string, stop chan bool) error { ticker := time.NewTicker(BroadcastInterval * time.Second) for { select { case <-ticker.C: // todo ensure the query never takes more than BroadcastInterval time resp, _ := getLocaleUpdate(localeId) err := BroadcastToLocale(localeId, resp) if err != nil { log.Warnf("Error broadcasting: %v", err) } case <-stop: ticker.Stop() log.Debug("Manually stopping broadcast") } } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func getLocaleUpdate(localeId string) (map[string]interface{}, error) {\n\tresp := map[string]interface{}{}\n\n\tentities, err := model.GetEntitiesAtLocale(localeId)\n\tif err != nil {\n\t\treturn resp, fmt.Errorf(\"error retrieving entities to broadcast for locale %v: %v\", localeId, err)\n\t}\n\n\tresp[\"entities\"] = entities\n\tif err != nil {\n\t\treturn resp, fmt.Errorf(\"error marshalling response: %v\", err)\n\t}\n\n\treturn resp, nil\n}", "func Locale(v string) predicate.Location {\n\treturn predicate.Location(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldLocale), v))\n\t})\n}", "func localesCoreEnToml() (*asset, error) {\n\tpath := \"/Users/ravipradhan/Documents/personal-projects/test-modules/render/assets/locales/core.en.toml\"\n\tname := \"locales/core.en.toml\"\n\tbytes, err := bindataRead(path, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfi, err := os.Stat(path)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error reading asset info %s at %s: %w\", name, path, err)\n\t}\n\n\ta := &asset{bytes: bytes, info: fi}\n\treturn a, err\n}", "func (d DescriptionL10N) Locale() string {\n\tif d == nil {\n\t\treturn \"\"\n\t}\n\t// TODO: clarify whether it should be default locale e.g \"en\" or empty string\n\treturn d[\"locale\"]\n}", "func (m *ChatMessage) SetLocale(value *string)() {\n m.locale = value\n}", "func (ln *localen) Locale() string {\n\treturn ln.fnLocale(ln)\n}", "func getLocale(locale string) string {\n\tvalidLocales := [...]string{\"en\", \"ar\"}\n\n\tfor _, l := range validLocales {\n\t\tif l == locale {\n\t\t\treturn l\n\t\t}\n\t}\n\treturn \"en\"\n}", "func (c *connAttrs) Locale() string { c.mu.RLock(); defer c.mu.RUnlock(); return c._locale }", "func SetLocale(o orm.Ormer, lang, code, message string) error {\n\tvar it Locale\n\terr := o.QueryTable(&it).\n\t\tFilter(\"lang\", lang).\n\t\tFilter(\"code\", code).\n\t\tOne(&it, \"id\")\n\n\tif err == nil {\n\t\t_, err = o.QueryTable(&it).Filter(\"id\", 
it.ID).Update(orm.Params{\n\t\t\t\"message\": message,\n\t\t\t\"updated_at\": time.Now(),\n\t\t})\n\t} else if err == orm.ErrNoRows {\n\t\tit.Lang = lang\n\t\tit.Code = code\n\t\tit.Message = message\n\t\t_, err = o.Insert(&it)\n\t}\n\treturn err\n}", "func LocaleContains(v string) predicate.Location {\n\treturn predicate.Location(func(s *sql.Selector) {\n\t\ts.Where(sql.Contains(s.C(FieldLocale), v))\n\t})\n}", "func (me TisoLanguageCodes) IsEn() bool { return me.String() == \"EN\" }", "func (c *connAttrs) SetLocale(locale string) { c.mu.Lock(); defer c.mu.Unlock(); c._locale = locale }", "func validateLocale(\n\tvalidationSettings *validator.Validate,\n\ttopStruct reflect.Value,\n\tcurrentStructOrField reflect.Value,\n\tfield reflect.Value,\n\tfieldType reflect.Type,\n\tfieldKind reflect.Kind,\n\tparam string,\n) bool {\n\tif localeFieldValue, isLocaleField := field.Interface().(Locale); isLocaleField {\n\t\tif _, isLocaleSupported := localeSupported[localeFieldValue]; isLocaleSupported {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func I18nLocale(c *gin.Context) string {\n\tlocale, exists := c.Get(I18nLocaleCtxKey)\n\n\tif locale == \"\" || !exists {\n\t\treturn \"en\"\n\t}\n\n\treturn locale.(string)\n}", "func LocaleGTE(v string) predicate.Location {\n\treturn predicate.Location(func(s *sql.Selector) {\n\t\ts.Where(sql.GTE(s.C(FieldLocale), v))\n\t})\n}", "func OverrideLocale(loc *Locale) error {\n\tif err := loc.Check(); err != nil {\n\t\treturn err\n\t}\n\tif _, exists := locales[loc.ISOCode]; !exists {\n\t\treturn fmt.Errorf(\"locale with ISO code %s does not exist\", loc.ISOCode)\n\t}\n\tlocales[loc.ISOCode] = loc\n\tupdateAllLanguageList()\n\treturn nil\n}", "func (c *Client) Locale() string {\n\treturn c.locale.String()\n}", "func (a *Client) Locales(params *LocalesParams, authInfo runtime.ClientAuthInfoWriter) (*LocalesOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = 
NewLocalesParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"locales\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/system/locales\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"\"},\n\t\tSchemes: []string{\"http\"},\n\t\tParams: params,\n\t\tReader: &LocalesReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*LocalesOK), nil\n\n}", "func Locale() string {\n\tbuffer := make([]uint16, 128)\n\tproc := kernel32.NewProc(\"GetUserDefaultLocaleName\")\n\n\tret, _, _ := proc.Call(\n\t\tuintptr(unsafe.Pointer(&buffer[0])),\n\t\tuintptr(len(buffer)),\n\t)\n\tif ret == 0 {\n\t\treturn \"en-US\"\n\t}\n\n\treturn windows.UTF16ToString(buffer)\n}", "func localeToLanguage(l string) string {\n\tl = strings.ToLower(l)\n\tif len(l) <= 2 {\n\t\treturn l\n\t}\n\treturn l[0:2]\n}", "func (o *StackpathRpcLocalizedMessageAllOf) GetLocaleOk() (*string, bool) {\n\tif o == nil || o.Locale == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Locale, true\n}", "func parseLocale(data interface{}) (model.Locale, error) {\n\tvar locale model.Locale\n\n\tmData, err := json.Marshal(data)\n\tif err != nil {\n\t\treturn locale, fmt.Errorf(\"failed to parse locale from data: %v\", err)\n\t}\n\n\terr = json.Unmarshal(mData, &locale)\n\tif err != nil {\n\t\treturn locale, fmt.Errorf(\"failed to un parse locale from data: %v\", err)\n\t}\n\n\treturn locale, nil\n}", "func LocaleEQ(v string) predicate.Location {\n\treturn predicate.Location(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldLocale), v))\n\t})\n}", "func getLocalizedMonthNames(locale string) (monthnames [13]string) {\n\n\tfor page := 1; page < 13; page++ {\n\t\tt := time.Date(2013, time.Month(page), 1, 0, 0, 0, 0, time.UTC)\n\t\tmonthnames[page] = convertCP(fmt.Sprintf(\"%s\", monday.Format(t, \"January\", 
monday.Locale(locale))))\n\t}\n\n\treturn monthnames\n}", "func Supported() map[string]Locale {\n\treturn map[string]Locale{\n\t\t\"en\": en_locale(),\n\t\t\"pt-br\": pt_br_locale(),\n\t}\n}", "func (o *StackpathRpcLocalizedMessageAllOf) HasLocale() bool {\n\tif o != nil && o.Locale != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func locale() string {\n\tif clientLocale != \"\" {\n\t\treturn clientLocale\n\t}\n\n\tlang := os.Getenv(\"LANG\")\n\tif lang == \"\" {\n\t\treturn \"\"\n\t}\n\n\tlocale := strings.Split(lang, \".\")[0]\n\n\treturn locale\n}", "func (me XHasAttr_Language_XsdtString_Eng) LanguageDefault() xsdt.String { return xsdt.String(\"eng\") }", "func GetLocaleService(sess *session.Session) Locale {\n\treturn Locale{Session: sess}\n}", "func LocaleIn(vs ...string) predicate.Location {\n\tv := make([]interface{}, len(vs))\n\tfor i := range v {\n\t\tv[i] = vs[i]\n\t}\n\treturn predicate.Location(func(s *sql.Selector) {\n\t\t// if not arguments were provided, append the FALSE constants,\n\t\t// since we can't apply \"IN ()\". 
This will make this predicate falsy.\n\t\tif len(v) == 0 {\n\t\t\ts.Where(sql.False())\n\t\t\treturn\n\t\t}\n\t\ts.Where(sql.In(s.C(FieldLocale), v...))\n\t})\n}", "func (b *Builder) Locale(t language.Tag) *Index {\n\tindex := &Index{\n\t\tmeta: b.rootMeta,\n\t}\n\tb.locales = append(b.locales, locale{tag: t, root: index})\n\treturn index\n}", "func initLocales(opt Options) {\n\tfor i, lang := range opt.Langs {\n\t\tfname := fmt.Sprintf(opt.Format, lang)\n\t\t// Append custom locale file.\n\t\tcustom := []interface{}{}\n\t\tcustomPath := path.Join(opt.CustomDirectory, fname)\n\t\tif com.IsFile(customPath) {\n\t\t\tcustom = append(custom, customPath)\n\t\t}\n\n\t\tvar locale interface{}\n\t\tif data, ok := opt.Files[fname]; ok {\n\t\t\tlocale = data\n\t\t} else {\n\t\t\tlocale = path.Join(opt.Directory, fname)\n\t\t}\n\n\t\terr := i18n.SetMessageWithDesc(lang, opt.Names[i], locale, custom...)\n\t\tif err != nil && err != i18n.ErrLangAlreadyExist {\n\t\t\tpanic(fmt.Errorf(\"fail to set message file(%s): %v\", lang, err))\n\t\t}\n\t}\n}", "func Locales() []string {\n\tvar res []string\n\tfor k, _ := range allResources.translations {\n\t\tres = append(res, k.String())\n\t}\n\tsort.Strings(res)\n\treturn res\n}", "func generateLocales() map[string]*LocaleInfo {\n\tout := make(map[string]*LocaleInfo)\n\tloaded := make(map[string]string)\n\tdata, err := srcutil.LoadFile(\"deploy/metadata/standard_locales.json\")\n\tif err != nil {\n\t\tglog.Errorf(\"failed to load locales: %v\", err)\n\t\treturn out\n\t}\n\tif err := json.Unmarshal([]byte(data), &loaded); err != nil {\n\t\tglog.Errorf(\"failed to unmarshal locale data: %v\", err)\n\t\treturn out\n\t}\n\tfor k, v := range loaded {\n\t\tout[k] = genLocaleInfo(k, v)\n\t}\n\treturn out\n}", "func Initialize(locale *gotext.Locale) error {\n\tif locale == nil {\n\t\treturn errors.New(\"Initialize expected locale but got nil\")\n\t}\n\tlocale.AddDomain(defaultDomain)\n\treturn nil\n}", "func SetLocale(locale string) error 
{\n\tif matched, _ := regexp.MatchString(localeRegexp, locale); !matched {\n\t\treturn fmt.Errorf(\"Malformated locale string %s\", locale)\n\t}\n\tlocale = strings.Replace(locale, \"-\", \"_\", -1)\n\tparts := strings.Split(locale, \"_\")\n\tlang := parts[0]\n\tlog.Debugf(\"Setting locale %v\", locale)\n\tnewTrMap := make(map[string]string)\n\tmergeLocaleToMap(newTrMap, defaultLang)\n\tmergeLocaleToMap(newTrMap, defaultLocale)\n\tmergeLocaleToMap(newTrMap, lang)\n\tmergeLocaleToMap(newTrMap, locale)\n\tif len(newTrMap) == 0 {\n\t\treturn fmt.Errorf(\"Not found any translations, locale not set\")\n\t}\n\tlog.Tracef(\"Translations: %v\", newTrMap)\n\ttrMutex.Lock()\n\tdefer trMutex.Unlock()\n\ttrMap = newTrMap\n\treturn nil\n}", "func RegisterLocale(loc *Locale) error {\n\tif err := loc.Check(); err != nil {\n\t\treturn err\n\t}\n\tif _, exists := locales[loc.ISOCode]; exists {\n\t\treturn fmt.Errorf(\"locale with ISO code %s already exists\", loc.ISOCode)\n\t}\n\tlocales[loc.ISOCode] = loc\n\tupdateAllLanguageList()\n\treturn nil\n}", "func (il *IL) Locale() string {\n\treturn il.locales[0]\n}", "func LocaleTypeText(loc int) string {\n\treturn localeTypeText[loc]\n}", "func ArticleCountries(name string) string {\n\tlastCharacters := name[len(name)-2:]\n\tvar article string\n\n\tif lastCharacters == \"as\" {\n\t\tarticle = \"de las \"\n\t} else if lastCharacters == \"os\" {\n\t\tarticle = \"de los \"\n\t} else if string(lastCharacters[1]) == \"a\" {\n\t\tarticle = \"de \"\n\t} else {\n\t\tarticle = \"del \"\n\t}\n\n\treturn article + name\n}", "func (m Message) LocaleOfIssue() (*field.LocaleOfIssueField, quickfix.MessageRejectError) {\n\tf := &field.LocaleOfIssueField{}\n\terr := m.Body.Get(f)\n\treturn f, err\n}", "func (m Message) LocaleOfIssue() (*field.LocaleOfIssueField, quickfix.MessageRejectError) {\n\tf := &field.LocaleOfIssueField{}\n\terr := m.Body.Get(f)\n\treturn f, err\n}", "func (m Message) LocaleOfIssue() (*field.LocaleOfIssueField, 
quickfix.MessageRejectError) {\n\tf := &field.LocaleOfIssueField{}\n\terr := m.Body.Get(f)\n\treturn f, err\n}", "func (m Message) LocaleOfIssue() (*field.LocaleOfIssueField, quickfix.MessageRejectError) {\n\tf := &field.LocaleOfIssueField{}\n\terr := m.Body.Get(f)\n\treturn f, err\n}", "func (t *Translator) AssertValidLocale(l string) error {\n\tmatched, err := regexp.MatchString(\"^(?:[a-z]{2}|[a-z]{2}(([_-]{1})([a-zA-Z]{2}){1,2}))$\", l)\n\tif err != nil {\n\t\treturn errors.New(\"unable to match locale code : \" + err.Error())\n\t}\n\tif !matched {\n\t\treturn errors.New(\"invalid locale code : \" + l)\n\t}\n\n\treturn nil\n}", "func (m *ChatMessage) GetLocale()(*string) {\n return m.locale\n}", "func Translate(nor string, language string) string {\r\n\tif nor == \"nord og sør\" {\r\n\t\tif language == \"jp\" {\r\n\t\t\treturn jp\r\n\t\t}\r\n\t\tif language == \"is\" {\r\n\t\t\treturn is\r\n\t\t}\r\n\t}\r\n\treturn \"\"\r\n}", "func IsLocale(name string) bool {\n\tif _, ok := localeMap[name]; ok {\n\t\treturn true\n\t}\n\t// Fallback to environment check.\n\tif _, err := language.Parse(name); err != nil {\n\t\treturn false\n\t}\n\treturn true\n}", "func (m *AgreementFile) GetLocalizations()([]AgreementFileLocalizationable) {\n return m.localizations\n}", "func CtxLocale(ctx context.Context) *Locale {\n\treturn ctx.Value(ctxKey).(*Locale)\n}", "func localesCoreCyToml() (*asset, error) {\n\tpath := \"/Users/ravipradhan/Documents/personal-projects/test-modules/render/assets/locales/core.cy.toml\"\n\tname := \"locales/core.cy.toml\"\n\tbytes, err := bindataRead(path, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfi, err := os.Stat(path)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error reading asset info %s at %s: %w\", name, path, err)\n\t}\n\n\ta := &asset{bytes: bytes, info: fi}\n\treturn a, err\n}", "func UseOSLocale() error {\n\tuserLocale, err := jibber_jabber.DetectIETF()\n\tif err != nil || userLocale == \"C\" {\n\t\tuserLocale = 
defaultLocale\n\t}\n\tlog.Tracef(\"Using OS locale of current user: %v\", userLocale)\n\treturn SetLocale(userLocale)\n}", "func (d Document) Language() string { return d.language }", "func main() {\n\tvar decoder cldr.Decoder\n\tcldr, err := decoder.DecodePath(\"data/core\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tnumbers := map[string]i18n.Number{}\n\tcalendars := map[string]i18n.Calendar{}\n\tlocales := cldr.Locales()\n\tfor _, loc := range locales {\n\t\tldml := cldr.RawLDML(loc)\n\t\tif ldml.Numbers == nil {\n\t\t\tcontinue\n\t\t}\n\t\tvar number i18n.Number\n\t\tif len(ldml.Numbers.Symbols) > 0 {\n\t\t\tsymbol := ldml.Numbers.Symbols[0]\n\t\t\tif len(symbol.Decimal) > 0 {\n\t\t\t\tnumber.Symbols.Decimal = symbol.Decimal[0].Data()\n\t\t\t}\n\t\t\tif len(symbol.Group) > 0 {\n\t\t\t\tnumber.Symbols.Group = symbol.Group[0].Data()\n\t\t\t}\n\t\t\tif len(symbol.MinusSign) > 0 {\n\t\t\t\tnumber.Symbols.Negative = symbol.MinusSign[0].Data()\n\t\t\t}\n\t\t\tif len(symbol.PercentSign) > 0 {\n\t\t\t\tnumber.Symbols.Percent = symbol.PercentSign[0].Data()\n\t\t\t}\n\t\t\tif len(symbol.PerMille) > 0 {\n\t\t\t\tnumber.Symbols.PerMille = symbol.PerMille[0].Data()\n\t\t\t}\n\t\t}\n\t\tif len(ldml.Numbers.DecimalFormats) > 0 && len(ldml.Numbers.DecimalFormats[0].DecimalFormatLength) > 0 {\n\t\t\tnumber.Formats.Decimal = ldml.Numbers.DecimalFormats[0].DecimalFormatLength[0].DecimalFormat[0].Pattern[0].Data()\n\t\t}\n\t\tif len(ldml.Numbers.CurrencyFormats) > 0 && len(ldml.Numbers.CurrencyFormats[0].CurrencyFormatLength) > 0 {\n\t\t\tnumber.Formats.Currency = ldml.Numbers.CurrencyFormats[0].CurrencyFormatLength[0].CurrencyFormat[0].Pattern[0].Data()\n\t\t}\n\t\tif len(ldml.Numbers.PercentFormats) > 0 && len(ldml.Numbers.PercentFormats[0].PercentFormatLength) > 0 {\n\t\t\tnumber.Formats.Percent = ldml.Numbers.PercentFormats[0].PercentFormatLength[0].PercentFormat[0].Pattern[0].Data()\n\t\t}\n\t\tif ldml.Numbers.Currencies != nil {\n\t\t\tfor _, currency := range 
ldml.Numbers.Currencies.Currency {\n\t\t\t\tvar c i18n.Currency\n\t\t\t\tc.Currency = currency.Type\n\t\t\t\tif len(currency.DisplayName) > 0 {\n\t\t\t\t\tc.DisplayName = currency.DisplayName[0].Data()\n\t\t\t\t}\n\t\t\t\tif len(currency.Symbol) > 0 {\n\t\t\t\t\tc.Symbol = currency.Symbol[0].Data()\n\t\t\t\t}\n\t\t\t\tnumber.Currencies = append(number.Currencies, c)\n\t\t\t}\n\t\t}\n\t\tnumbers[loc] = number\n\n\t\tif ldml.Dates != nil && ldml.Dates.Calendars != nil {\n\t\t\tvar calendar i18n.Calendar\n\t\t\tldmlCar := ldml.Dates.Calendars.Calendar[0]\n\t\t\tfor _, cal := range ldml.Dates.Calendars.Calendar {\n\t\t\t\tif cal.Type == \"gregorian\" {\n\t\t\t\t\tldmlCar = cal\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ldmlCar.DateFormats != nil {\n\t\t\t\tfor _, datefmt := range ldmlCar.DateFormats.DateFormatLength {\n\t\t\t\t\tswitch datefmt.Type {\n\t\t\t\t\tcase \"full\":\n\t\t\t\t\t\tcalendar.Formats.Date.Full = datefmt.DateFormat[0].Pattern[0].Data()\n\t\t\t\t\tcase \"long\":\n\t\t\t\t\t\tcalendar.Formats.Date.Long = datefmt.DateFormat[0].Pattern[0].Data()\n\t\t\t\t\tcase \"medium\":\n\t\t\t\t\t\tcalendar.Formats.Date.Medium = datefmt.DateFormat[0].Pattern[0].Data()\n\t\t\t\t\tcase \"short\":\n\t\t\t\t\t\tcalendar.Formats.Date.Short = datefmt.DateFormat[0].Pattern[0].Data()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif ldmlCar.TimeFormats != nil {\n\t\t\t\tfor _, datefmt := range ldmlCar.TimeFormats.TimeFormatLength {\n\t\t\t\t\tswitch datefmt.Type {\n\t\t\t\t\tcase \"full\":\n\t\t\t\t\t\tcalendar.Formats.Time.Full = datefmt.TimeFormat[0].Pattern[0].Data()\n\t\t\t\t\tcase \"long\":\n\t\t\t\t\t\tcalendar.Formats.Time.Long = datefmt.TimeFormat[0].Pattern[0].Data()\n\t\t\t\t\tcase \"medium\":\n\t\t\t\t\t\tcalendar.Formats.Time.Medium = datefmt.TimeFormat[0].Pattern[0].Data()\n\t\t\t\t\tcase \"short\":\n\t\t\t\t\t\tcalendar.Formats.Time.Short = datefmt.TimeFormat[0].Pattern[0].Data()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ldmlCar.DateTimeFormats != nil {\n\t\t\t\tfor _, 
datefmt := range ldmlCar.DateTimeFormats.DateTimeFormatLength {\n\t\t\t\t\tswitch datefmt.Type {\n\t\t\t\t\tcase \"full\":\n\t\t\t\t\t\tcalendar.Formats.DateTime.Full = datefmt.DateTimeFormat[0].Pattern[0].Data()\n\t\t\t\t\tcase \"long\":\n\t\t\t\t\t\tcalendar.Formats.DateTime.Long = datefmt.DateTimeFormat[0].Pattern[0].Data()\n\t\t\t\t\tcase \"medium\":\n\t\t\t\t\t\tcalendar.Formats.DateTime.Medium = datefmt.DateTimeFormat[0].Pattern[0].Data()\n\t\t\t\t\tcase \"short\":\n\t\t\t\t\t\tcalendar.Formats.DateTime.Short = datefmt.DateTimeFormat[0].Pattern[0].Data()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ldmlCar.Months != nil {\n\t\t\t\tfor _, monthctx := range ldmlCar.Months.MonthContext {\n\t\t\t\t\tfor _, months := range monthctx.MonthWidth {\n\t\t\t\t\t\tvar i18nMonth i18n.CalendarMonthFormatNameValue\n\t\t\t\t\t\tfor _, m := range months.Month {\n\t\t\t\t\t\t\tswitch m.Type {\n\t\t\t\t\t\t\tcase \"1\":\n\t\t\t\t\t\t\t\ti18nMonth.Jan = m.Data()\n\t\t\t\t\t\t\tcase \"2\":\n\t\t\t\t\t\t\t\ti18nMonth.Feb = m.Data()\n\t\t\t\t\t\t\tcase \"3\":\n\t\t\t\t\t\t\t\ti18nMonth.Mar = m.Data()\n\t\t\t\t\t\t\tcase \"4\":\n\t\t\t\t\t\t\t\ti18nMonth.Apr = m.Data()\n\t\t\t\t\t\t\tcase \"5\":\n\t\t\t\t\t\t\t\ti18nMonth.May = m.Data()\n\t\t\t\t\t\t\tcase \"6\":\n\t\t\t\t\t\t\t\ti18nMonth.Jun = m.Data()\n\t\t\t\t\t\t\tcase \"7\":\n\t\t\t\t\t\t\t\ti18nMonth.Jul = m.Data()\n\t\t\t\t\t\t\tcase \"8\":\n\t\t\t\t\t\t\t\ti18nMonth.Aug = m.Data()\n\t\t\t\t\t\t\tcase \"9\":\n\t\t\t\t\t\t\t\ti18nMonth.Sep = m.Data()\n\t\t\t\t\t\t\tcase \"10\":\n\t\t\t\t\t\t\t\ti18nMonth.Oct = m.Data()\n\t\t\t\t\t\t\tcase \"11\":\n\t\t\t\t\t\t\t\ti18nMonth.Nov = m.Data()\n\t\t\t\t\t\t\tcase \"12\":\n\t\t\t\t\t\t\t\ti18nMonth.Dec = m.Data()\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tswitch months.Type {\n\t\t\t\t\t\tcase \"abbreviated\":\n\t\t\t\t\t\t\tcalendar.FormatNames.Months.Abbreviated = i18nMonth\n\t\t\t\t\t\tcase \"narrow\":\n\t\t\t\t\t\t\tcalendar.FormatNames.Months.Narrow = 
i18nMonth\n\t\t\t\t\t\tcase \"short\":\n\t\t\t\t\t\t\tcalendar.FormatNames.Months.Short = i18nMonth\n\t\t\t\t\t\tcase \"wide\":\n\t\t\t\t\t\t\tcalendar.FormatNames.Months.Wide = i18nMonth\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ldmlCar.Days != nil {\n\t\t\t\tfor _, dayctx := range ldmlCar.Days.DayContext {\n\t\t\t\t\tfor _, days := range dayctx.DayWidth {\n\t\t\t\t\t\tvar i18nDay i18n.CalendarDayFormatNameValue\n\t\t\t\t\t\tfor _, d := range days.Day {\n\t\t\t\t\t\t\tswitch d.Type {\n\t\t\t\t\t\t\tcase \"sun\":\n\t\t\t\t\t\t\t\ti18nDay.Sun = d.Data()\n\t\t\t\t\t\t\tcase \"mon\":\n\t\t\t\t\t\t\t\ti18nDay.Mon = d.Data()\n\t\t\t\t\t\t\tcase \"tue\":\n\t\t\t\t\t\t\t\ti18nDay.Tue = d.Data()\n\t\t\t\t\t\t\tcase \"wed\":\n\t\t\t\t\t\t\t\ti18nDay.Wed = d.Data()\n\t\t\t\t\t\t\tcase \"thu\":\n\t\t\t\t\t\t\t\ti18nDay.Thu = d.Data()\n\t\t\t\t\t\t\tcase \"fri\":\n\t\t\t\t\t\t\t\ti18nDay.Fri = d.Data()\n\t\t\t\t\t\t\tcase \"sat\":\n\t\t\t\t\t\t\t\ti18nDay.Sat = d.Data()\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tswitch days.Type {\n\t\t\t\t\t\tcase \"abbreviated\":\n\t\t\t\t\t\t\tcalendar.FormatNames.Days.Abbreviated = i18nDay\n\t\t\t\t\t\tcase \"narrow\":\n\t\t\t\t\t\t\tcalendar.FormatNames.Days.Narrow = i18nDay\n\t\t\t\t\t\tcase \"short\":\n\t\t\t\t\t\t\tcalendar.FormatNames.Days.Short = i18nDay\n\t\t\t\t\t\tcase \"wide\":\n\t\t\t\t\t\t\tcalendar.FormatNames.Days.Wide = i18nDay\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ldmlCar.DayPeriods != nil {\n\t\t\t\tfor _, ctx := range ldmlCar.DayPeriods.DayPeriodContext {\n\t\t\t\t\tfor _, width := range ctx.DayPeriodWidth {\n\t\t\t\t\t\tvar i18nPeriod i18n.CalendarPeriodFormatNameValue\n\t\t\t\t\t\tfor _, d := range width.DayPeriod {\n\t\t\t\t\t\t\tswitch d.Type {\n\t\t\t\t\t\t\tcase \"am\":\n\t\t\t\t\t\t\t\tif i18nPeriod.AM == \"\" {\n\t\t\t\t\t\t\t\t\ti18nPeriod.AM = d.Data()\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcase \"pm\":\n\t\t\t\t\t\t\t\tif i18nPeriod.PM == \"\" {\n\t\t\t\t\t\t\t\t\ti18nPeriod.PM 
= d.Data()\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tswitch width.Type {\n\t\t\t\t\t\tcase \"abbreviated\":\n\t\t\t\t\t\t\tcalendar.FormatNames.Periods.Abbreviated = i18nPeriod\n\t\t\t\t\t\tcase \"narrow\":\n\t\t\t\t\t\t\tcalendar.FormatNames.Periods.Narrow = i18nPeriod\n\t\t\t\t\t\tcase \"short\":\n\t\t\t\t\t\t\tcalendar.FormatNames.Periods.Short = i18nPeriod\n\t\t\t\t\t\tcase \"wide\":\n\t\t\t\t\t\t\tcalendar.FormatNames.Periods.Wide = i18nPeriod\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// var empty i18n.CalendarPeriodFormatNameValue\n\t\t\t\t// if calendar.FormatNames.Periods.Abbreviated == empty {\n\t\t\t\t// \tcalendar.FormatNames.Periods.Abbreviated = calendar.FormatNames.Periods.Wide\n\t\t\t\t// }\n\t\t\t}\n\t\t\tcalendars[loc] = calendar\n\t\t}\n\t}\n\n\tvar wg sync.WaitGroup\n\twg.Add(len(numbers))\n\tfor locale, number := range numbers {\n\t\tgo func(locale string, number i18n.Number) {\n\t\t\tdefer func() { wg.Done() }()\n\t\t\tpath := \"resources/locales/\" + locale\n\t\t\tif _, err := os.Stat(path); err != nil {\n\t\t\t\tif err = os.MkdirAll(path, 0777); err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tnumberFile, err := os.Create(path + \"/number.go\")\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tdefer func() { numberFile.Close() }()\n\t\t\tmainFile, err := os.Create(path + \"/main.go\")\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tdefer func() { mainFile.Close() }()\n\t\t\tcurrencyFile, err := os.Create(path + \"/currency.go\")\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tdefer func() { currencyFile.Close() }()\n\n\t\t\tmainCodes, err := format.Source([]byte(fmt.Sprintf(`package %s\n\t\t\timport \"github.com/theplant/cldr\"\n\n\t\t\tvar Locale = &cldr.Locale{\n\t\t\t\tLocale: %q,\n\t\t\t\tNumber: cldr.Number{\n\t\t\t\t\tSymbols: symbols,\n\t\t\t\t\tFormats: formats,\n\t\t\t\t\tCurrencies: currencies,\n\t\t\t\t},\n\t\t\t\tCalendar: calendar,\n\t\t\t\tPluralRule: 
pluralRule,\n\t\t\t}\n\n\t\t\tfunc init() {\n\t\t\t\tcldr.RegisterLocale(Locale)\n\t\t\t}\n\t\t`, locale, locale)))\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tfmt.Fprintf(mainFile, \"%s\", mainCodes)\n\n\t\t\tnumberCodes, err := format.Source([]byte(fmt.Sprintf(`package %s\n\t\t\timport \"github.com/theplant/cldr\"\n\n\t\t\tvar (\n\t\t\t\tsymbols = %# v\n\t\t\t\tformats = %# v\n\t\t\t)\n\t\t`, locale, pretty.Formatter(number.Symbols), pretty.Formatter(number.Formats))))\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tfmt.Fprintf(numberFile, \"%s\", numberCodes)\n\n\t\t\tcurrencyCodes, err := format.Source([]byte(fmt.Sprintf(`package %s\n\t\t\timport \"github.com/theplant/cldr\"\n\n\t\t\tvar currencies = %# v\n\t\t`, locale, pretty.Formatter(number.Currencies))))\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tfmt.Fprintf(currencyFile, \"%s\", currencyCodes)\n\n\t\t\tcalendar := calendars[locale]\n\t\t\tcalendarFile, err := os.Create(path + \"/calendar.go\")\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tdefer func() { calendarFile.Close() }()\n\n\t\t\tcalendarCodes, err := format.Source([]byte(fmt.Sprintf(`package %s\n\t\t\timport \"github.com/theplant/cldr\"\n\n\t\t\tvar calendar = %# v\n\t\t`, locale, pretty.Formatter(calendar))))\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tfmt.Fprintf(calendarFile, \"%s\", calendarCodes)\n\n\t\t\tpluralFile, err := os.Create(path + \"/plural.go\")\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tdefer func() { pluralFile.Close() }()\n\n\t\t\tpluralCodes, err := format.Source([]byte(fmt.Sprintf(`package %s\n\n\t\t\tvar pluralRule = \"1\"\n\t\t`, locale)))\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tfmt.Fprintf(pluralFile, \"%s\", pluralCodes)\n\t\t}(locale, number)\n\t}\n\n\twg.Wait()\n\n\tallFile, err := os.Create(\"resources/locales/all.go\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer func() { allFile.Close() }()\n\ttmpl, err := 
template.New(\"\").Parse(`package locales\n\t\timport (\n\t\t\t{{range $locale, $_ := .}}_ \"github.com/theplant/cldr/resources/locales/{{$locale}}\"\n\t\t{{end}})\n\t`)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tvar buf bytes.Buffer\n\tif err := tmpl.Execute(&buf, numbers); err != nil {\n\t\tpanic(err)\n\t}\n\tallCodes, err := format.Source(buf.Bytes())\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfmt.Fprintf(allFile, \"%s\", allCodes)\n}", "func (d *Library) Translate(w Word, l string) []string {\n\tif v, ok := d.Dictionaries[w.Locale]; ok {\n\t\ttr := v.Translate(w, l)\n\t\treturn tr\n\t}\n\n\treturn nil\n}", "func LocalizedDateFormatter(locale string, timezone string) pongo2.FilterFunction {\n\tlocation, err := time.LoadLocation(timezone)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to load timezone: %v\", err)\n\t\tlocation, _ = time.LoadLocation(\"Europe/London\")\n\t}\n\n\treturn func(in *pongo2.Value, param *pongo2.Value) (*pongo2.Value, *pongo2.Error) {\n\t\ttstampEpoch, err := strconv.ParseInt(in.String(), 10, 64)\n\t\ttimestamp := time.Unix(tstampEpoch, 0)\n\n\t\tif err != nil {\n\t\t\ttimestamp = ParseTimeFromString(in.String())\n\t\t}\n\n\t\tformat := param.String()\n\n\t\t// Find monday.Locale for locale string\n\t\tvar mLocale monday.Locale\n\t\tmLocale = monday.LocaleEnGB\n\t\tfor _, l := range monday.ListLocales() {\n\t\t\tif string(l) == locale {\n\t\t\t\tmLocale = l\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tout := monday.Format(timestamp.In(location), format, mLocale)\n\n\t\tlog.Debugf(\"[LocalizedDateFormatter] Location: %s (%s) Monday fmt: %s Time fmt: %s (%s)\", locale, timezone, out, timestamp.Format(format), timestamp.In(location).Format(format))\n\n\t\treturn pongo2.AsValue(out), nil\n\t}\n}", "func (e *engine) setDefaults(ctx *Context) {\n\tif ctx.Req.Locale == nil {\n\t\tctx.Req.Locale = ahttp.NewLocale(AppConfig().StringDefault(\"i18n.default\", \"en\"))\n\t}\n}", "func LocaleHasSuffix(v string) predicate.Location {\n\treturn 
predicate.Location(func(s *sql.Selector) {\n\t\ts.Where(sql.HasSuffix(s.C(FieldLocale), v))\n\t})\n}", "func LocaleLT(v string) predicate.Location {\n\treturn predicate.Location(func(s *sql.Selector) {\n\t\ts.Where(sql.LT(s.C(FieldLocale), v))\n\t})\n}", "func (o *StackpathRpcLocalizedMessageAllOf) GetLocale() string {\n\tif o == nil || o.Locale == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Locale\n}", "func (l MemoryLocalizer) GetLocale(index int) context.Locale {\n\t// loc, ok := l[index]\n\t// if !ok {\n\t// \tpanic(fmt.Sprintf(\"locale of index [%d] not found\", index))\n\t// }\n\t// return loc\n\n\treturn l[index]\n}", "func ArticleCountries(name string) string {\n\tif name == \"vereinigten Staaten\" {\n\t\treturn \"die \" + name\n\t}\n\n\treturn name\n}", "func LocaleGT(v string) predicate.Location {\n\treturn predicate.Location(func(s *sql.Selector) {\n\t\ts.Where(sql.GT(s.C(FieldLocale), v))\n\t})\n}", "func LocaleContainsFold(v string) predicate.Location {\n\treturn predicate.Location(func(s *sql.Selector) {\n\t\ts.Where(sql.ContainsFold(s.C(FieldLocale), v))\n\t})\n}", "func SetLocale(locale string) {\n\tcurrentLocale = locale\n}", "func (m SecurityListRequest) HasLocaleOfIssue() bool {\n\treturn m.Has(tag.LocaleOfIssue)\n}", "func translations() buffalo.MiddlewareFunc {\n\tvar err error\n\tif T, err = i18n.New(packr.New(\"app:locales\", \"../locales\"), \"en-US\"); err != nil {\n\t\tapp.Stop(err)\n\t}\n\treturn T.Middleware()\n}", "func EnglishName(loc string) string {\n\tif n, ok := Names[loc]; ok {\n\t\treturn n.English\n\t}\n\treturn \"\"\n}", "func (o *os) GetLocale() gdnative.String {\n\to.ensureSingleton()\n\t//log.Println(\"Calling _OS.GetLocale()\")\n\n\t// Build out the method's arguments\n\tptrArguments := make([]gdnative.Pointer, 0, 0)\n\n\t// Get the method bind\n\tmethodBind := gdnative.NewMethodBind(\"_OS\", \"get_locale\")\n\n\t// Call the parent method.\n\t// String\n\tretPtr := 
gdnative.NewEmptyString()\n\tgdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)\n\n\t// If we have a return type, convert it from a pointer into its actual object.\n\tret := gdnative.NewStringFromPointer(retPtr)\n\treturn ret\n}", "func (c Config) Lookup(locale, msg string) string {\n\n\tif len(langs[locale]) < 1 {\n\t\tlocale = c.mainLocale\n\t}\n\n\tif len(langs[locale][msg]) < 1 {\n\t\treturn msg\n\t}\n\n\treturn langs[locale][msg]\n}", "func InitWithLocaleName(locale string) {\n\tif strings.HasPrefix(locale, LanguageCodeBulgarian+\"_\") {\n\t\tLanguage = LanguageCodeBulgarian\n\t} else {\n\t\tLanguage = LanguageCodeEnglish\n\t}\n}", "func LocaleLTE(v string) predicate.Location {\n\treturn predicate.Location(func(s *sql.Selector) {\n\t\ts.Where(sql.LTE(s.C(FieldLocale), v))\n\t})\n}", "func (m *ProfileCardAnnotation) GetLocalizations()([]DisplayNameLocalizationable) {\n val, err := m.GetBackingStore().Get(\"localizations\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.([]DisplayNameLocalizationable)\n }\n return nil\n}", "func (repo *mongoBaseRepo) SetLocale(code string) {\n\tif code == \"\" {\n\t\trepo.locale = nil\n\t} else {\n\t\trepo.locale = &code\n\t}\n}", "func (t *Translator) loadResource(l string) error {\n\tlText, err := lang.LoadLocaleText(l)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = json.Unmarshal(lText, &t.resources)\n\tif err != nil {\n\t\treturn errors.New(\"unable to unmarshall locale data : \" + err.Error())\n\t}\n\n\treturn nil\n}", "func MissingLocale(w http.ResponseWriter, r *http.Request, h *render.Renderer) {\n\tInternalError(w, r, h, errMissingLocale)\n\treturn\n}", "func GetLocale(lang, code string) (string, error) {\n\tvar it Locale\n\tif err := orm.NewOrm().QueryTable(&it).\n\t\tFilter(\"lang\", lang).\n\t\tFilter(\"code\", code).\n\t\tOne(&it, \"Message\"); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn it.Message, nil\n}", "func LocaleEqualFold(v string) predicate.Location 
{\n\treturn predicate.Location(func(s *sql.Selector) {\n\t\ts.Where(sql.EqualFold(s.C(FieldLocale), v))\n\t})\n}", "func timeMonthsTranslate(en string) string {\n\n\ten = strings.ToLower(en)\n\n\tmon := \"\"\n\tswitch en {\n\tcase \"january\":\n\t\tmon = \"一\"\n\tcase \"february\":\n\t\tmon = \"二\"\n\tcase \"march\":\n\t\tmon = \"三\"\n\tcase \"april\":\n\t\tmon = \"四\"\n\tcase \"may\":\n\t\tmon = \"五\"\n\tcase \"june\":\n\t\tmon = \"六\"\n\tcase \"july\":\n\t\tmon = \"七\"\n\tcase \"august\":\n\t\tmon = \"八\"\n\tcase \"september\":\n\t\tmon = \"九\"\n\tcase \"october\":\n\t\tmon = \"十\"\n\tcase \"november\":\n\t\tmon = \"十一\"\n\tcase \"december\":\n\t\tmon = \"十二\"\n\tdefault:\n\t\treturn \"\"\n\t}\n\n\treturn mon + \"月\"\n}", "func LocaleIsNil() predicate.Location {\n\treturn predicate.Location(func(s *sql.Selector) {\n\t\ts.Where(sql.IsNull(s.C(FieldLocale)))\n\t})\n}", "func (m CrossOrderCancelReplaceRequest) HasLocaleOfIssue() bool {\n\treturn m.Has(tag.LocaleOfIssue)\n}", "func IsValid(locale string) bool {\n\t_, ok := Supported()[locale]\n\treturn ok\n}", "func (me TisoLanguageCodes) IsEs() bool { return me.String() == \"ES\" }", "func (c Currency) Localize() ([]byte, error) {\n\tvar bufC buf\n\t_, err := c.LocalizeWriter(&bufC)\n\treturn bufC, err\n}", "func Lang(localeName string) *Locale {\n\treturn DefaultEngine.Lang(localeName)\n}", "func Translate(sitecontext applications.Datasource, theme string, key string, maindata xdominion.XRecordDef, fields map[string]interface{}, fromLang language.Tag, toLang language.Tag) {\n\n\tlastmodif, _ := maindata.GetTime(\"lastmodif\")\n\ttrtbl := NewTranslationBlock(theme, key, lastmodif, fromLang, toLang)\n\n\tfor campo, sub := range fields {\n\t\tval := \"\"\n\t\tswitch sub.(type) {\n\t\tcase bool, int, string:\n\t\t\tval, _ = maindata.GetString(campo)\n\t\t\tif val == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ttrtbl.Set(campo, val)\n\n\t\tcase map[string]string:\n\t\t\tsubdata, _ := maindata.Get(campo)\n\t\t\tif subdata == 
nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tswitch subdata.(type) {\n\t\t\tcase *xdominion.XRecords:\n\t\t\t\tfor _, subrecord := range *subdata.(*xdominion.XRecords) {\n\t\t\t\t\tfor subcampo, prefix := range sub.(map[string]string) {\n\t\t\t\t\t\tsubval, _ := subrecord.GetString(subcampo)\n\t\t\t\t\t\tif subval == \"\" {\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tsubclave, _ := subrecord.GetString(\"clave\")\n\t\t\t\t\t\tif subclave == \"\" {\n\t\t\t\t\t\t\tsubclave, _ = subrecord.GetString(\"key\")\n\t\t\t\t\t\t}\n\t\t\t\t\t\ttrtbl.Set(prefix+subclave, subval)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\ttrtbl.Verify(sitecontext)\n\n\tfor campo, sub := range fields {\n\t\tswitch sub.(type) {\n\t\tcase bool, int, string:\n\t\t\tmaindata.Set(campo, trtbl.Get(campo))\n\t\tcase map[string]string:\n\t\t\tsubdata, _ := maindata.Get(campo)\n\t\t\tif subdata == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tswitch subdata.(type) {\n\t\t\tcase *xdominion.XRecords:\n\t\t\t\tfor _, subrecord := range *subdata.(*xdominion.XRecords) {\n\t\t\t\t\tfor subcampo, prefix := range sub.(map[string]string) {\n\t\t\t\t\t\tsubclave, _ := subrecord.GetString(\"clave\")\n\t\t\t\t\t\tif subclave == \"\" {\n\t\t\t\t\t\t\tsubclave, _ = subrecord.GetString(\"key\")\n\t\t\t\t\t\t}\n\t\t\t\t\t\tsubrecord.Set(subcampo, trtbl.Get(prefix+subclave))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func Utf8ToEntity(entity string) string {\n\tif rune, size := utf8.DecodeRuneInString(entity); size != 0 {\n\t\treturn fmt.Sprintf(\"&#%d;\", rune)\n\t}\n\treturn entity\n}", "func gcTranslate(gcname string, m map[string]string) string {\n\tswitch gcname {\n\tcase \"Langara\",\"L\", \"1\":\n\t\treturn m[\"Langara\"]\n\tcase \"Fraserview\", \"F\", \"2\":\n\t\treturn m[\"Fraserview\"]\n\tcase \"McCleery\"\t, \"M\", \"3\":\n\t\treturn m[\"McCleery\"]\n\tcase \"All\":\n\t\treturn \"1,2,3\"\n\t}\n\treturn \"no courses options recognized\"\n}", "func header(locale string) string {\n\tt := 
localisations[locale]\n\treturn fmt.Sprintf(headerFormatString, t.date, t.description, t.change)\n}", "func (o AlarmContactOutput) Lang() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *AlarmContact) pulumi.StringPtrOutput { return v.Lang }).(pulumi.StringPtrOutput)\n}", "func (europ europeDeprecatedTimeZones) Ljubljana() string { return \"Europe/Belgrade\" }", "func (m *MailboxSettings) GetLanguage()(LocaleInfoable) {\n val, err := m.GetBackingStore().Get(\"language\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(LocaleInfoable)\n }\n return nil\n}", "func OrdinalPlural(loc Locale) Plural {\n\treturn lookupPlural(loc, ordinalRules)\n}", "func LocaleNotNil() predicate.Location {\n\treturn predicate.Location(func(s *sql.Selector) {\n\t\ts.Where(sql.NotNull(s.C(FieldLocale)))\n\t})\n}", "func InitCulture() {\n\t// seed random generator\n\trand.Seed(time.Now().UnixNano())\n\n\t// init data\n\tPathogens = make([]string, 0)\n\tAntimicrobials = make([]string, 0)\n\n\t// Initialize Pathogens and Antimicrobials\n\tfor _, letter := range letters {\n\t\tPathogens = append(Pathogens, fmt.Sprintf(\"pathogen-%s\", letter))\n\t\tAntimicrobials = append(Antimicrobials, fmt.Sprintf(\"antimicrobial-%s\", letter))\n\t}\n}", "func TestLocaleParser(t *testing.T) {\n\n\tfor k, v := range [][2]string{\n\t\t{\"cs-u-rg-czzzzz\", \"cs\"},\n\t\t{\"\", \"cs\"},\n\t\t{\"cs\", \"cs\"},\n\t\t{\"ru\", \"cs\"},\n\t} {\n\t\tresult := localeFromAcceptLanguageString(v[0])\n\t\tif result != v[1] {\n\t\t\tt.Fatal(k, v[0])\n\t\t}\n\t}\n}", "func GetMessages(locale string) map[string]interface{} {\n\treturn locales[locale].Langs\n}", "func (afric africaDeprecatedTimeZones) Mogadishu() string { return \"Africa/Nairobi\" }", "func Localize(lang string, field FieldType, width Width, value int) string {\n\tfi, ok := Fields[field]\n\tif !ok {\n\t\treturn \"\"\n\t}\n\n\tkey, ok := fi.Key(width, value)\n\tif !ok {\n\t\treturn \"\"\n\t}\n\n\tloc, ok := locales[lang]\n\tif !ok 
{\n\t\treturn \"\"\n\t}\n\n\tdata, _ := loc.Lookup(key)\n\treturn data\n}", "func (t Time) StringEN() string {\n\treturn t.In(time.UTC).Format(time.RFC1123Z)\n}", "func key(name string, locale string) string {\n\tkey := name\n\tvar shortened bool\n\tfor {\n\t\tkey, shortened = data.ShortenName(key)\n\t\tif !shortened {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tvar location []string\n\tfor _, word := range strings.Split(locale, \" \") {\n\t\tif word == strings.Title(word) {\n\t\t\tif _, exists := data.ExtraWords[strings.ToUpper(word)]; !exists {\n\t\t\t\tlocation = append(location, word)\n\t\t\t}\n\t\t} else if len(location) > 1 {\n\t\t\tlocation = []string{}\n\t\t}\n\t}\n\n\tif len(location) > 2 {\n\t\tlocation = location[0:2]\n\t}\n\treturn key + \" - \" + strings.Join(location, \" \")\n}" ]
[ "0.5990479", "0.57793176", "0.57150275", "0.5671891", "0.56348187", "0.5617904", "0.5606874", "0.5580314", "0.55197036", "0.5379397", "0.5370767", "0.53688985", "0.53483874", "0.53311455", "0.53258777", "0.5303042", "0.5290124", "0.5245603", "0.5230171", "0.5212097", "0.51987684", "0.5173215", "0.5158555", "0.5153178", "0.5125848", "0.51180625", "0.51046723", "0.5074273", "0.5054439", "0.50341976", "0.5025233", "0.5017781", "0.50092864", "0.50009805", "0.49992752", "0.4992479", "0.49521422", "0.4943658", "0.49222866", "0.4901752", "0.48995495", "0.48995495", "0.48995495", "0.48995495", "0.48983756", "0.4897475", "0.48937574", "0.48794758", "0.48573795", "0.4851847", "0.4829227", "0.482893", "0.48233584", "0.48202762", "0.48075977", "0.48049954", "0.48030555", "0.48002324", "0.47874314", "0.4780092", "0.47792953", "0.47752914", "0.47735184", "0.47704569", "0.47416943", "0.4735375", "0.47315988", "0.47234547", "0.47159964", "0.47118953", "0.4711276", "0.46824646", "0.4664515", "0.4664278", "0.46602046", "0.4654681", "0.46324933", "0.46304533", "0.46230456", "0.46205077", "0.4620282", "0.46184725", "0.46184573", "0.46115047", "0.46106544", "0.46096852", "0.46071526", "0.46070117", "0.4605249", "0.46002096", "0.45972264", "0.4585302", "0.45797712", "0.45736262", "0.45706967", "0.45683426", "0.45640677", "0.45480293", "0.45430216", "0.45422637", "0.4540761" ]
0.0
-1
parse string map into local struct
func parseLocale(data interface{}) (model.Locale, error) { var locale model.Locale mData, err := json.Marshal(data) if err != nil { return locale, fmt.Errorf("failed to parse locale from data: %v", err) } err = json.Unmarshal(mData, &locale) if err != nil { return locale, fmt.Errorf("failed to un parse locale from data: %v", err) } return locale, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func mapFromString(s string, t reflect.Type) (interface{}, error) {\n\tmp := reflect.New(t)\n\tif s != \"\" {\n\t\terr := json.Unmarshal([]byte(s), mp.Interface())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tmp.Elem().Set(reflect.MakeMap(t))\n\t}\n\treturn mp.Elem().Interface(), nil\n}", "func parseMap(v interface{}) (map[string]interface{}, error) {\n\tstructRfl := reflect.ValueOf(v)\n\n\tresult := map[string]interface{}{}\n\n\t// Fail if the passed type is not a map\n\tif structRfl.Kind() != reflect.Map {\n\t\treturn result, fmt.Errorf(\"Expected map, got: %s\", structRfl.Kind().String())\n\t}\n\n\tmapRange := structRfl.MapRange()\n\tfor mapRange.Next() {\n\t\tresult[mapRange.Key().String()] = mapRange.Value().Interface()\n\t}\n\n\treturn result, nil\n}", "func ParseMap(buf []byte) (map[string]interface{}, int, error) {\n\tif buf == nil {\n\t\tpanic(\"cannot parse nil byte array for structs\")\n\t}\n\n\tif len(buf) < 1 {\n\t\treturn nil, 0, errors.New(\"bytes empty, cannot parse struct\")\n\t}\n\n\tif buf[0]>>4 != 0xa && (buf[0] < 0xd8 || buf[0] > 0xda) {\n\t\treturn nil, 0, errors.New(\"expected a map\")\n\t}\n\n\tnumMembers := 0\n\tpos := 1\n\n\tif buf[0]>>4 == 0xa {\n\t\t// Tiny Map\n\t\tnumMembers = int(buf[0] & 0xf)\n\t} else {\n\t\tswitch buf[0] & 0x0f {\n\t\tcase 0x08:\n\t\t\tnumMembers = int(buf[pos])\n\t\t\tpos++\n\t\tcase 0x09:\n\t\t\tnumMembers = int(binary.BigEndian.Uint16(buf[pos : pos+2]))\n\t\t\tpos = pos + 2\n\t\tcase 0x0a:\n\t\t\tnumMembers = int(binary.BigEndian.Uint32(buf[pos : pos+4]))\n\t\t\tpos = pos + 4\n\t\tdefault:\n\t\t\treturn nil, 0, errors.New(\"invalid map prefix\")\n\t\t}\n\t}\n\n\tresult := make(map[string]interface{}, numMembers)\n\n\tfor i := 0; i < numMembers; i++ {\n\t\t// map keys are Strings\n\t\tname, n, err := ParseString(buf[pos:])\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tpos = pos + n\n\n\t\t// now for the value\n\t\tswitch buf[pos] >> 4 {\n\t\tcase 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 
0x7: // tiny-int\n\t\t\tval, err := ParseTinyInt(buf[pos])\n\t\t\tif err != nil {\n\t\t\t\treturn result, pos, err\n\t\t\t}\n\t\t\tresult[name] = val\n\t\t\tpos++\n\t\tcase 0x8: // tiny-string\n\t\t\tval, n, err := ParseTinyString(buf[pos:])\n\t\t\tif err != nil {\n\t\t\t\treturn result, pos, err\n\t\t\t}\n\t\t\tresult[name] = val\n\t\t\tpos = pos + n\n\t\tcase 0x9: // tiny-array\n\t\t\tval, n, err := ParseArray(buf[pos:])\n\t\t\tif err != nil {\n\t\t\t\treturn result, pos, err\n\t\t\t}\n\t\t\tresult[name] = val\n\t\t\tpos = pos + n\n\t\tcase 0xa: // tiny-map\n\t\t\tvalue, n, err := ParseMap(buf[pos:])\n\t\t\tif err != nil {\n\t\t\t\treturn result, pos, err\n\t\t\t}\n\t\t\tresult[name] = value\n\t\t\tpos = pos + n\n\t\tcase 0xc: // floats, nil, and bools\n\t\t\tnib := int(buf[pos] & 0xf)\n\t\t\tswitch nib {\n\t\t\tcase 0: // packed nil/null\n\t\t\t\tresult[name] = nil\n\t\t\t\tpos++\n\t\t\tcase 1: // packed float\n\t\t\t\tpanic(\"can't do floats yet\")\n\t\t\tcase 2:\n\t\t\t\tresult[name] = false\n\t\t\t\tpos++\n\t\t\tcase 3:\n\t\t\t\tresult[name] = true\n\t\t\t\tpos++\n\t\t\tcase 0x8, 0x9, 0xa, 0xb:\n\t\t\t\tval, n, err := ParseInt(buf[pos:])\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn result, pos, err\n\t\t\t\t}\n\t\t\t\tresult[name] = val\n\t\t\t\tpos = pos + n\n\t\t\t}\n\t\tcase 0xd:\n\t\t\tnib := int(buf[pos] & 0xf)\n\t\t\tswitch nib {\n\t\t\tcase 0x0, 0x1, 0x2: // string\n\t\t\t\tval, n, err := ParseString(buf[pos:])\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn result, pos, err\n\t\t\t\t}\n\t\t\t\tresult[name] = val\n\t\t\t\tpos = pos + n\n\t\t\tcase 0x4, 0x5, 0x6: // array\n\t\t\t\tval, n, err := ParseArray(buf[pos:])\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn result, pos, err\n\t\t\t\t}\n\t\t\t\tresult[name] = val\n\t\t\t\tpos = pos + n\n\t\t\tcase 0x7:\n\t\t\t\tpanic(\"invalid prefix 0xd7\")\n\t\t\tcase 0x8, 0x9, 0xa:\n\t\t\t\t// err\n\t\t\t\tpanic(\"not ready\")\n\t\t\t}\n\n\t\tdefault:\n\t\t\terrMsg := fmt.Sprintf(\"found unsupported encoding type: 
%#v\\n\", buf[pos])\n\t\t\treturn result, pos, errors.New(errMsg)\n\t\t}\n\t}\n\treturn result, pos, nil\n}", "func (c *Config) parseMap(resMap map[string]map[string]string, v interface{}) error {\n // resolve struct\n typ0 := reflect.TypeOf(v)\n if typ0.Kind() != reflect.Ptr {\n return errors.New(\"cannot map to non-pointor struct\")\n }\n ele0 := reflect.ValueOf(v).Elem()\n typ0 = ele0.Type()\n len0 := typ0.NumField()\n // sections\n for i := 0; i < len0; i++ {\n val1 := ele0.Field(i)\n name1 := typ0.Field(i).Name\n typ1 := val1.Type()\n key1 := typ0.Field(i).Tag.Get(\"conf\")\n if \"\" == key1 {\n key1 = c.CamelToSnake(name1)\n }\n len1 := typ1.NumField()\n // items in one section\n for j := 0; j < len1; j++ {\n val2 := val1.Field(j)\n name2 := typ1.Field(j).Name\n typ2 := typ1.Field(j).Type\n key2 := typ1.Field(j).Tag.Get(\"conf\")\n if \"\" == key2 {\n key2 = c.CamelToSnake(name2)\n }\n if _, ok := resMap[key1][key2]; !ok {\n continue\n }\n valStr := resMap[key1][key2]\n switch typ2.Kind() {\n case reflect.String:\n val2.SetString(valStr)\n case reflect.Bool:\n valBool, err := strconv.ParseBool(valStr)\n if err != nil {\n return err\n }\n val2.SetBool(valBool)\n case reflect.Int8:\n valInt, err := strconv.ParseInt(valStr, 10, 8)\n if err != nil {\n return err\n }\n val2.SetInt(valInt)\n case reflect.Int16:\n valInt, err := strconv.ParseInt(valStr, 10, 16)\n if err != nil {\n return err\n }\n val2.SetInt(valInt)\n case reflect.Int:\n valInt, err := strconv.ParseInt(valStr, 10, 32)\n if err != nil {\n return err\n }\n val2.SetInt(valInt)\n case reflect.Int32:\n valInt, err := strconv.ParseInt(valStr, 10, 32)\n if err != nil {\n return err\n }\n val2.SetInt(valInt)\n case reflect.Int64:\n valInt, err := strconv.ParseInt(valStr, 10, 64)\n if err != nil {\n return err\n }\n val2.SetInt(valInt)\n case reflect.Uint8:\n valUint, err := strconv.ParseUint(valStr, 10, 8)\n if err != nil {\n return err\n }\n val2.SetUint(valUint)\n case reflect.Uint16:\n valUint, err := 
strconv.ParseUint(valStr, 10, 16)\n if err != nil {\n return err\n }\n val2.SetUint(valUint)\n case reflect.Uint:\n valUint, err := strconv.ParseUint(valStr, 10, 32)\n if err != nil {\n return err\n }\n val2.SetUint(valUint)\n case reflect.Uint32:\n valUint, err := strconv.ParseUint(valStr, 10, 32)\n if err != nil {\n return err\n }\n val2.SetUint(valUint)\n case reflect.Uint64:\n valUint, err := strconv.ParseUint(valStr, 10, 64)\n if err != nil {\n return err\n }\n val2.SetUint(valUint)\n case reflect.Float32:\n valFloat, err := strconv.ParseFloat(valStr, 32)\n if err != nil {\n return err\n }\n val2.SetFloat(valFloat)\n case reflect.Float64:\n valFloat, err := strconv.ParseFloat(valStr, 64)\n if err != nil {\n return err\n }\n val2.SetFloat(valFloat)\n default:\n return errors.New(fmt.Sprintf(\"unsupport kind: %v\", typ2.Kind()))\n }\n }\n\n }\n return nil\n}", "func mapifyStr(content string) map[string]string {\n\tm := make(map[string]string)\n\ta := strings.Split(content, \",\")\n\tfor i := range a {\n\t\ts := strings.Split(a[i], \"=\")\n\t\tm[s[0]] = s[1]\n\t}\n\treturn m\n}", "func parseMap(option string) (*Option, error) {\n\tsplitoption := strings.Fields(option)\n\n\tif len(splitoption) == 0 {\n\t\treturn nil, fmt.Errorf(\"there is an unspecified map option at an unknown line\")\n\t} else if len(splitoption) == 1 || len(splitoption) > 2 {\n\t\treturn nil, fmt.Errorf(\"there is a misconfigured map option: %q.\\nIs it in format <option>:<whitespaces><regex><whitespaces><regex>?\", option)\n\t}\n\n\tfromRe, err := regexp.Compile(\"^\" + splitoption[0] + \"$\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"an error occurred compiling the regex for the from field in the map option: %q\\n%v\", option, err)\n\t}\n\n\t// map options are compared in the matcher\n\treturn &Option{\n\t\tCategory: categoryMap,\n\t\tRegex: map[int]*regexp.Regexp{0: fromRe},\n\t\tValue: splitoption[1],\n\t}, nil\n}", "func ParseMap(t string, typ reflect.Type, indent int) (interface{}, 
error) {\n\tr := bytes.NewReader([]byte(t))\n\tcsvR := csv.NewReader(r)\n\trecords, err := csvR.ReadAll()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tm := reflect.MakeMap(typ)\n\n\tktyp := typ.Key()\n\tvtyp := typ.Elem()\n\n\tfor _, slc := range records {\n\t\tfor _, s := range slc {\n\t\t\t// TODO - fix this, this is bad and will break if there are any colons inside of a string\n\t\t\tkvslc := strings.Split(s, \":\")\n\t\t\tif len(kvslc) != 2 {\n\t\t\t\treturn nil, fmt.Errorf(\"cfgen: Missing full k/v pair for map, got %d of 2 entries\", len(kvslc))\n\t\t\t}\n\t\t\tk, err := ParseType(kvslc[0], ktyp, indent)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tt, err := ParseType(kvslc[1], vtyp, indent)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tm.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(t))\n\t\t}\n\t}\n\treturn m.Interface(), nil\n}", "func parseToMap(input string) map[string]string {\n\n\tflagMap := make(map[string]string)\n\tdoParse(input, func(flag, key, canonicalKey, value, trimmedValue string) {\n\t\t// We store the value twice, once with dash, once with underscores\n\t\t// Just in case people check with the wrong method\n\t\tflagMap[canonicalKey] = trimmedValue\n\t\tflagMap[key] = trimmedValue\n\t})\n\n\treturn flagMap\n}", "func CollectFromString(raw string, to map[string]UnparsedVariableValue) error {\n\teq := strings.Index(raw, \"=\")\n\tif eq == -1 {\n\t\treturn errors.Errorf(\"tfvar: bad var string '%s'\", raw)\n\t}\n\n\tname := raw[:eq]\n\trawVal := raw[eq+1:]\n\n\tto[name] = unparsedVariableValueString{\n\t\tstr: rawVal,\n\t\tname: name,\n\t}\n\n\treturn nil\n}", "func stringToMap(str string) (map[string]interface{}, error) {\n\tvar jsonDataMap map[string]interface{}\n\td := json.NewDecoder(strings.NewReader(str))\n\td.UseNumber()\n\terr := d.Decode(&jsonDataMap)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn jsonDataMap, nil\n}", "func StringToMap(labels string) map[string]string {\n\tl := 
map[string]string{}\n\tslice := strings.Split(labels, labelSeparator)\n\tif len(slice) != 2 {\n\t\treturn nil\n\t}\n\n\tl[slice[0]] = slice[1]\n\treturn l\n}", "func parseMap(m map[string]interface{}) (map[string]interface{}, error) {\n\n\tresult := make(map[string]interface{})\n\n\tfor k, v := range m {\n\t\tswitch vt := v.(type) {\n\t\tcase map[string]interface{}:\n\t\t\tnestedMap, err := parseMap(vt)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tresult[k] = nestedMap\n\t\tcase []interface{}:\n\t\t\tnestedList, err := parseListOfMaps(vt)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tresult[k] = nestedList\n\t\tcase float64:\n\t\t\ttestInt, err := strconv.ParseInt((fmt.Sprintf(\"%v\", vt)), 10, 0)\n\t\t\tif err == nil {\n\t\t\t\tresult[k] = int(testInt)\n\t\t\t} else {\n\t\t\t\tresult[k] = vt\n\t\t\t}\n\t\tdefault:\n\t\t\tresult[k] = vt\n\t\t}\n\n\t}\n\n\treturn result, nil\n}", "func FromLogMsgToStringMap(doc *log.LogMessage, m map[string]string) {\n\n\tmsg := reflect.Indirect(reflect.ValueOf(*doc))\n\n\tfor i := 0; i < msg.NumField(); i++ {\n\t\tcurrName := msg.Type().Field(i).Name\n\t\tcurrValue := msg.Field(i)\n\t\tswitch currValue.Kind() {\n\t\tcase reflect.String:\n\t\t\tm[currName] = currValue.Interface().(string)\n\t\tcase reflect.Int32:\n\t\t\tcurrInt := currValue.Interface().(int32)\n\t\t\tif currName == common.KEY_TS {\n\t\t\t\tcurrTs := time.Unix(int64(currInt), 0)\n\t\t\t\tm[currName] = currTs.Format(time.RFC3339)\n\t\t\t} else {\n\t\t\t\tm[currName] = strconv.FormatInt(int64(currInt), 10)\n\t\t\t}\n\t\t}\n\t\t// else skip unknown and unexpected format\n\t}\n}", "func deserialiseEnvMap(s string) map[string]string {\n\tresult := map[string]string{}\n\n\tfor _, item := range strings.Split(s, \",\") {\n\t\tentry := strings.Split(item, \":\")\n\t\tif len(entry) >= 2 {\n\t\t\tresult[entry[0]] = entry[1]\n\t\t}\n\t}\n\n\treturn result\n}", "func NewStringValueMapFromString(line string) *StringValueMap {\n\tresult := 
NewEmptyStringValueMap()\n\tif line == \"\" {\n\t\treturn result\n\t}\n\n\t// Todo: User tokenizer / decoder\n\ttokens := strings.Split(line, \";\")\n\n\tfor index := 0; index < len(tokens); index++ {\n\t\ttoken := tokens[index]\n\t\tif len(token) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tpos := strings.Index(token, \"=\")\n\n\t\tvar key string\n\t\tif pos > 0 {\n\t\t\tkey = token[0:pos]\n\t\t\tkey = strings.TrimSpace(key)\n\t\t} else {\n\t\t\tkey = strings.TrimSpace(token)\n\t\t}\n\n\t\tvar value string\n\t\tif pos > 0 {\n\t\t\tvalue = token[pos+1:]\n\t\t\tvalue = strings.TrimSpace(value)\n\t\t} else {\n\t\t\tvalue = \"\"\n\t\t}\n\n\t\tresult.Put(key, value)\n\t}\n\n\treturn result\n}", "func parseTag(stag StructTag) map[string]string {\n\ttag := string(stag)\n\tm := make(map[string]string)\n\tfor tag != \"\" {\n\t\ti := 0\n\t\tfor i < len(tag) && tag[i] == ' ' {\n\t\t\ti += 1\n\t\t}\n\t\ttag = tag[i:]\n\t\tif tag == \"\" {\n\t\t\tbreak\n\t\t}\n\n\t\ti = 0\n\t\tfor i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '\"' && tag[i] != 0x7f {\n\t\t\ti++\n\t\t}\n\t\tif i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '\"' {\n\t\t\tbreak\n\t\t}\n\t\tname := string(tag[:i])\n\t\ttag = tag[i+1:]\n\n\t\ti = 1\n\t\tfor i < len(tag) && tag[i] != '\"' {\n\t\t\tif tag[i] == '\\\\' {\n\t\t\t\ti++\n\t\t\t}\n\t\t\ti++\n\t\t}\n\t\tif i >= len(tag) {\n\t\t\tbreak\n\t\t}\n\t\tqvalue := string(tag[:i+1])\n\t\ttag = tag[i+1:]\n\n\t\tvalue, err := strconv.Unquote(qvalue)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t\tm[name] = value\n\t\t}\n\t\treturn m\n\t}\n\treturn m\n}", "func ParseMap(pid string, entries EntryMap) {\n\tmaps, err := os.Open(\"/proc/\" + pid + \"/maps\")\n\tif err != nil {\n\t\tlog.Print(err)\n\t}\n\tdefer maps.Close()\n\tbuff := bufio.NewReader(maps)\n\tline, _, err := buff.ReadLine()\n\t// For each line\n\tfor err == nil {\n\t\tparseRow(line, entries)\n\t\tline, _, err = buff.ReadLine()\n\t}\n\tif err != nil && err != io.EOF 
{\n\t\tlog.Fatal(err)\n\t}\n}", "func ToMapStringInterface(r io.Reader) (map[string]interface{}, error) {\n\tresults := make(map[string]interface{})\n\n\tdecoder := json.NewDecoder(r)\n\tdecoder.UseNumber()\n\tdecodeErr := decoder.Decode(&results)\n\n\tswitch {\n\tcase decodeErr == io.EOF:\n\t\tfmt.Println(\"request has no body, decoding skipped returning nil\")\n\t\treturn nil, nil\n\tcase decodeErr != nil:\n\t\treturn nil, fmt.Errorf(\"Failed to decode reader, error %s\", decodeErr.Error())\n\t}\n\n\treturn results, nil\n}", "func (parser *Parser) parseMap(input string, targetType reflect.Type) (interface{}, error) {\n\tmatrix, err := parseHTMLTable(input)\n\tif err != nil {\n\t\treturn nil, toErrorf(\"'%v' is not a valid specification for '%v'\", input, targetType)\n\t}\n\tlength := len(matrix)\n\treturnValue := reflect.MakeMapWithSize(targetType, length)\n\tfor _, row := range matrix {\n\t\tif len(row) != 2 {\n\t\t\treturn nil, toErrorf(\"row '%v' in hash '%v' does not have two cells\", row, targetType)\n\t\t}\n\t\tvar key, value interface{}\n\t\tvar err error\n\t\tif key, err = parser.Parse(row[0], targetType.Key()); err != nil {\n\t\t\treturn nil, toErrorf(\"Could not parse key '%v' in hash '%v'\", row[0], targetType)\n\t\t}\n\t\tif value, err = parser.Parse(row[1], targetType.Elem()); err != nil {\n\t\t\treturn nil, toErrorf(\"Could not parse value '%v' in hash '%v'\", row[1], targetType)\n\t\t}\n\t\treturnValue.SetMapIndex(reflect.ValueOf(key), reflect.ValueOf(value))\n\t}\n\treturn returnValue.Interface(), nil\n}", "func (d *Char) FromMap(name string, options map[string]string) error {\n\td.KeyName = name\n\td.Path = options[\"path\"]\n\td.Source = options[\"source\"]\n\n\treturn nil\n}", "func parseInfo(s string) map[string]string {\n\tr := map[string]string{}\n\t// s is a string with lines, with 'key:somevalue\\n' lines. 
And comments.\n\tfor _, line := range strings.Split(s, \"\\r\\n\") {\n\t\tfields := strings.SplitN(line, \":\", 2)\n\t\tif len(fields) != 2 {\n\t\t\tcontinue\n\t\t}\n\t\tr[fields[0]] = fields[1]\n\t}\n\treturn r\n}", "func messageFromMap(input *dynamic.Message, data *map[string]interface{}) error {\n\tstrData, err := json.Marshal(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = jsonpb.UnmarshalString(string(strData), input)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func ParseFeatureMap(str string) (FeatureMap, error) {\n\t// Create the feature map we will be returning.\n\tres := make(FeatureMap, len(defaultFeatureMap))\n\t// Set all features to their default status.\n\tfor feature, status := range defaultFeatureMap {\n\t\tres[feature] = status\n\t}\n\t// Split the provided string by \",\" in order to obtain all the \"key=value\" pairs.\n\tkvs := strings.Split(str, \",\")\n\t// Iterate over all the \"key=value\" pairs and set the status of the corresponding feature in the feature map.\n\tfor _, kv := range kvs {\n\t\t// Skip \"empty\" key/value pairs.\n\t\tif len(kv) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\t// Split the key/value pair by \"=\".\n\t\tp := strings.Split(kv, \"=\")\n\t\tif len(p) != 2 {\n\t\t\treturn nil, fmt.Errorf(\"invalid key/value pair: %q\", kv)\n\t\t}\n\t\t// Grab the key and its value.\n\t\tk, v := p[0], p[1]\n\t\t// Make sure the feature corresponding to the key exists.\n\t\tif _, exists := defaultFeatureMap[Feature(k)]; !exists {\n\t\t\treturn nil, fmt.Errorf(\"invalid feature key: %q\", k)\n\t\t}\n\t\t// Attempt to parse the value as a boolean.\n\t\tb, err := strconv.ParseBool(v)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to parse %q as a boolean value\", v)\n\t\t}\n\t\t// Set the feature's status in the feature map.\n\t\tres[Feature(k)] = b\n\t}\n\t// Return the feature map.\n\treturn res, nil\n}", "func ParseGameString(c *Connection, gameString string) Map {\n\ttokens := strings.Split(gameString, \" 
\")\n\tnumPlayers, _ := strconv.Atoi(tokens[0])\n\ttokens = tokens[1:]\n\n\tgameMap := Map{\n\t\tMyID: c.PlayerTag,\n\t\tWidth: c.width,\n\t\tHeight: c.height,\n\t\tPlanets: nil,\n\t\tPlayers: make([]Player, numPlayers),\n\t\tShips: make(map[int]Ship),\n\t\tEntities: make([]Entitier, 0),\n\t}\n\n\tfor i := 0; i < numPlayers; i++ {\n\t\tplayer, tokensnew := ParsePlayer(tokens)\n\t\ttokens = tokensnew\n\t\tgameMap.Players[player.ID] = player\n\t\tfor j := 0; j < len(player.Ships); j++ {\n\t\t\tship := player.Ships[j]\n\t\t\tgameMap.Entities = append(gameMap.Entities, ship.Entity)\n\t\t\tgameMap.Ships[ship.id] = ship\n\t\t}\n\t}\n\n\tnumPlanets, _ := strconv.Atoi(tokens[0])\n\tgameMap.Planets = make([]Planet, 0, numPlanets)\n\ttokens = tokens[1:]\n\n\tfor i := 0; i < numPlanets; i++ {\n\t\tplanet, tokensnew := ParsePlanet(tokens)\n\t\ttokens = tokensnew\n\t\tgameMap.Planets = append(gameMap.Planets, planet)\n\t\tgameMap.Entities = append(gameMap.Entities, planet.Entity)\n\t}\n\n\treturn gameMap\n}", "func runStringToStringMap() {\n\tvar string2stringMap = make(String2StringMap) // No capacity set. 
Let system handle it\n\tstring2stringMap[\"USA\"] = \"Chicago\"\n\tstring2stringMap[\"USA\"] = \"Washington DC\"\n\tstring2stringMap[\"China\"] = \"Beijing\"\n\tfmt.Printf(\"Map says %s \\r\\n\", string2stringMap[\"USA\"])\n\n}", "func Parse(r io.Reader) (map[string]string, error) {\n\treturn ParseWithLookup(r, nil)\n}", "func populateMap(m map[int]string, nameMap map[string]intBool, file string) {\n\tf, err := os.Open(file)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer f.Close()\n\tbufr := bufio.NewReader(f)\n\tfor {\n\t\tline, err := bufr.ReadString('\\n')\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tparts := strings.SplitN(line, \":\", 4)\n\t\tif len(parts) >= 3 {\n\t\t\tidstr := parts[2]\n\t\t\tid, err := strconv.Atoi(idstr)\n\t\t\tif err == nil {\n\t\t\t\tm[id] = parts[0]\n\t\t\t\tif nameMap != nil {\n\t\t\t\t\tnameMap[parts[0]] = intBool{id, true}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func ResolveMap(t v1.MapTransform, input any) (any, error) {\n\tswitch i := input.(type) {\n\tcase string:\n\t\tp, ok := t.Pairs[i]\n\t\tif !ok {\n\t\t\treturn nil, errors.Errorf(errFmtMapNotFound, i)\n\t\t}\n\t\tvar val interface{}\n\t\tif err := json.Unmarshal(p.Raw, &val); err != nil {\n\t\t\treturn nil, errors.Wrapf(err, errFmtMapInvalidJSON, i)\n\t\t}\n\t\treturn val, nil\n\tdefault:\n\t\treturn nil, errors.Errorf(errFmtMapTypeNotSupported, reflect.TypeOf(input).String())\n\t}\n}", "func (node FbsInvertedMapString) Unmarshal(v interface{}) error {\n\n return node.Node.Unmarshal(v, func(s string, rv reflect.Value) error {\n \n switch InvertedMapString_FieldEnum[s] {\n case InvertedMapString_Key:\n //return node.Key()\n rv.Set(reflect.ValueOf( node.Key() ))\n }\n return nil\n })\n\n}", "func MapStrFromJournalEntry(ev *sdjournal.JournalEntry, cleanKeys bool, convertToNumbers bool, MoveMetadataLocation string) common.MapStr {\n\tm := common.MapStr{}\n\t// for the sake of MoveMetadataLocation we will write all the JournalEntry data except the \"message\" here\n\ttarget := 
m\n\n\t// convert non-empty MoveMetadataLocation to the nested common.MapStr{} and point target to the deepest one\n\tif MoveMetadataLocation != \"\" {\n\t\tdests := strings.Split(MoveMetadataLocation, \".\")\n\t\tfor _, key := range dests {\n\t\t\ttarget[key] = common.MapStr{}\n\t\t\ttarget = target[key].(common.MapStr)\n\t\t}\n\t}\n\n\t// range over the JournalEntry Fields and convert to the common.MapStr\n\tfor k, v := range ev.Fields {\n\t\tnk := makeNewKey(k, cleanKeys)\n\t\tnv := makeNewValue(v, convertToNumbers)\n\t\t// message Field should be on the top level of the event\n\t\tif nk == \"message\" {\n\t\t\tm[nk] = nv\n\t\t\tcontinue\n\t\t}\n\t\ttarget[nk] = nv\n\t}\n\n\treturn m\n}", "func (controller *Controller) parseObjectFromMap(objectData map[string]interface{}) (*object.Object, error) {\n\terrorMessage := \"unable to parse object\"\n\n\tname, err := controller.parseStringFromMap(objectData, \"name\")\n\tif err != nil {\n\t\treturn nil, errors.New(errorMessage)\n\t}\n\n\tcolor, specularReflection, roughness, transmissionReflection, diffuseReflection, err :=\n\t\tcontroller.parseLightCharacteristicsFromMap(objectData)\n\tif err != nil {\n\t\treturn nil, errors.New(errorMessage)\n\t}\n\n\tnormals, err := controller.parseNormalsFromMap(objectData)\n\tif err != nil {\n\t\treturn nil, errors.New(errorMessage)\n\t}\n\n\trepository, err := controller.parseRepositoryFromMap(objectData)\n\tif err != nil {\n\t\treturn nil, errors.New(errorMessage)\n\t}\n\n\ttriangles, err := controller.parseTrianglesFromMap(objectData)\n\tif err != nil {\n\t\treturn nil, errors.New(errorMessage)\n\t}\n\n\tparsedObject, err := object.Init(name, repository, triangles, normals, color, specularReflection, roughness,\n\t\ttransmissionReflection, diffuseReflection)\n\tif err != nil {\n\t\treturn nil, errors.New(errorMessage)\n\t}\n\treturn parsedObject, nil\n}", "func ParseStr(encodedString string, result map[string]interface{}) error {\n\t// build nested map.\n\tvar build 
func(map[string]interface{}, []string, interface{}) error\n\n\tbuild = func(result map[string]interface{}, keys []string, value interface{}) error {\n\t\tlength := len(keys)\n\t\t// trim ',\"\n\t\tkey := strings.Trim(keys[0], \"'\\\"\")\n\t\tif length == 1 {\n\t\t\tresult[key] = value\n\t\t\treturn nil\n\t\t}\n\n\t\t// The end is slice. like f[], f[a][]\n\t\tif keys[1] == \"\" && length == 2 {\n\t\t\t// todo nested slice\n\t\t\tif key == \"\" {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tval, ok := result[key]\n\t\t\tif !ok {\n\t\t\t\tresult[key] = []interface{}{value}\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tchildren, ok := val.([]interface{})\n\t\t\tif !ok {\n\t\t\t\treturn fmt.Errorf(\"expected type '[]interface{}' for key '%s', but got '%T'\", key, val)\n\t\t\t}\n\t\t\tresult[key] = append(children, value)\n\t\t\treturn nil\n\t\t}\n\n\t\t// The end is slice + map. like f[][a]\n\t\tif keys[1] == \"\" && length > 2 && keys[2] != \"\" {\n\t\t\tval, ok := result[key]\n\t\t\tif !ok {\n\t\t\t\tresult[key] = []interface{}{}\n\t\t\t\tval = result[key]\n\t\t\t}\n\t\t\tchildren, ok := val.([]interface{})\n\t\t\tif !ok {\n\t\t\t\treturn fmt.Errorf(\"expected type '[]interface{}' for key '%s', but got '%T'\", key, val)\n\t\t\t}\n\t\t\tif l := len(children); l > 0 {\n\t\t\t\tif child, ok := children[l-1].(map[string]interface{}); ok {\n\t\t\t\t\tif _, ok := child[keys[2]]; !ok {\n\t\t\t\t\t\t_ = build(child, keys[2:], value)\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tchild := map[string]interface{}{}\n\t\t\t_ = build(child, keys[2:], value)\n\t\t\tresult[key] = append(children, child)\n\n\t\t\treturn nil\n\t\t}\n\n\t\t// map. 
like f[a], f[a][b]\n\t\tval, ok := result[key]\n\t\tif !ok {\n\t\t\tresult[key] = map[string]interface{}{}\n\t\t\tval = result[key]\n\t\t}\n\t\tchildren, ok := val.(map[string]interface{})\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"expected type 'map[string]interface{}' for key '%s', but got '%T'\", key, val)\n\t\t}\n\n\t\treturn build(children, keys[1:], value)\n\t}\n\n\t// split encodedString.\n\tparts := strings.Split(encodedString, \"&\")\n\tfor _, part := range parts {\n\t\tpos := strings.Index(part, \"=\")\n\t\tif pos <= 0 {\n\t\t\tcontinue\n\t\t}\n\t\tkey, err := url.QueryUnescape(part[:pos])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor key[0] == ' ' {\n\t\t\tkey = key[1:]\n\t\t}\n\t\tif key == \"\" || key[0] == '[' {\n\t\t\tcontinue\n\t\t}\n\t\tvalue, err := url.QueryUnescape(part[pos+1:])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// split into multiple keys\n\t\tvar keys []string\n\t\tleft := 0\n\t\tfor i, k := range key {\n\t\t\tif k == '[' && left == 0 {\n\t\t\t\tleft = i\n\t\t\t} else if k == ']' {\n\t\t\t\tif left > 0 {\n\t\t\t\t\tif len(keys) == 0 {\n\t\t\t\t\t\tkeys = append(keys, key[:left])\n\t\t\t\t\t}\n\t\t\t\t\tkeys = append(keys, key[left+1:i])\n\t\t\t\t\tleft = 0\n\t\t\t\t\tif i+1 < len(key) && key[i+1] != '[' {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif len(keys) == 0 {\n\t\t\tkeys = append(keys, key)\n\t\t}\n\t\t// first key\n\t\tfirst := \"\"\n\t\tfor i, chr := range keys[0] {\n\t\t\tif chr == ' ' || chr == '.' 
|| chr == '[' {\n\t\t\t\tfirst += \"_\"\n\t\t\t} else {\n\t\t\t\tfirst += string(chr)\n\t\t\t}\n\t\t\tif chr == '[' {\n\t\t\t\tfirst += keys[0][i+1:]\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tkeys[0] = first\n\n\t\t// build nested map\n\t\tif err := build(result, keys, value); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func ParseStr(encodedString string, result map[string]interface{}) error {\n\t// build nested map.\n\tvar build func(map[string]interface{}, []string, interface{}) error\n\n\tbuild = func(result map[string]interface{}, keys []string, value interface{}) error {\n\t\tlength := len(keys)\n\t\t// trim ',\"\n\t\tkey := strings.Trim(keys[0], \"'\\\"\")\n\t\tif length == 1 {\n\t\t\tresult[key] = value\n\t\t\treturn nil\n\t\t}\n\n\t\t// The end is slice. like f[], f[a][]\n\t\tif keys[1] == \"\" && length == 2 {\n\t\t\t// todo nested slice\n\t\t\tif key == \"\" {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tval, ok := result[key]\n\t\t\tif !ok {\n\t\t\t\tresult[key] = []interface{}{value}\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tchildren, ok := val.([]interface{})\n\t\t\tif !ok {\n\t\t\t\treturn fmt.Errorf(\"expected type '[]interface{}' for key '%s', but got '%T'\", key, val)\n\t\t\t}\n\t\t\tresult[key] = append(children, value)\n\t\t\treturn nil\n\t\t}\n\n\t\t// The end is slice + map. 
like f[][a]\n\t\tif keys[1] == \"\" && length > 2 && keys[2] != \"\" {\n\t\t\tval, ok := result[key]\n\t\t\tif !ok {\n\t\t\t\tresult[key] = []interface{}{}\n\t\t\t\tval = result[key]\n\t\t\t}\n\t\t\tchildren, ok := val.([]interface{})\n\t\t\tif !ok {\n\t\t\t\treturn fmt.Errorf(\"expected type '[]interface{}' for key '%s', but got '%T'\", key, val)\n\t\t\t}\n\t\t\tif l := len(children); l > 0 {\n\t\t\t\tif child, ok := children[l-1].(map[string]interface{}); ok {\n\t\t\t\t\tif _, ok := child[keys[2]]; !ok {\n\t\t\t\t\t\t_ = build(child, keys[2:], value)\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tchild := map[string]interface{}{}\n\t\t\t_ = build(child, keys[2:], value)\n\t\t\tresult[key] = append(children, child)\n\n\t\t\treturn nil\n\t\t}\n\n\t\t// map. like f[a], f[a][b]\n\t\tval, ok := result[key]\n\t\tif !ok {\n\t\t\tresult[key] = map[string]interface{}{}\n\t\t\tval = result[key]\n\t\t}\n\t\tchildren, ok := val.(map[string]interface{})\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"expected type 'map[string]interface{}' for key '%s', but got '%T'\", key, val)\n\t\t}\n\n\t\treturn build(children, keys[1:], value)\n\t}\n\n\t// split encodedString.\n\tparts := strings.Split(encodedString, \"&\")\n\tfor _, part := range parts {\n\t\tpos := strings.Index(part, \"=\")\n\t\tif pos <= 0 {\n\t\t\tcontinue\n\t\t}\n\t\tkey, err := url.QueryUnescape(part[:pos])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor key[0] == ' ' {\n\t\t\tkey = key[1:]\n\t\t}\n\t\tif key == \"\" || key[0] == '[' {\n\t\t\tcontinue\n\t\t}\n\t\tvalue, err := url.QueryUnescape(part[pos+1:])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// split into multiple keys\n\t\tvar keys []string\n\t\tleft := 0\n\t\tfor i, k := range key {\n\t\t\tif k == '[' && left == 0 {\n\t\t\t\tleft = i\n\t\t\t} else if k == ']' {\n\t\t\t\tif left > 0 {\n\t\t\t\t\tif len(keys) == 0 {\n\t\t\t\t\t\tkeys = append(keys, key[:left])\n\t\t\t\t\t}\n\t\t\t\t\tkeys = append(keys, 
key[left+1:i])\n\t\t\t\t\tleft = 0\n\t\t\t\t\tif i+1 < len(key) && key[i+1] != '[' {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif len(keys) == 0 {\n\t\t\tkeys = append(keys, key)\n\t\t}\n\t\t// first key\n\t\tfirst := \"\"\n\t\tfor i, chr := range keys[0] {\n\t\t\tif chr == ' ' || chr == '.' || chr == '[' {\n\t\t\t\tfirst += \"_\"\n\t\t\t} else {\n\t\t\t\tfirst += string(chr)\n\t\t\t}\n\t\t\tif chr == '[' {\n\t\t\t\tfirst += keys[0][i+1:]\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tkeys[0] = first\n\n\t\t// build nested map\n\t\tif err := build(result, keys, value); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func getDataMap(s string) map[string]string {\n\tm := make(map[string]string)\n\tvar key, data []rune\n\tvar pp rune\n\tisKey := true\n\n\tfor _, p := range s {\n\t\tif isKey {\n\t\t\tif p == '=' {\n\t\t\t\tisKey = false\n\t\t\t} else {\n\t\t\t\tkey = append(key, p)\n\t\t\t}\n\t\t} else {\n\t\t\tif p == '\\\\' && pp == '\\\\' {\n\t\t\t\t// Do nothing here, because '\\\\' should be tranformed in '\\'.\n\t\t\t\t// Change the p rune to any character, so that pp won't hold '\\'.\n\t\t\t\t// This is important for the '&'...\n\t\t\t\tp = ' '\n\t\t\t} else if p == '&' {\n\t\t\t\t// Skip escaped '&' characters\n\t\t\t\tif pp == '\\\\' {\n\t\t\t\t\t// Remove the last '\\' and replace it by the '&' character\n\t\t\t\t\tdata[len(data)-1] = '&'\n\t\t\t\t} else {\n\t\t\t\t\t// Return an emtpy map if the key is empty\n\t\t\t\t\tif len(key) == 0 {\n\t\t\t\t\t\treturn make(map[string]string)\n\t\t\t\t\t}\n\n\t\t\t\t\tm[string(key)] = string(data)\n\t\t\t\t\tkey = key[:0]\n\t\t\t\t\tdata = data[:0]\n\t\t\t\t\tisKey = true\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tdata = append(data, p)\n\t\t\t}\n\t\t}\n\n\t\t// Save the current part to the previous part rune\n\t\tpp = p\n\t}\n\n\treturn m\n}", "func (em envmap) parse(oe []string) error {\n\tfor _, v := range oe {\n\t\tkv := strings.SplitN(v, \"=\", 2)\n\t\tif len(kv) < 2 {\n\t\t\treturn 
fmt.Errorf(\"expected format key=value in '%s'\", v)\n\t\t}\n\t\tem[kv[0]] = kv[1]\n\t}\n\treturn nil\n}", "func mapFromQueryString(queryString string) map[string]string {\n\tm := make(map[string]string)\n\tparams := strings.Split(queryString, \"&\")\n\n\tfor _, param := range params {\n\t\tsplitParam := strings.Split(param, \"=\")\n\t\tkey := splitParam[0]\n\t\tval := splitParam[1]\n\n\t\tm[key] = val\n\t}\n\treturn m\n}", "func ReflectSetStructFieldsFromStringMap(structPtr interface{}, m map[string]string, errOnMissingField bool) error {\n\tv := reflect.ValueOf(structPtr)\n\tif v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct {\n\t\treturn fmt.Errorf(\"structPtr must be pointer to a struct, but is %T\", structPtr)\n\t}\n\tv = v.Elem()\n\n\tfor name, value := range m {\n\t\tif f := v.FieldByName(name); f.IsValid() {\n\t\t\tif f.Kind() == reflect.String {\n\t\t\t\tf.SetString(value)\n\t\t\t} else {\n\t\t\t\t_, err := fmt.Sscan(value, f.Addr().Interface())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t} else if errOnMissingField {\n\t\t\treturn fmt.Errorf(\"%T has no struct field '%s'\", v.Interface(), name)\n\t\t}\n\t}\n\n\treturn nil\n}", "func (element *String) UnmarshalMap(data map[string]any) error {\n\n\tvar err error\n\n\tif convert.String(data[\"type\"]) != \"string\" {\n\t\treturn derp.NewInternalError(\"schema.String.UnmarshalMap\", \"Data is not type 'string'\", data)\n\t}\n\n\telement.Default = convert.String(data[\"default\"])\n\telement.MinLength = convert.Int(data[\"minLength\"])\n\telement.MaxLength = convert.Int(data[\"maxLength\"])\n\telement.Pattern = convert.String(data[\"pattern\"])\n\telement.Format = convert.String(data[\"format\"])\n\telement.Enum = convert.SliceOfString(data[\"enum\"])\n\telement.Required = convert.Bool(data[\"required\"])\n\telement.RequiredIf = convert.String(data[\"required-if\"])\n\n\treturn err\n}", "func mapArgs(rawArgs string) (map[string]string, error) {\n\targMap := 
make(map[string]string)\n\n\t// split params: param0:<param-val0> paramN:<param-valN> badparam\n\tparams, err := commandSplit(rawArgs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// for each, split pram:<pram-value> into {param, <param-val>}\n\tfor _, param := range params {\n\t\tcmdName, cmdStr, err := namedParamSplit(param)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"map args: %s\", err)\n\t\t}\n\t\targMap[cmdName] = cmdStr\n\t}\n\n\treturn argMap, nil\n}", "func NewStrMapFrom(orig map[string]interface{}) StrMap {\n\treturn &strMap{s: orig}\n}", "func (is *infosec) parse(page *Page) {\n\tis.Map.parse(page)\n}", "func StructFromMapType() dgo.MapType {\n\tif sfmType == nil {\n\t\tsfmType = Parse(`map[string](dgo|type|{type:dgo|type,required?:bool,...})`).(dgo.MapType)\n\t}\n\treturn sfmType\n}", "func createTopologyMap(topologyString string) map[string][]string {\n\ttopologyMap := make(map[string][]string)\n\tfor _, t := range strings.Split(topologyString, \",\") {\n\t\tt = strings.TrimSpace(t)\n\t\ttopology := strings.Split(t, \":\")\n\t\tif len(topology) != 2 {\n\t\t\tcontinue\n\t\t}\n\t\ttopologyMap[topology[0]] = append(topologyMap[topology[0]], topology[1])\n\t}\n\treturn topologyMap\n}", "func parseMap(aMap map[string]interface{}) {\n\tfor key, val := range aMap {\n\t\tswitch concreteVal := val.(type) {\n\t\tcase []interface{}:\n\t\t\t//fmt.Println(key)\n\t\t\tif key == \"scopes\" {\n\t\t\t\tscope = append(scope, \"!INCLUDE\")\n\t\t\t\tscope = append(scope, fmt.Sprint(concreteVal))\n\t\t\t\tparseArray(val.([]interface{}))\n\t\t\t} else if key == \"out_of_scope\" {\n\t\t\t\tscope = append(scope, \"!EXCLUDE\")\n\t\t\t\tscope = append(scope, fmt.Sprint(concreteVal))\n\t\t\t\tparseArray(val.([]interface{}))\n\t\t\t}\n\t\tdefault:\n\t\t\t// fmt.Println(key, \":\", concreteVal)\n\t\t\tif key == \"content\" {\n\t\t\t\tscope = append(scope, fmt.Sprint(concreteVal))\n\t\t\t}\n\t\t}\n\t}\n}", "func mapper(key string) (string, string, string, string) 
{\n\tvals := strings.Split(key, \":\")\n\treturn vals[0], vals[1], vals[2], vals[3]\n}", "func ParseMapWS(line string) map[string]string {\n\tif line == \"\" {\n\t\treturn nil\n\t}\n\n\tvar (\n\t\tescp bool\n\t\tquot rune\n\t\tckey string\n\t\tkeyb = &bytes.Buffer{}\n\t\tvalb = &bytes.Buffer{}\n\t\tword = keyb\n\t\tdata = map[string]string{}\n\t)\n\n\tfor i, c := range line {\n\t\t// Check to see if the character is a quote character.\n\t\tswitch c {\n\t\tcase '\\\\':\n\t\t\t// If not already escaped then activate escape.\n\t\t\tif !escp {\n\t\t\t\tescp = true\n\t\t\t\tcontinue\n\t\t\t}\n\t\tcase '\\'', '\"':\n\t\t\t// If the quote or double quote is the first char or\n\t\t\t// an unescaped char then determine if this is the\n\t\t\t// beginning of a quote or end of one.\n\t\t\tif i == 0 || !escp {\n\t\t\t\tif quot == c {\n\t\t\t\t\tquot = 0\n\t\t\t\t} else {\n\t\t\t\t\tquot = c\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\tcase '=':\n\t\t\t// If the word buffer is currently the key buffer,\n\t\t\t// quoting is not enabled, and the preceeding character\n\t\t\t// is not the escape character then the equal sign indicates\n\t\t\t// a transition from key to value.\n\t\t\tif word == keyb && quot == 0 && !escp {\n\t\t\t\tckey = keyb.String()\n\t\t\t\tkeyb.Reset()\n\t\t\t\tword = valb\n\t\t\t\tcontinue\n\t\t\t}\n\t\tcase ' ', '\\t':\n\t\t\t// If quoting is not enabled and the preceeding character is\n\t\t\t// not the escape character then record the value into the\n\t\t\t// map and fast-forward the cursor to the next, non-whitespace\n\t\t\t// character.\n\t\t\tif quot == 0 && !escp {\n\t\t\t\t// Record the value into the map for the current key.\n\t\t\t\tif ckey != \"\" {\n\t\t\t\t\tdata[ckey] = valb.String()\n\t\t\t\t\tvalb.Reset()\n\t\t\t\t\tword = keyb\n\t\t\t\t\tckey = \"\"\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tif escp {\n\t\t\tescp = false\n\t\t}\n\t\tword.WriteRune(c)\n\t}\n\n\t// If the current key string is not empty then record it with the value\n\t// 
buffer's string value as a new pair.\n\tif ckey != \"\" {\n\t\tdata[ckey] = valb.String()\n\t}\n\n\treturn data\n}", "func ParseMap(line string) map[string]string {\n\tline = strings.TrimSpace(line)\n\tif line == \"\" {\n\t\treturn nil\n\t}\n\n\tr := csv.NewReader(strings.NewReader(line))\n\tr.TrimLeadingSpace = true\n\n\trecord, err := r.Read()\n\tif err == io.EOF {\n\t\treturn nil\n\t}\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdata := map[string]string{}\n\tfor i := range record {\n\t\tp := strings.SplitN(record[i], \"=\", 2)\n\t\tif len(p) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tk := p[0]\n\t\tvar v string\n\t\tif len(p) > 1 {\n\t\t\tv = p[1]\n\t\t}\n\t\tdata[k] = v\n\t}\n\n\treturn data\n}", "func parseKeyValueStringsToMap(values []string) (map[string]string, []string) {\n\tparsedLines := make(map[string]string)\n\tvar unparsableLines []string\n\tfor _, kv := range values {\n\t\tsplit := strings.SplitN(kv, \"=\", 2)\n\t\tkey := strings.TrimSpace(split[0])\n\t\tvalue := \"\"\n\t\tif len(split) == 2 {\n\t\t\tvalue = strings.TrimSpace(split[1])\n\t\t}\n\n\t\terr := validation.ValidateEnvarName(key)\n\t\tif err != nil {\n\t\t\tunparsableLines = append(unparsableLines, kv)\n\t\t} else {\n\t\t\tparsedLines[key] = value\n\t\t}\n\t}\n\n\treturn parsedLines, unparsableLines\n}", "func (o *Input) FromMap(values map[string]interface{}) error {\n\to.Message = values[\"message\"]\n\treturn nil\n}", "func MapToStruct(m map[string]interface{}, struc interface{}) error {\n\t//fmt.Printf(\"Input map: %+v\\n\", m)\n\t//fmt.Printf(\"Input struc: %+v\\n\", struc)\n\tval := r.Indirect(r.ValueOf(struc))\n\tsinfo := getStructInfo(val)\n\t//fmt.Printf(\"sinfo: %+v\\n\", sinfo)\n\tfor k, v := range m {\n\t\t//fmt.Printf(\"k: %+v v: %+v\\n\", k, v)\n\t\tif info, ok := sinfo.FieldsMap[k]; ok {\n\t\t\t//fmt.Printf(\"info: %+v\\n\", info)\n\t\t\tstructField := val.Field(info.Num)\n\t\t\t//fmt.Printf(\"type struct: %q, %q, %q\\n\", structField.Type(), structField.Type().Name(), 
structField.Kind())\n\t\t\t//fmt.Printf(\"type value: %q\\n\", r.TypeOf(v).Name())\n\t\t\t//fmt.Printf(\"value: %+v\\n\", r.ValueOf(v))\n\t\t\tif structField.Kind().String() == \"slice\" && r.TypeOf(v).Kind().String() == \"slice\" {\n\t\t\t\tif structField.Type().Elem() == r.TypeOf(v).Elem() {\n\t\t\t\t\t//fmt.Print(\"Slices of same type\\n\")\n\t\t\t\t\tstructField.Set(r.ValueOf(v))\n\t\t\t\t} else if structField.Type().Elem().Kind().String() == r.TypeOf(v).Elem().Kind().String() {\n\t\t\t\t\t//fmt.Print(\"Slices of same kind\\n\")\n\t\t\t\t\ts := r.ValueOf(v)\n\t\t\t\t\tresult := r.MakeSlice(structField.Type(), 0, s.Len())\n\t\t\t\t\tfor j := 0; j < s.Len(); j++ {\n\t\t\t\t\t\tresult = r.Append(result, r.ValueOf(s.Index(j).Interface()).Convert(structField.Type().Elem()))\n\t\t\t\t\t}\n\t\t\t\t\tstructField.Set(result)\n\t\t\t\t} else if r.TypeOf(v).Elem().String() == \"string\" {\n\t\t\t\t\t//fmt.Print(\"Slices of different kind\\n\")\n\t\t\t\t\tstringList := v.([]string)\n\t\t\t\t\tresult := r.MakeSlice(structField.Type(), 0, len(stringList))\n\t\t\t\t\tfor _, str := range stringList {\n\t\t\t\t\t\ttmp := r.New(structField.Type().Elem())\n\t\t\t\t\t\terr := json.Unmarshal([]byte(str), tmp.Interface())\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t//fmt.Printf(\"Unmarshal failed on: %q due to: %q!!!\\n\", str, err)\n\t\t\t\t\t\t\t//return err\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tresult = r.Append(result, r.Indirect(tmp))\n\t\t\t\t\t}\n\t\t\t\t\tstructField.Set(result)\n\t\t\t\t}\n\t\t\t} else if structField.Type().Name() == \"\" || r.TypeOf(v).Name() == \"\" {\n\t\t\t\treturn fmt.Errorf(\"WTF are these types???!!! 
%q %q\\n\", structField.Kind().String(), r.TypeOf(v).Kind().String())\n\t\t\t} else if structField.Type().Name() == r.TypeOf(v).Name() {\n\t\t\t\t//fmt.Print(\"Field set naturally!!!\\n\")\n\t\t\t\tstructField.Set(r.ValueOf(v))\n\t\t\t} else if structField.Kind().String() == r.TypeOf(v).Name() {\n\t\t\t\t//fmt.Print(\"Field set with convert !!!\\n\")\n\t\t\t\tstructField.Set(r.ValueOf(v).Convert(structField.Type()))\n\t\t\t} else {\n\t\t\t\treturn fmt.Errorf(\"Please handle these types: %s with %s\\n\", structField.Kind().String(), r.TypeOf(v).Kind().String())\n\t\t\t}\n\t\t} else {\n\t\t\t//fmt.Printf(\"field %q not found\\n\", k) TODO: in which situation do we reach this point? oO\n\t\t}\n\t\t//fmt.Printf(\"Check fill struc: %+v\\n\", struc)\n\t}\n\treturn nil\n}", "func (e *Extractor) FieldValueFromTagMap(tag string) (out map[string]interface{}, err error) {\n\n\tif err := e.isValidStruct(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tout = make(map[string]interface{})\n\ts := reflect.ValueOf(e.StructAddr).Elem()\n\tfields := e.fields(s)\n\n\tfor _, field := range fields {\n\t\tif val, ok := field.tags.Lookup(tag); ok {\n\t\t\tkey, omit := e.parseOmitempty(val, field.value)\n\t\t\tif omit {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tout[key] = field.value.Interface()\n\t\t}\n\n\t}\n\n\treturn\n}", "func (s *SimpleStruct) UnmarshalMap(m map[string]interface{}) error {\n\n\tif v, ok := m[\"SimpleField\"].(string); ok {\n\t\ts.SimpleField = v\n\n\t} else if v, exists := m[\"SimpleField\"]; exists && v != nil {\n\t\treturn fmt.Errorf(\"expected field SimpleField to be string but got %T\", m[\"SimpleField\"])\n\t}\n\n\tif v, ok := m[\"field2\"].(string); ok {\n\t\ts.SimpleJSONTagged = v\n\n\t} else if v, exists := m[\"field2\"]; exists && v != nil {\n\t\treturn fmt.Errorf(\"expected field field2 to be string but got %T\", m[\"field2\"])\n\t}\n\n\tif v, ok := m[\"field3\"].(string); ok {\n\t\ts.SimpleJSONTaggedOmitted = v\n\n\t} else if v, exists := m[\"field3\"]; exists && v 
!= nil {\n\t\treturn fmt.Errorf(\"expected field field3 to be string but got %T\", m[\"field3\"])\n\t}\n\n\tif v, ok := m[\"SimpleOmitEmptyNoName\"].(string); ok {\n\t\ts.SimpleOmitEmptyNoName = v\n\n\t} else if v, exists := m[\"SimpleOmitEmptyNoName\"]; exists && v != nil {\n\t\treturn fmt.Errorf(\"expected field SimpleOmitEmptyNoName to be string but got %T\", m[\"SimpleOmitEmptyNoName\"])\n\t}\n\n\t// Pointer SimplePointer\n\tif p, ok := m[\"pointer\"]; ok {\n\n\t\tif m, ok := p.(string); ok {\n\t\t\ts.SimplePointer = &m\n\n\t\t} else if p == nil {\n\t\t\ts.SimplePointer = nil\n\t\t}\n\n\t}\n\n\tif v, ok := m[\"integer\"].(int); ok {\n\t\ts.SimpleInteger = v\n\n\t} else if p, ok := m[\"integer\"].(float64); ok {\n\t\tv := int(p)\n\t\ts.SimpleInteger = v\n\n\t} else if v, exists := m[\"integer\"]; exists && v != nil {\n\t\treturn fmt.Errorf(\"expected field integer to be int but got %T\", m[\"integer\"])\n\t}\n\n\t// Pointer SimpleIntegerPtr\n\tif p, ok := m[\"integer_ptr\"]; ok {\n\n\t\tif m, ok := p.(int); ok {\n\t\t\ts.SimpleIntegerPtr = &m\n\n\t\t} else if m, ok := p.(float64); ok {\n\t\t\tv := int(m)\n\t\t\ts.SimpleIntegerPtr = &v\n\n\t\t} else if p == nil {\n\t\t\ts.SimpleIntegerPtr = nil\n\t\t}\n\n\t}\n\n\treturn nil\n}", "func Parse(content string) *Map {\n\temap := NewMap()\n\n\tlines := strings.Split(content, \"\\n\")\n\tfor _, line := range lines {\n\t\tline = strings.Trim(line, \" \")\n\n\t\tif !strings.HasPrefix(line, \"#\") && line != \"\" {\n\t\t\tkey, val := parseLine(line)\n\n\t\t\temap.Set(key, val)\n\t\t}\n\t}\n\n\treturn emap\n}", "func (s *Structure) StringMap(isMaster bool, cmd string, params ...interface{}) (reply map[string]string, err error) {\n\tconn := s.getConn(isMaster)\n\tif conn == nil {\n\t\treturn nil, configNotExistsOrLoad(s.InstanceName, isMaster)\n\t}\n\n\treply, err = redis.StringMap(conn.Do(cmd, params...))\n\tconn.Close()\n\n\treturn reply, err\n}", "func Map2Protocol(m map[string]string) Protocol {\n\tt, err := 
time.Parse(dateFormat, m[\"Time\"])\n\tif err != nil {\n\t\tfmt.Printf(\"err by parse date: %v\", err)\n\t}\n\tid, _ := strconv.Atoi(m[\"HeroID\"])\n\treturn Protocol{\n\t\tAction: m[\"Action\"],\n\t\tHeroID: int64(id),\n\t\tNote: m[\"Note\"],\n\t\tTime: t,\n\t}\n}", "func MapString(m map[string]interface{}, key string) string {\n\tb, _ := m[key].(string)\n\treturn b\n}", "func ConvertSliceToMap(inputs []string, kind string) (map[string]string, error) {\n\tresult := make(map[string]string)\n\tfor _, input := range inputs {\n\t\tc := strings.Index(input, \":\")\n\t\tswitch {\n\t\tcase c == 0:\n\t\t\t// key is not passed\n\t\t\treturn nil, fmt.Errorf(\"invalid %s: '%s' (%s)\", kind, input, \"need k:v pair where v may be quoted\")\n\t\tcase c < 0:\n\t\t\t// only key passed\n\t\t\tresult[input] = \"\"\n\t\tdefault:\n\t\t\t// both key and value passed\n\t\t\tkey := input[:c]\n\t\t\tvalue := trimQuotes(input[c+1:])\n\t\t\tresult[key] = value\n\t\t}\n\t}\n\treturn result, nil\n}", "func stringFromMap(m map[string]interface{}, key string) string {\n\tfor k, v := range m {\n\t\tif k == key {\n\t\t\treturn v.(string)\n\t\t}\n\t}\n\treturn \"\"\n}", "func makeMap(yamlStruct T) map[string]string {\n\turlMap := make(map[string]string)\n\tfor _, s := range yamlStruct {\n\t\turlMap[s.P] = s.U\n\t}\n\treturn urlMap\n}", "func (status *Status) FromStringStringMap(input map[string]string) error {\n\tfor k, v := range input {\n\t\tstatus.parseProperty(k, v)\n\t}\n\treturn nil\n}", "func runStringToIntMap() {\n\tvar string2IntMap = make(String2IntMap, 5)\n\tstring2IntMap[\"USA\"] = 100\n\tstring2IntMap[\"China\"] = 200\n\tfmt.Printf(\"Map says %d \\r\\n\", len(string2IntMap))\n\tdelete(string2IntMap, \"USA\")\n\tfmt.Printf(\"Map says %d \\r\\n\", len(string2IntMap))\n\tfmt.Printf(\"Map says %d \\r\\n\", string2IntMap[\"USA\"])\n\tfmt.Printf(\"Map says %d \\r\\n\", len(string2IntMap))\n}", "func FromMap(input map[string]interface{}) interface{} {\n\tvar (\n\t\ttypeIfc 
interface{}\n\t\tok bool\n\t)\n\n\tif typeIfc, ok = input[\"type\"]; ok {\n\t\tswitch typeIfc.(string) {\n\t\tcase FEATURE:\n\t\t\treturn FeatureFromMap(input)\n\t\tcase FEATURECOLLECTION:\n\t\t\treturn FeatureCollectionFromMap(input)\n\t\tdefault:\n\t\t\treturn newGeometry(input)\n\t\t}\n\n\t}\n\treturn nil\n}", "func ValString(k string, p map[string]string) (v string) {\n\n\tv, _ = p[k]\n\treturn\n}", "func (m *Match) TryParseGameMap(msg string) {\n\tif match := mapLoaded.FindStringSubmatch(msg); len(match) > 0 {\n\t\tm._map = match[1]\n\t}\n}", "func parseKeyValString(keyValStr, delim1, delim2 string) (map[string]string, error) {\n\tm := make(map[string]string)\n\tif delim1 == delim2 {\n\t\treturn m, errors.New(\"delimiters can't be equal\")\n\t}\n\tpairs := strings.SplitN(keyValStr, delim1, -1)\n\tfor _, pair := range pairs {\n\t\tparts := strings.SplitN(pair, delim2, 2)\n\t\tif len(parts) != 2 {\n\t\t\treturn m, errors.New(\"failed to parse into two parts\")\n\t\t}\n\t\tkey, val := strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1])\n\t\tif key == \"\" || val == \"\" {\n\t\t\treturn m, errors.New(\"key or value is empty\")\n\t\t}\n\t\tm[key] = val\n\t}\n\treturn m, nil\n}", "func splitMapOverwrite(str string) (string, string, error) {\n\tif split := strings.SplitN(str, \"=\", 2); len(split) > 1 {\n\t\treturn split[0], split[1], nil\n\t}\n\n\treturn \"\", \"\", fmt.Errorf(\"Provided value %q is malformed, does not match k=v\", str)\n}", "func newSimpleMapping(name string, m map[byte]rune) *simpleMapping {\n\treturn &simpleMapping{\n\t\tbaseName: name,\n\t\tdecode: m,\n\t}\n}", "func StringPtrMap(src map[string]string) map[string]*string {\n\tdst := make(map[string]*string)\n\tfor k, val := range src {\n\t\tv := val\n\t\tdst[k] = &v\n\t}\n\treturn dst\n}", "func (i *Input) FromMap(values map[string]interface{}) error {\n\n\tvar err error\n\ti.Message, err = coerce.ToString(values[\"message\"])\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func 
Map2Struct(jmap interface{}, s interface{}) error {\n tmpDataJson, err := json.Marshal(jmap)\n if err != nil {\n return err\n }\n err = json.Unmarshal(tmpDataJson, &s)\n if err != nil {\n return err\n }\n return nil\n}", "func UnmarshallFromStringMap(data map[string]string) NodeInfo {\n\tvar err error\n\tvar diskNames []string\n\tvar netifNames []string\n\n\tnbd := NodeInfo{}\n\tfor k, v := range data {\n\t\tswitch k {\n\t\tcase \"ChassisSerial\":\n\t\t\tnbd.ChassisSerial = v\n\t\tcase \"MachineID\":\n\t\t\tnbd.MachineID = v\n\t\tcase \"MemTotal\":\n\t\t\tnbd.MemTotal, _ = strconv.ParseUint(v, 0, 0)\n\t\tcase \"NumCPU\":\n\t\t\tnbd.NumCPU, _ = strconv.Atoi(v)\n\t\tcase \"NumCore\":\n\t\t\tnbd.NumCore, _ = strconv.Atoi(v)\n\t\tcase \"NumPhysicalCPU\":\n\t\t\tnbd.NumPhysicalCPU, _ = strconv.Atoi(v)\n\t\tcase \"CPUMHz\":\n\t\t\tnbd.CPUMHz, _ = strconv.ParseFloat(v, 64)\n\t\tcase \"CPUModelName\":\n\t\t\tnbd.CPUModelName = v\n\t\t}\n\t\tif strings.HasPrefix(k, \"Disks_\") {\n\t\t\tdiskNames = append(diskNames, k)\n\t\t}\n\t\tif strings.HasPrefix(k, \"NetIfs_\") {\n\t\t\tnetifNames = append(netifNames, k)\n\t\t}\n\t}\n\t// keep the names in the same order as marshalling\n\tsort.Strings(diskNames)\n\tsort.Strings(netifNames)\n\n\tfor _, k := range diskNames {\n\t\tvar n DiskInfo\n\t\tv := data[k]\n\t\terr = json.Unmarshal([]byte(v), &n)\n\t\tif err != nil {\n\t\t\tlog.Println(err, v)\n\t\t} else {\n\t\t\tnbd.Disks = append(nbd.Disks, n)\n\t\t}\n\t}\n\tfor _, k := range netifNames {\n\t\tvar n NetIntf\n\t\tv := data[k]\n\t\tn.Addrs = make([]net.IPNet, 0, 4)\n\t\terr = json.Unmarshal([]byte(v), &n)\n\t\tif err != nil {\n\t\t\tlog.Println(err, v)\n\t\t} else {\n\t\t\tnbd.NetIntfs = append(nbd.NetIntfs, n)\n\t\t}\n\t}\n\treturn nbd\n}", "func stringToInputStruct(args []string, v interface{}) {\n\tfieldNames := getFieldNames(v)\n\te := reflect.ValueOf(v).Elem()\n\tfor i, fn := range fieldNames {\n\t\tf := e.FieldByName(fn)\n\t\tf.SetString(args[i])\n\t}\n}", "func 
kindMapStringToType(o map[string]string) (map[string]kindElement, error) {\n\tr := make(map[string]kindElement)\n\tvar err error\n\tfor k, v := range o {\n\t\tr[k], err = kindFromString(v)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn r, nil\n}", "func (self *Map) MapString(tagName ...string) map[string]string {\n\tvar rv = make(map[string]string)\n\n\tfor k, v := range self.MapNative(tagName...) {\n\t\trv[k] = typeutil.String(v)\n\t}\n\n\treturn rv\n}", "func typeifyParsedLine(pl map[string]string) map[string]interface{} {\n\t// try to convert numbers, if possible\n\tmsi := make(map[string]interface{}, len(pl))\n\tfor k, v := range pl {\n\t\tswitch {\n\t\tcase strings.Contains(v, \".\"):\n\t\t\tf, err := strconv.ParseFloat(v, 64)\n\t\t\tif err == nil {\n\t\t\t\tmsi[k] = f\n\t\t\t\tcontinue\n\t\t\t}\n\t\tcase v == \"-\":\n\t\t\t// no value, don't set a \"-\" string\n\t\t\tcontinue\n\t\tdefault:\n\t\t\ti, err := strconv.ParseInt(v, 10, 64)\n\t\t\tif err == nil {\n\t\t\t\tmsi[k] = i\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tmsi[k] = v\n\t}\n\treturn msi\n}", "func mapStringStringMergeFrom(dst, src *map[string]string) {\n\tif (src == nil) || (*src == nil) {\n\t\treturn\n\t}\n\n\tif *dst == nil {\n\t\t*dst = make(map[string]string)\n\t}\n\n\tfor key, value := range *src {\n\t\tif _, ok := (*dst)[key]; ok {\n\t\t\t// Such key already exists in dst\n\t\t\tcontinue\n\t\t}\n\n\t\t// No such a key in dst\n\t\t(*dst)[key] = value\n\t}\n}", "func (i *Input) FromMap(values map[string]interface{}) error {\n\tvar err error\n\tvar keys interface{}\n\tif keys, err = coerce.ToAny(values[\"keys\"]); err != nil {\n\t\treturn err\n\t}\n\tswitch v := keys.(type) {\n\tcase []interface{}:\n\t\tfor _, d := range v {\n\t\t\tk := strings.TrimSpace(d.(string))\n\t\t\tif len(k) > 0 {\n\t\t\t\ti.StateKeys = append(i.StateKeys, k)\n\t\t\t}\n\t\t}\n\tcase string:\n\t\ti.StateKeys = []string{strings.TrimSpace(v)}\n\t}\n\n\tvar orgs interface{}\n\tif orgs, err = 
coerce.ToAny(values[\"organizations\"]); err != nil {\n\t\treturn err\n\t}\n\tswitch v := orgs.(type) {\n\tcase []interface{}:\n\t\tfor _, d := range v {\n\t\t\tk := strings.TrimSpace(d.(string))\n\t\t\tif len(k) > 0 {\n\t\t\t\ti.Organizations = append(i.Organizations, k)\n\t\t\t}\n\t\t}\n\tcase string:\n\t\ti.Organizations = []string{strings.TrimSpace(v)}\n\t}\n\n\tif i.Policy, err = coerce.ToString(values[\"policy\"]); err != nil {\n\t\treturn err\n\t}\n\tif i.PrivateCollection, err = coerce.ToString(values[\"privateCollection\"]); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func parseEntry(s string) *OidLocs {\n\tparts := strings.Split(s, \" - NLOC \")\n\t// 2017/07/14 17:20:09 27dfb875965af8f5eda9ab3247533d34dc0e263aa723e0ce02c3ba3d8f245fa2\n\t// 27dfb875965af8f5eda9ab3247533d34dc0e263aa723e0ce02c3ba3d8f245fa2\n\toid := strings.Split(parts[0], \" \")[2]\n\t// 1 loc_0 44b21c59-4249-437f-87f7-4a03f9c3bb28 16601959 16765728 163721 163721 NIDX 1 idx_0 0 163721\n\tlocsindices := strings.Split(parts[1], \" NIDX \")\n\t// 1 loc_0 44b21c59-4249-437f-87f7-4a03f9c3bb28 16601959 16765728 163721 163721\n\tlocs := locsindices[0]\n\t// 1 idx_0 0 163721\n\tvar indices string\n\tif len(locsindices) > 1 {\n\t\tindices = locsindices[1]\n\t}\n\n\tres := &OidLocs{oid: oid}\n\n\tnlocs := strings.SplitN(locs, \" \", 2)[0]\n\tnl, _ := strconv.Atoi(nlocs)\n\t// loc_0 44b21c59-4249-437f-87f7-4a03f9c3bb28 16601959 16765728 163721 163721\n\tfields := strings.Split(strings.SplitN(locs, \" \", 2)[1], \" \")\n\tfor i := 0; i < nl; i++ {\n\t\tid := fields[i*6+1]\n\t\tso, _ := strconv.ParseInt(fields[i*6+2], 10, 64)\n\t\teo, _ := strconv.ParseInt(fields[i*6+3], 10, 64)\n\t\tres.locs = append(res.locs, &loc{id, so, eo})\n\t}\n\n\tif indices != \"\" {\n\t\tnindices := strings.SplitN(indices, \" \", 2)[0]\n\t\tnidx, _ := strconv.Atoi(nindices)\n\t\tfields = strings.Split(strings.SplitN(indices, \" \", 2)[1], \" \")\n\t\tfor i := 0; i < nidx; i++ {\n\t\t\tof, _ := 
strconv.ParseInt(fields[i*3+1], 10, 64)\n\t\t\tl, _ := strconv.ParseInt(fields[i*3+2], 10, 64)\n\t\t\tres.indices = append(res.indices, &index{of, l})\n\t\t}\n\t}\n\n\treturn res\n}", "func (o *Output) FromMap(values map[string]interface{}) error {\n\to.Filter, _ = values[\"filter\"].(map[string]string)\n\treturn nil\n}", "func ParseRequestMap(request string, requestMap map[string]string) bool {\n\tvar isValidRequest bool = false\n\tvar jsonObjArr []string = strings.Split(request, \"\\n\")\n\tfor i := 0; i < len(jsonObjArr); i++ {\n\t\tvar jsonObj string = jsonObjArr[i]\n\t\tvar keyAndValue []string = strings.Split(jsonObj, \":\")\n\t\tfmt.Print(keyAndValue, \"\\n\")\n\t\tif len(keyAndValue) == 2 {\n\t\t\trequestMap[keyAndValue[0]] = keyAndValue[1]\n\t\t}\n\t}\n\tfmt.Print(\"the request map: \\n\", requestMap, \"\\n\")\n\n\ttoken := requestMap[\"authToken\"]\n\tfmt.Print(\"Token: \", token, \"\\n\")\n\tisValidRequest = verifyRequest(requestMap[\"authToken\"])\n\tfmt.Print(\"isValidRequest: \", isValidRequest, \"\\n\")\n\treturn isValidRequest\n}", "func processMap(name string, arr map[string]interface{}, value proto.Message) (reflect.Value, *adapter.ConfigErrors) {\n\tvar ce *adapter.ConfigErrors\n\tptrType := reflect.TypeOf(value)\n\tvalueType := reflect.Indirect(reflect.ValueOf(value)).Type()\n\toutmap := reflect.MakeMap(reflect.MapOf(reflect.ValueOf(\"\").Type(), ptrType))\n\tfor vname, val := range arr {\n\t\tdm := reflect.New(valueType).Interface().(proto.Message)\n\t\tif cerr := updateMsg(fmt.Sprintf(\"%s[%s]\", name, vname), val, dm, value, false); cerr != nil {\n\t\t\tce = ce.Extend(cerr)\n\t\t\tcontinue\n\t\t}\n\t\toutmap.SetMapIndex(reflect.ValueOf(vname), reflect.ValueOf(dm))\n\t}\n\treturn outmap, ce\n}", "func myMapper(value string) *list.List {\n\tvalue = strings.ReplaceAll(value, \".\", \" \")\n\talphanumeric := regexp.MustCompile(\"[^a-zA-Z\\\\s]+\")\n\tvalue = alphanumeric.ReplaceAllString(value, \"\")\n\twhitespace := 
regexp.MustCompile(\"[\\\\s]+\")\n\tvalue = whitespace.ReplaceAllString(value, \" \")\n\titrValue := strings.Fields(value)\n\tnewList := new(list.List)\n\tnewList.Init()\n\tfor _, word := range itrValue {\n\t\tmr := KeyValue{Key: word, Value: \"1\"}\n\t\tnewList.PushFront(mr)\n\t}\n\treturn newList\n}", "func StringMap(src map[string]*string) map[string]string {\n\tdst := make(map[string]string)\n\tfor k, val := range src {\n\t\tif val != nil {\n\t\t\tdst[k] = *val\n\t\t}\n\t}\n\treturn dst\n}", "func MapToTerraformTfvars(input map[string]interface{}) (output string, err error) {\n\tvar buf bytes.Buffer\n\n\tfor key, value := range input {\n\t\tswitch v := value.(type) {\n\t\tcase map[string]string:\n\t\t\t_, err := buf.WriteString(fmt.Sprintf(\"%s = {\\n\", key))\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\n\t\t\tkeys := make([]string, len(v))\n\t\t\tpos := 0\n\t\t\tfor key, _ := range v {\n\t\t\t\tkeys[pos] = key\n\t\t\t\tpos++\n\t\t\t}\n\t\t\tsort.Strings(keys)\n\t\t\tfor _, key := range keys {\n\t\t\t\t_, err := buf.WriteString(fmt.Sprintf(\" %s = \\\"%s\\\"\\n\", key, v[key]))\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t_, err = buf.WriteString(\"}\\n\")\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\tcase []string:\n\t\t\tvalues := make([]string, len(v))\n\t\t\tfor pos, _ := range v {\n\t\t\t\tvalues[pos] = fmt.Sprintf(`\"%s\"`, v[pos])\n\t\t\t}\n\t\t\t_, err := buf.WriteString(fmt.Sprintf(\"%s = [%s]\\n\", key, strings.Join(values, \", \")))\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\tcase string:\n\t\t\t_, err := buf.WriteString(fmt.Sprintf(\"%s = \\\"%s\\\"\\n\", key, v))\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\tcase int:\n\t\t\t_, err := buf.WriteString(fmt.Sprintf(\"%s = %d\\n\", key, v))\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\tcase *net.IPNet:\n\t\t\t_, err := buf.WriteString(fmt.Sprintf(\"%s = \\\"%s\\\"\\n\", key, 
v.String()))\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\tdefault:\n\t\t\treturn \"\", fmt.Errorf(\"ignoring unknown var key='%s' type='%#+v'\", key, v)\n\t\t}\n\t}\n\treturn buf.String(), nil\n}", "func readMap(filename string) (map[string]string, error) {\n\tnames := make(map[string]string)\n\tinput, err := os.Open(filename)\n\tdefer func() {\n\t\tif closeErr := input.Close(); closeErr != nil {\n\t\t\terr = closeErr\n\t\t}\n\t}()\n\n\tif err != nil {\n\t\treturn names, err\n\t}\n\tscanner := bufio.NewScanner(input)\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\tsplit := strings.Split(line, \"\\t\")\n\t\tnames[split[0]] = split[1]\n\t}\n\n\tif err := scanner.Err(); err != nil {\n\t\treturn names, err\n\t}\n\n\treturn names, nil\n}", "func parseMapNames(adabasRequest *adatypes.Request, x interface{}) (err error) {\n\trepository := x.(*Repository)\n\tadatypes.Central.Log.Debugf(\"Search for map ... \" + mapFieldName.fieldName())\n\tv := adabasRequest.Definition.Search(mapFieldName.fieldName())\n\tif v == nil {\n\t\treturn adatypes.NewGenericError(28, mapFieldName.fieldName())\n\t}\n\tname := v.String()\n\trepository.Lock()\n\tdefer repository.Unlock()\n\tif f, ok := repository.mapNames[name]; ok {\n\t\tf.found = true\n\t} else {\n\t\trepository.mapNames[name] = &mapNameFlags{isn: adabasRequest.Isn, found: true}\n\t}\n\treturn\n}", "func (o *ExportDataPartial) FromMap(kv map[string]interface{}) {\n\tif val, ok := kv[\"job_id\"].(*string); ok {\n\t\to.JobID = val\n\t} else if val, ok := kv[\"job_id\"].(string); ok {\n\t\to.JobID = &val\n\t} else {\n\t\tif val, ok := kv[\"job_id\"]; ok {\n\t\t\tif val == nil {\n\t\t\t\to.JobID = pstrings.Pointer(\"\")\n\t\t\t} else {\n\t\t\t\t// if coming in as map, convert it back\n\t\t\t\tif kv, ok := val.(map[string]interface{}); ok {\n\t\t\t\t\tval = kv[\"string\"]\n\t\t\t\t}\n\t\t\t\to.JobID = pstrings.Pointer(fmt.Sprintf(\"%v\", val))\n\t\t\t}\n\t\t}\n\t}\n\tif val, ok := kv[\"objects\"].(*string); ok 
{\n\t\to.Objects = val\n\t} else if val, ok := kv[\"objects\"].(string); ok {\n\t\to.Objects = &val\n\t} else {\n\t\tif val, ok := kv[\"objects\"]; ok {\n\t\t\tif val == nil {\n\t\t\t\to.Objects = pstrings.Pointer(\"\")\n\t\t\t} else {\n\t\t\t\t// if coming in as map, convert it back\n\t\t\t\tif kv, ok := val.(map[string]interface{}); ok {\n\t\t\t\t\tval = kv[\"string\"]\n\t\t\t\t}\n\t\t\t\to.Objects = pstrings.Pointer(fmt.Sprintf(\"%v\", val))\n\t\t\t}\n\t\t}\n\t}\n\to.setDefaults(false)\n}", "func Parse(bytes []byte) (_ map[string]interface{}, returnedError error) {\n\tstringBytes := string(bytes)\n\n\t// Handle errors\n\tdefer CatchPanic(&returnedError, \"failed to parse value \" + stringBytes)\n\n\tresult := ToMap(js.Global.Get(\"JSON\").Call(\"parse\", stringBytes))\n\treturn result, nil\n}", "func MapStringStringPointer(in map[string]string) (out map[string]*string) {\n\tout = make(map[string]*string, 0)\n\tfor k, v := range in {\n\t\tout[k] = StringPointer(v)\n\t}\n\treturn\n}", "func ConvertToMap(input string, kind string) (map[string]string, error) {\n\tresult := make(map[string]string)\n\tif input == \"\" {\n\t\treturn result, nil\n\t}\n\tinputs := strings.Split(input, \",\")\n\treturn ConvertSliceToMap(inputs, kind)\n}", "func parseParams(s string) (map[string]string, string, error) {\n if len(s) < 1 || s[0] != paramDelimOpen {\n return nil, \"\", syntaxError(fmt.Errorf(\"Invalid parameters; expected '%v', got '%v'\", string(paramDelimOpen), string(s[0])))\n }else{\n s = s[1:]\n }\n \n params := make(map[string]string)\n for len(s) > 0 {\n _, s = scan.White(s)\n \n if len(s) < 1 {\n return nil, \"\", syntaxError(fmt.Errorf(\"Unexpected end of parameters\"))\n }\n if s[0] == paramDelimClose {\n s = s[1:]\n break\n }\n if s[0] == paramDelimList {\n s = s[1:]\n continue\n }\n \n var k, v string\n var err error\n k, v, s, err = parseKeyValue(s)\n if err != nil {\n return nil, \"\", err\n }\n \n params[k] = v\n }\n \n return params, s, nil\n}", "func 
FromStringDict(p *Prototype, d StringDict) *Prototype {\n\ts := &Prototype{\n\t\tconstructor: p.constructor,\n\t\tparents: []parent{{\"parent\", p}},\n\t\tentries: make(entries, 0, len(d)),\n\t}\n\tfor k, v := range d {\n\t\ts.entries = append(s.entries, protoSlot{k, v})\n\t}\n\tsort.Sort(s.entries)\n\treturn s\n}", "func FromString(tagStr string) (Tagging, error) {\n\ttags, err := url.ParseQuery(tagStr)\n\tif err != nil {\n\t\treturn Tagging{}, err\n\t}\n\tvar idx = 0\n\tparsedTags := make([]Tag, len(tags))\n\tfor k := range tags {\n\t\tparsedTags[idx].Key = k\n\t\tparsedTags[idx].Value = tags.Get(k)\n\t\tidx++\n\t}\n\treturn Tagging{\n\t\tTagSet: TagSet{\n\t\t\tTags: parsedTags,\n\t\t},\n\t}, nil\n}", "func parseMap(\n\tscan *scanner,\n\tendDelim tokenType,\n\tdecorator string,\n) (*mapNode, error) {\n\taMap := &mapNode{children: []Node{}, decorator: decorator}\n\tkeys := make(map[string]struct{})\n\n\tfor {\n\t\t// scan the key\n\t\tkeyStart := scan.nextOffset\n\t\tkeyNode, keyErr := parseValue(scan, true, endDelim, \"\")\n\t\tkeyEnd := scan.nextOffset\n\t\tif keyErr != nil {\n\t\t\treturn nil, keyErr\n\t\t}\n\t\tif keyNode == nil {\n\t\t\treturn aMap, nil\n\t\t}\n\t\tif _, ok := keys[keyNode.Value()]; ok {\n\t\t\treturn nil, newParseError(\n\t\t\t\tfmt.Sprintf(\"Duplicate key %s\", keyNode.Value()),\n\t\t\t\tscan,\n\t\t\t\tkeyStart,\n\t\t\t\tkeyEnd-keyStart,\n\t\t\t)\n\t\t}\n\n\t\t// read the delimiter\n\t\tdelimToken := scan.Token()\n\t\tif delimToken.Type != mapKVDelimToken {\n\t\t\treturn nil, newParseError(\n\t\t\t\t\"Illegal token, expected map delimiter `=`\",\n\t\t\t\tscan,\n\t\t\t\tdelimToken.Offset,\n\t\t\t\tlen(delimToken.Content),\n\t\t\t)\n\t\t}\n\n\t\t// read and append the value\n\t\tvalNode, valErr := parseValue(scan, false, endDelim, \"\")\n\t\tif valErr != nil {\n\t\t\treturn nil, valErr\n\t\t}\n\t\tif valNode == nil {\n\t\t\treturn nil, newParseError(\n\t\t\t\t\"Illegal token, expected map value, got 
EOF\",\n\t\t\t\tscan,\n\t\t\t\tlen(scan.src),\n\t\t\t\t0,\n\t\t\t)\n\t\t}\n\n\t\taMap.children = append(aMap.children, keyNode, valNode)\n\t\tkeys[keyNode.Value()] = struct{}{}\n\t}\n}", "func Struct2Map(v interface{}) (map[string]interface{}, error) {\n bytes, err := json.Marshal(v)\n if err != nil {\n return nil, err\n }\n data := make(map[string]interface{})\n if err := json.Unmarshal(bytes, &data); err != nil {\n return nil, err\n }\n return data, nil\n}", "func (t *Template) ExecuteMapString(m map[string]string) (string, error) {\n\tmInterface := make(map[string]interface{})\n\tfor key, value := range m {\n\t\tmInterface[key] = value\n\t}\n\treturn t.ExecuteString(mInterface)\n}", "func ExampleUnmarshal_map() {\n\tstr := \"{\\\"a\\\":1,\\\"b\\\":2,\\\"c\\\":3}\"\n\tobj, err := Unmarshal([]byte(str))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tm, ok := obj.(map[string]interface{})\n\tif ok {\n\t\tif v, ok := m[\"b\"]; ok {\n\t\t\tfmt.Println(\"B:\", v)\n\t\t}\n\t}\n\t// Output: B: 2\n}", "func (o *Echo) FromMap(kv map[string]interface{}) {\n\n\to.ID = \"\"\n\n\t// if coming from db\n\tif id, ok := kv[\"_id\"]; ok && id != \"\" {\n\t\tkv[\"id\"] = id\n\t}\n\n\tif val, ok := kv[\"id\"].(string); ok {\n\t\to.ID = val\n\t} else {\n\t\tif val, ok := kv[\"id\"]; ok {\n\t\t\tif val == nil {\n\t\t\t\to.ID = \"\"\n\t\t\t} else {\n\t\t\t\tif m, ok := val.(map[string]interface{}); ok {\n\t\t\t\t\tval = pjson.Stringify(m)\n\t\t\t\t}\n\t\t\t\to.ID = fmt.Sprintf(\"%v\", val)\n\t\t\t}\n\t\t}\n\t}\n\n\tif val, ok := kv[\"message\"].(*string); ok {\n\t\to.Message = val\n\t} else if val, ok := kv[\"message\"].(string); ok {\n\t\to.Message = &val\n\t} else {\n\t\tif val, ok := kv[\"message\"]; ok {\n\t\t\tif val == nil {\n\t\t\t\to.Message = pstrings.Pointer(\"\")\n\t\t\t} else {\n\t\t\t\t// if coming in as map, convert it back\n\t\t\t\tif kv, ok := val.(map[string]interface{}); ok {\n\t\t\t\t\tval = kv[\"string\"]\n\t\t\t\t}\n\t\t\t\to.Message = 
pstrings.Pointer(fmt.Sprintf(\"%v\", val))\n\t\t\t}\n\t\t}\n\t}\n\n\tif val, ok := kv[\"updated_ts\"].(int64); ok {\n\t\to.UpdatedAt = val\n\t} else {\n\t\tif val, ok := kv[\"updated_ts\"]; ok {\n\t\t\tif val == nil {\n\t\t\t\to.UpdatedAt = number.ToInt64Any(nil)\n\t\t\t} else {\n\t\t\t\tif tv, ok := val.(time.Time); ok {\n\t\t\t\t\tval = datetime.TimeToEpoch(tv)\n\t\t\t\t}\n\t\t\t\to.UpdatedAt = number.ToInt64Any(val)\n\t\t\t}\n\t\t}\n\t}\n\to.setDefaults(false)\n}", "func parseReplicationInfo(m map[string]string) map[string]string {\n\t/*\n\t\trole:master\n\t\tconnected_slaves:1\n\t\tslave0:ip=10.1.1.228,port=7004,state=online,offset=3689968249,lag=1\n\t\tmaster_replid:17270cf205f7c98c4c8e80c348fd0564132e6643\n\t\tmaster_replid2:0000000000000000000000000000000000000000\n\t\t...\n\t\t...\n\t*/\n\tif len(m) < 1 {\n\t\treturn nil\n\t}\n\tslaveReg, _ := regexp.Compile(\"^slave([0-9]*)\")\n\tslaveMapping := make(map[string]string)\n\n\ttmpInfolines := make([]string, 0)\n\tfor key, value := range m {\n\t\tif !slaveReg.MatchString(key) {\n\t\t\tcontinue\n\t\t}\n\t\tinfoss := strings.Split(value, \",\")\n\t\tif len(infoss) < 2 {\n\t\t\treturn nil\n\t\t}\n\t\tfor _, info := range infoss {\n\t\t\t// ip=10.1.1.228,...\n\t\t\tinfoLine := strings.Split(info, \"=\")\n\t\t\ttmpInfolines = append(tmpInfolines, infoLine...)\n\t\t}\n\t\targs := sliceStr2Dict(tmpInfolines)\n\t\tslaveMapping[key] =\n\t\t\tstrings.Join(\n\t\t\t\tstrings.Split(fieldSplicing(args, \"ip\", \"port\"), \",\"),\n\t\t\t\t\":\",\n\t\t\t)\n\t}\n\treturn slaveMapping\n}", "func JSONStringToMap(result string) map[string]interface{} {\n\tjsonPD := make(map[string]interface{})\n\tbyt := []byte(result)\n\terr := json.Unmarshal(byt, &jsonPD)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\treturn jsonPD\n}", "func HStoreFromStringMap(val map[string]string) driver.Valuer {\n\treturn hstoreFromStringMap{val: val}\n}" ]
[ "0.6854407", "0.64725333", "0.6340307", "0.6208719", "0.6053783", "0.60045505", "0.59692", "0.59418225", "0.5891847", "0.587586", "0.5810818", "0.5806822", "0.57869965", "0.5742572", "0.5738563", "0.5701027", "0.5696601", "0.56867707", "0.5680063", "0.56742865", "0.5658497", "0.56447077", "0.56360906", "0.5624283", "0.5600453", "0.55947006", "0.55818576", "0.5553858", "0.55518186", "0.5547133", "0.5547107", "0.5547093", "0.5547093", "0.55439526", "0.5529169", "0.54869944", "0.54855365", "0.54703444", "0.545567", "0.5454893", "0.54539347", "0.54533285", "0.545068", "0.5449693", "0.54453343", "0.5412132", "0.5406508", "0.5398373", "0.5392189", "0.53886265", "0.5386957", "0.537405", "0.5353937", "0.53497416", "0.534658", "0.5341116", "0.5338327", "0.5338122", "0.533006", "0.5328709", "0.5320643", "0.5316027", "0.5315217", "0.53062326", "0.53052986", "0.5303467", "0.53020245", "0.52985", "0.5288163", "0.5287507", "0.52757317", "0.5269528", "0.52652395", "0.5262892", "0.5257531", "0.52514076", "0.52457285", "0.5241201", "0.5235464", "0.5226512", "0.5226221", "0.5223205", "0.52178067", "0.52164394", "0.52160025", "0.52157617", "0.5208099", "0.51996887", "0.5199441", "0.5193934", "0.51916635", "0.5181195", "0.517677", "0.51639223", "0.51472175", "0.51416904", "0.5120971", "0.5106265", "0.5102214", "0.50949275", "0.50899106" ]
0.0
-1
Compile will compile solution if not yet compiled. The compilation prosess will execute compile script of the language. It will use debugcompile script when debug parameter is true. When debug is true, but the language is not debuggable (doesn't contain debugcompile script), an ErrLanguageNotDebuggable error will returned. This function will execute the compilation script (could be compile/debugcompile) that defined in language definition. This execution could be skipped when the solution already compiled before.
func (cptool *CPTool) Compile(ctx context.Context, solution Solution, debug bool) (CompilationResult, error) { language := solution.Language if debug && !language.Debuggable { return CompilationResult{}, ErrLanguageNotDebuggable } targetDir := cptool.getCompiledDirectory(solution, debug) cptool.fs.MkdirAll(targetDir, os.ModePerm) targetPath := cptool.getCompiledTarget(solution, debug) if cptool.logger != nil { cptool.logger.Println(logger.VERBOSE, "Compiling to: ", targetPath) } info, err := cptool.fs.Stat(targetPath) if err == nil { compiledTime := info.ModTime() if compiledTime.After(solution.LastUpdated) { return CompilationResult{ Skipped: true, TargetPath: targetPath, }, nil } } commandPath := language.CompileScript if debug { commandPath = language.DebugScript } if cptool.logger != nil { cptool.logger.Println(logger.VERBOSE, "Compiling using script: ", commandPath) } cmd := cptool.exec.CommandContext(ctx, commandPath, solution.Path, targetPath) stderr, err := cmd.StderrPipe() if err != nil { return CompilationResult{}, err } err = cmd.Start() if err != nil { return CompilationResult{}, err } compilationError, err := ioutil.ReadAll(stderr) if err != nil { return CompilationResult{}, err } err = cmd.Wait() if err != nil { if cptool.logger != nil { cptool.logger.Print(logger.VERBOSE, "Compilation script execution giving error result") } return CompilationResult{ErrorMessage: string(compilationError)}, err } return CompilationResult{ Skipped: false, TargetPath: targetPath, }, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Compile(ctx context.Context, targets []string) error {\n\tlog := logger.NewDefault(\"compile\")\n\tlog.SetLogLevel(logger.LevelInfo)\n\tif consts.IsDebugMode(ctx) {\n\t\tlog.SetLogLevel(logger.LevelDebug)\n\t}\n\n\tconfigManager, err := configmanager.NewConfigManager(log)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpluginManager, err := pluginmanager.NewPluginManager(pluginmanager.NewConfig(), log)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcompilerManager, err := compilermanager.NewCompilerManager(ctx, log, configManager, pluginManager)\n\tif err != nil {\n\t\treturn err\n\t}\n\tactionManager, err := actionmanager.NewActionManager(log)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tconfigItems, err := StepLookUpConfigs(ctx, targets, configManager)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := StepInstallProtoc(ctx, pluginManager, configItems); err != nil {\n\t\treturn err\n\t}\n\tif err := StepInstallRepositories(ctx, pluginManager, configItems); err != nil {\n\t\treturn err\n\t}\n\tif err := StepInstallPlugins(ctx, pluginManager, configItems); err != nil {\n\t\treturn err\n\t}\n\tif err := StepCompile(ctx, compilerManager, targets); err != nil {\n\t\treturn err\n\t}\n\n\tif !consts.IsDisableAction(ctx) {\n\t\tif err := StepPostAction(ctx, actionManager, configItems); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := StepPostShell(ctx, actionManager, configItems); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tdisplayWarn := false\n\t\tfor _, configItem := range configItems {\n\t\t\tif len(configItem.Config().PostActions) > 0 || configItem.Config().PostShell != \"\" {\n\t\t\t\tdisplayWarn = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif displayWarn {\n\t\t\tlog.LogWarn(nil, \"PostAction and PostShell is skipped. If you need to allow execution, please append '-p' to command flags to enable\")\n\t\t}\n\t}\n\n\tlog.LogInfo(nil, \"Good job! 
you are ready to go :)\")\n\treturn nil\n}", "func (cptool *CPTool) CompileByName(ctx context.Context, languageName string, solutionName string, debug bool) (CompilationResult, error) {\n\tstart := time.Now()\n\n\tlanguage, err := cptool.GetLanguageByName(languageName)\n\tif err != nil {\n\t\treturn CompilationResult{}, err\n\t}\n\tif cptool.logger != nil {\n\t\tcptool.logger.Println(logger.VERBOSE, \"Compiling using language:\", language.Name)\n\t}\n\n\tsolution, err := cptool.GetSolution(solutionName, language)\n\tif err != nil {\n\t\treturn CompilationResult{}, err\n\t}\n\tif cptool.logger != nil {\n\t\tcptool.logger.Println(logger.VERBOSE, \"Compiling solution:\", solution.Name)\n\t}\n\n\tresult, err := cptool.Compile(ctx, solution, debug)\n\tif err != nil {\n\t\treturn result, err\n\t}\n\tresult.Duration = time.Since(start)\n\treturn result, nil\n}", "func Compile(resolved *resolver.ResolvedProgram) (compiledProg *Program, err error) {\n\tdefer func() {\n\t\t// The compiler uses panic with a *compileError to signal compile\n\t\t// errors internally, and they're caught here. This avoids the\n\t\t// need to check errors everywhere.\n\t\tif r := recover(); r != nil {\n\t\t\t// Convert to compileError or re-panic\n\t\t\terr = r.(*compileError)\n\t\t}\n\t}()\n\n\tp := &Program{}\n\n\t// Reuse identical constants across entire program.\n\tindexes := constantIndexes{\n\t\tnums: make(map[float64]int),\n\t\tstrs: make(map[string]int),\n\t\tregexes: make(map[string]int),\n\t}\n\n\t// Compile functions. 
For functions called before they're defined or\n\t// recursive functions, we have to set most p.Functions data first, then\n\t// compile Body afterward.\n\tp.Functions = make([]Function, len(resolved.Functions))\n\tfor i, astFunc := range resolved.Functions {\n\t\tarrays := make([]bool, len(astFunc.Params))\n\t\tnumArrays := 0\n\t\tfor j, param := range astFunc.Params {\n\t\t\t_, info, _ := resolved.LookupVar(astFunc.Name, param)\n\t\t\tif info.Type == resolver.Array {\n\t\t\t\tarrays[j] = true\n\t\t\t\tnumArrays++\n\t\t\t}\n\t\t}\n\t\tcompiledFunc := Function{\n\t\t\tName: astFunc.Name,\n\t\t\tParams: astFunc.Params,\n\t\t\tArrays: arrays,\n\t\t\tNumScalars: len(astFunc.Params) - numArrays,\n\t\t\tNumArrays: numArrays,\n\t\t}\n\t\tp.Functions[i] = compiledFunc\n\t}\n\tfor i, astFunc := range resolved.Functions {\n\t\tc := compiler{resolved: resolved, program: p, indexes: indexes, funcName: astFunc.Name}\n\t\tc.stmts(astFunc.Body)\n\t\tp.Functions[i].Body = c.finish()\n\t}\n\n\t// Compile BEGIN blocks.\n\tfor _, stmts := range resolved.Begin {\n\t\tc := compiler{resolved: resolved, program: p, indexes: indexes}\n\t\tc.stmts(stmts)\n\t\tp.Begin = append(p.Begin, c.finish()...)\n\t}\n\n\t// Compile pattern-action blocks.\n\tfor _, action := range resolved.Actions {\n\t\tvar pattern [][]Opcode\n\t\tswitch len(action.Pattern) {\n\t\tcase 0:\n\t\t\t// Always considered a match\n\t\tcase 1:\n\t\t\tc := compiler{resolved: resolved, program: p, indexes: indexes}\n\t\t\tc.expr(action.Pattern[0])\n\t\t\tpattern = [][]Opcode{c.finish()}\n\t\tcase 2:\n\t\t\tc := compiler{resolved: resolved, program: p, indexes: indexes}\n\t\t\tc.expr(action.Pattern[0])\n\t\t\tpattern = append(pattern, c.finish())\n\t\t\tc = compiler{resolved: resolved, program: p, indexes: indexes}\n\t\t\tc.expr(action.Pattern[1])\n\t\t\tpattern = append(pattern, c.finish())\n\t\t}\n\t\tvar body []Opcode\n\t\tif len(action.Stmts) > 0 {\n\t\t\tc := compiler{resolved: resolved, program: p, indexes: 
indexes}\n\t\t\tc.stmts(action.Stmts)\n\t\t\tbody = c.finish()\n\t\t}\n\t\tp.Actions = append(p.Actions, Action{\n\t\t\tPattern: pattern,\n\t\t\tBody: body,\n\t\t})\n\t}\n\n\t// Compile END blocks.\n\tfor _, stmts := range resolved.End {\n\t\tc := compiler{resolved: resolved, program: p, indexes: indexes}\n\t\tc.stmts(stmts)\n\t\tp.End = append(p.End, c.finish()...)\n\t}\n\n\t// Build slices that map indexes to names (for variables and functions).\n\t// These are only used for disassembly, but set them up here.\n\tresolved.IterVars(\"\", func(name string, info resolver.VarInfo) {\n\t\tif info.Type == resolver.Array {\n\t\t\tfor len(p.arrayNames) <= info.Index {\n\t\t\t\tp.arrayNames = append(p.arrayNames, \"\")\n\t\t\t}\n\t\t\tp.arrayNames[info.Index] = name\n\t\t} else {\n\t\t\tfor len(p.scalarNames) <= info.Index {\n\t\t\t\tp.scalarNames = append(p.scalarNames, \"\")\n\t\t\t}\n\t\t\tp.scalarNames[info.Index] = name\n\t\t}\n\t})\n\tresolved.IterFuncs(func(name string, info resolver.FuncInfo) {\n\t\tfor len(p.nativeFuncNames) <= info.Index {\n\t\t\tp.nativeFuncNames = append(p.nativeFuncNames, \"\")\n\t\t}\n\t\tp.nativeFuncNames[info.Index] = name\n\t})\n\n\treturn p, nil\n}", "func Compile(ctx context.Context, cli *client.Client, image string, lang string,\n\tpath string) (int, string) {\n\n\teval := \"\"\n\tswitch lang {\n\tcase \"cpp\":\n\t\teval = \"g++ -w -O2 /tests/data/a.cpp -o /tests/data/a.out 2>&1\"\n\t}\n\n\tresp, err := cli.ContainerCreate(ctx, &container.Config{\n\t\tImage: image,\n\t\tCmd: []string{\"/bin/bash\", \"-c\", eval},\n\t}, &container.HostConfig{\n\t\tMounts: []mount.Mount{\n\t\t\tmount.Mount{\n\t\t\t\tType: mount.TypeBind,\n\t\t\t\tSource: path,\n\t\t\t\tTarget: \"/tests/data\",\n\t\t\t},\n\t\t},\n\t}, nil, nil, \"\")\n\n\tif err != nil {\n\t\treturn 2, err.Error()\n\t}\n\n\tout, err := runContainer(ctx, cli, resp.ID)\n\tif err != nil {\n\t\treturn 2, err.Error()\n\t}\n\n\tr, _ := regexp.Compile(\"error\")\n\tif r.MatchString(out) 
{\n\t\treturn 0, out\n\t}\n\treturn 1, \"\"\n}", "func compile(dest string, src string, vars []string) error {\n\targs := []string{\n\t\t\"build\",\n\t\t\"-o\", dest,\n\t}\n\n\tif len(vars) > 0 {\n\t\targs = append(args, \"-ldflags\")\n\n\t\tfor idx, val := range vars {\n\t\t\tvars[idx] = \"-X \" + val\n\t\t}\n\n\t\tif Debug {\n\t\t\tvars = append(vars, \"-X main.debug=true\")\n\t\t}\n\n\t\targs = append(args, strings.Join(vars, \" \"))\n\t}\n\n\tt := time.Now()\n\n\toutput, err := exec.Command(\"go\", append(args, src)...).CombinedOutput()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Compile of %s failed: %s\", src, output)\n\t}\n\n\tdebugf(\"Compile %#v finished in %s\", args, time.Now().Sub(t))\n\treturn nil\n}", "func Compile(code string, ext vm.Externals) (vm.Program, parser.Messages) {\n\tinput := antlr.NewInputStream(code)\n\treturn compile(input, ext)\n}", "func RunCompiled(inv Invocation, exePath string, errlog *log.Logger) int {\n\tdebug.Println(\"running binary\", exePath)\n\tc := exec.Command(exePath, inv.Args...)\n\tc.Stderr = inv.Stderr\n\tc.Stdout = inv.Stdout\n\tc.Stdin = inv.Stdin\n\tc.Dir = inv.Dir\n\tif inv.WorkDir != inv.Dir {\n\t\tc.Dir = inv.WorkDir\n\t}\n\t// intentionally pass through unaltered os.Environ here.. 
your magefile has\n\t// to deal with it.\n\tc.Env = os.Environ()\n\tif inv.Verbose {\n\t\tc.Env = append(c.Env, \"MAGEFILE_VERBOSE=1\")\n\t}\n\tif inv.List {\n\t\tc.Env = append(c.Env, \"MAGEFILE_LIST=1\")\n\t}\n\tif inv.Help {\n\t\tc.Env = append(c.Env, \"MAGEFILE_HELP=1\")\n\t}\n\tif inv.Debug {\n\t\tc.Env = append(c.Env, \"MAGEFILE_DEBUG=1\")\n\t}\n\tif inv.GoCmd != \"\" {\n\t\tc.Env = append(c.Env, fmt.Sprintf(\"MAGEFILE_GOCMD=%s\", inv.GoCmd))\n\t}\n\tif inv.Timeout > 0 {\n\t\tc.Env = append(c.Env, fmt.Sprintf(\"MAGEFILE_TIMEOUT=%s\", inv.Timeout.String()))\n\t}\n\tdebug.Print(\"running magefile with mage vars:\\n\", strings.Join(filter(c.Env, \"MAGEFILE\"), \"\\n\"))\n\t// catch SIGINT to allow magefile to handle them\n\tsigCh := make(chan os.Signal, 1)\n\tsignal.Notify(sigCh, syscall.SIGINT)\n\tdefer signal.Stop(sigCh)\n\terr := c.Run()\n\tif !sh.CmdRan(err) {\n\t\terrlog.Printf(\"failed to run compiled magefile: %v\", err)\n\t}\n\treturn sh.ExitStatus(err)\n}", "func Compile(ctx context.Context, ui *ui.UI, discovered *discover.Discovered) (*Binaries, error) {\n\tegrp, ctx := errgroup.WithContext(ctx)\n\tbinaries := &Binaries{}\n\tif discovered.Local != nil {\n\t\tegrp.Go(func() error {\n\t\t\tpath, err := compile(ctx, ui, discovered.Local)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tbinaries.Local = Binary{Path: path}\n\t\t\treturn nil\n\t\t})\n\t}\n\tif discovered.Plan != nil {\n\t\tegrp.Go(func() error {\n\t\t\tpath, err := compile(ctx, ui, discovered.Plan)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tbinaries.Plan = Binary{Path: path}\n\t\t\treturn nil\n\t\t})\n\t}\n\n\tif err := egrp.Wait(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn binaries, nil\n}", "func Compile(input string, ops ...Option) (*vm.Program, error) {\n\tconfig := &conf.Config{\n\t\tOperators: make(map[string][]string),\n\t\tConstExprFns: make(map[string]reflect.Value),\n\t\tOptimize: true,\n\t}\n\n\tfor _, op := range ops 
{\n\t\top(config)\n\t}\n\n\tif err := config.Check(); err != nil {\n\t\treturn nil, err\n\t}\n\n\ttree, err := parser.Parse(input)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t_, err = checker.Check(tree, config)\n\n\t// If we have a patch to apply, it may fix out error and\n\t// second type check is needed. Otherwise it is an error.\n\tif err != nil && len(config.Visitors) == 0 {\n\t\treturn nil, err\n\t}\n\n\t// Patch operators before Optimize, as we may also mark it as ConstExpr.\n\tcompiler.PatchOperators(&tree.Node, config)\n\n\tif len(config.Visitors) >= 0 {\n\t\tfor _, v := range config.Visitors {\n\t\t\tast.Walk(&tree.Node, v)\n\t\t}\n\t\t_, err = checker.Check(tree, config)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif config.Optimize {\n\t\terr = optimizer.Optimize(&tree.Node, config)\n\t\tif err != nil {\n\t\t\tif fileError, ok := err.(*file.Error); ok {\n\t\t\t\treturn nil, fileError.Bind(tree.Source)\n\t\t\t}\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tprogram, err := compiler.Compile(tree, config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn program, nil\n}", "func (c *CLI) ExecDebug() error {\n\tpp.Fprintln(c.Stdout, c.Context)\n\tpp.Fprintln(c.Stdout, c.Config)\n\treturn nil\n}", "func Compile(state *lua.LState, moonscriptCode string) (string, error) {\n\tmoonbundle, err := Asset(\"moon-bundle.lua\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tstate.SetGlobal(\"_moonbundle_code\", lua.LString(moonbundle))\n\tstate.SetGlobal(\"__moonscript_code\", lua.LString(moonscriptCode))\n\n\terr = state.DoString(`\n package.loaded.moonscript = loadstring(_moonbundle_code)()\n\n local moonparse = require(\"moonscript.parse\")\n local mooncompile = require(\"moonscript.compile\")\n\n local tree, err = moonparse.string(__moonscript_code)\n if not tree then\n print(\"gmoonscript error: unable to parse moonscript, check formatting!\")\n else\n __output_lua_code_, err = mooncompile.tree(tree)\n end\n\n -- remove all created 
modules and vars\n package.loaded.moonscript = nil\n moonparse = nil\n mooncompile = nil\n\n _moonbundle_code = nil\n __moonscript_code = nil\n collectgarbage()\n `)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tluaOutput := state.GetGlobal(\"__output_lua_code_\")\n\tstate.SetGlobal(\"__output_lua_code\", lua.LNil)\n\n\treturn luaOutput.String(), nil\n}", "func noCompile(runCommandTemplate []string, include func(string) bool, language apipb.LanguageGroup) compileFunc {\n\treturn func(program *apipb.Program, outputBase util.FileBase) (*Compilation, error) {\n\t\tvar filteredPaths []string\n\t\tfor _, file := range program.Sources {\n\t\t\tif include(file.Path) {\n\t\t\t\tfilteredPaths = append(filteredPaths, file.Path)\n\t\t\t}\n\t\t}\n\t\tif len(filteredPaths) == 0 {\n\t\t\treturn &Compilation{CompilerErrors: \"No valid source files found\"}, nil\n\t\t}\n\n\t\trunCommand := substituteFiles(runCommandTemplate, filteredPaths)\n\t\treturn &Compilation{\n\t\t\tProgram: &apipb.CompiledProgram{\n\t\t\t\tProgramRoot: outputBase.Path(),\n\t\t\t\tRunCommand: runCommand,\n\t\t\t\tLanguage: language,\n\t\t\t}}, nil\n\t}\n}", "func Compile(goos, goarch, ldflags, magePath, goCmd, compileTo string, gofiles []string, isDebug bool, stderr, stdout io.Writer) error {\n\tdebug.Println(\"compiling to\", compileTo)\n\tdebug.Println(\"compiling using gocmd:\", goCmd)\n\tif isDebug {\n\t\tinternal.RunDebug(goCmd, \"version\")\n\t\tinternal.RunDebug(goCmd, \"env\")\n\t}\n\tenviron, err := internal.EnvWithGOOS(goos, goarch)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// strip off the path since we're setting the path in the build command\n\tfor i := range gofiles {\n\t\tgofiles[i] = filepath.Base(gofiles[i])\n\t}\n\tbuildArgs := []string{\"build\", \"-o\", compileTo}\n\tif ldflags != \"\" {\n\t\tbuildArgs = append(buildArgs, \"-ldflags\", ldflags)\n\t}\n\targs := append(buildArgs, gofiles...)\n\n\tdebug.Printf(\"running %s %s\", goCmd, strings.Join(args, \" \"))\n\tc := 
exec.Command(goCmd, args...)\n\tc.Env = environ\n\tc.Stderr = stderr\n\tc.Stdout = stdout\n\tc.Dir = magePath\n\tstart := time.Now()\n\terr = c.Run()\n\tdebug.Println(\"time to compile Magefile:\", time.Since(start))\n\tif err != nil {\n\t\treturn errors.New(\"error compiling magefiles\")\n\t}\n\treturn nil\n}", "func (c *Context) run(language, code, stdinGlob string) (string, Message) {\n\tlog.Printf(\"launching new %s sandbox\", language)\n\t// log.Printf(\"launching sandbox...\\nLanguage: %s\\nStdin: %sCode: Hidden\\n\", language, stdinGlob)\n\n\tlang, ok := c.compilers[strings.ToLower(language)]\n\tif !ok || lang.Disabled == \"true\" {\n\t\treturn \"\", Message{\"error\", \"language not supported\"}\n\t}\n\n\tif code == \"\" {\n\t\treturn \"\", Message{\"error\", \"no code submitted\"}\n\t}\n\n\tsb, err := newSandbox(lang.ExecutionDetails, code, stdinGlob, c.options)\n\tif err != nil {\n\t\tlog.Printf(\"sandbox initialization error: %v\", err)\n\t\treturn \"\", Message{\"error\", fmt.Sprintf(\"%s\", err)}\n\t}\n\n\t// run the new sandbox\n\toutput, err := sb.run()\n\tif err != nil {\n\t\tlog.Printf(\"sandbox run error: %v\", err)\n\t\treturn output, Message{\"error\", fmt.Sprintf(\"%s\", err)}\n\t}\n\n\tsplitOutput := strings.SplitN(output, \"*-COMPILEBOX::ENDOFOUTPUT-*\", 2)\n\ttimeTaken := splitOutput[1]\n\tresult := splitOutput[0]\n\n\treturn result, Message{\"success\", \"compilation took \" + timeTaken + \" seconds\"}\n}", "func StepCompile(ctx context.Context,\n\tcompilerManager compilermanager.CompilerManager,\n\ttargets []string,\n) error {\n\tprogress := progressbar.GetProgressBar(ctx, len(targets))\n\tprogress.SetPrefix(\"Compile Proto Files\")\n\tc := concurrent.NewErrGroup(ctx, 10)\n\tfor _, target := range targets {\n\t\tfunc(target string) {\n\t\t\tc.Go(func(ctx context.Context) error {\n\t\t\t\tprogress.SetSuffix(target)\n\t\t\t\tcomp, err := compilerManager.GetCompiler(ctx, target)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn 
err\n\t\t\t\t}\n\t\t\t\tif err := comp.Compile(ctx, target); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tprogress.Incr()\n\t\t\t\treturn nil\n\t\t\t})\n\t\t}(target)\n\t}\n\tif err := c.Wait(); err != nil {\n\t\treturn err\n\t}\n\tprogress.Wait()\n\treturn nil\n}", "func runCCompiler(command string, flags ...string) error {\n\tswitch command {\n\tcase \"clang\":\n\t\t// Compile this with the internal Clang compiler.\n\t\theaderPath := getClangHeaderPath(goenv.Get(\"TINYGOROOT\"))\n\t\tif headerPath == \"\" {\n\t\t\treturn errors.New(\"could not locate Clang headers\")\n\t\t}\n\t\tflags = append(flags, \"-I\"+headerPath)\n\t\tflags = append([]string{\"tinygo:\" + command}, flags...)\n\t\tvar cflag *C.char\n\t\tbuf := C.calloc(C.size_t(len(flags)), C.size_t(unsafe.Sizeof(cflag)))\n\t\tcflags := (*[1 << 10]*C.char)(unsafe.Pointer(buf))[:len(flags):len(flags)]\n\t\tfor i, flag := range flags {\n\t\t\tcflag := C.CString(flag)\n\t\t\tcflags[i] = cflag\n\t\t\tdefer C.free(unsafe.Pointer(cflag))\n\t\t}\n\t\tok := C.tinygo_clang_driver(C.int(len(flags)), (**C.char)(buf))\n\t\tif !ok {\n\t\t\treturn errors.New(\"failed to compile using built-in clang\")\n\t\t}\n\t\treturn nil\n\tdefault:\n\t\t// Running some other compiler. 
Maybe it has been defined in the\n\t\t// commands map (unlikely).\n\t\tif cmdNames, ok := commands[command]; ok {\n\t\t\treturn execCommand(cmdNames, flags...)\n\t\t}\n\t\t// Alternatively, run the compiler directly.\n\t\tcmd := exec.Command(command, flags...)\n\t\tcmd.Stdout = os.Stdout\n\t\tcmd.Stderr = os.Stderr\n\t\treturn cmd.Run()\n\t}\n}", "func (svc *PipelineService) Compile(org, repo, ref string, opt *PipelineOptions) (*yaml.Build, *Response, error) {\n\t// set the API endpoint path we send the request to\n\tu := fmt.Sprintf(\"/api/v1/pipelines/%s/%s/%s/compile\", org, repo, ref)\n\n\t// add optional arguments if supplied\n\tu, err := addOptions(u, opt)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// yaml Build type we want to return\n\tv := new(yaml.Build)\n\n\t// send request using client\n\tresp, err := svc.client.Call(\"POST\", u, nil, v)\n\n\treturn v, resp, err\n}", "func ExecuteProjectBuild(source, target, descriptor string, extensions []string, phase string, getWd func() (string, error)) error {\n\tif phase != \"pre\" && phase != \"post\" {\n\t\treturn fmt.Errorf(UnsupportedPhaseMsg, phase)\n\t}\n\tloc, err := dir.Location(source, target, descriptor, extensions, getWd)\n\tif err != nil {\n\t\treturn err\n\t}\n\toMta, err := loc.ParseFile()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn execProjectBuilders(loc, oMta, phase)\n}", "func (p *Qlang) Exec(codeText []byte, fname string) (err error) {\n\n\tcode := p.cl.Code()\n\tstart := code.Len()\n\tend, err := p.Cl(codeText, fname)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif qcl.DumpCode != 0 {\n\t\tcode.Dump(start)\n\t}\n\n\tp.ExecBlock(start, end, p.cl.GlobalSymbols())\n\treturn\n}", "func (cp *Compiler) Compile(source, destination string) error {\n\tvar err error\n\n\tvar gccer = exec.Command(cp.Path, append(cp.Args, []string{source, \"-o\", destination}...)...)\n\n\tvar stderr = bytes.NewBuffer(make([]byte, 65536))\n\tgccer.Stderr = stderr\n\n\tif err = gccer.Run(); err != nil 
{\n\t\tvar ce = new(types.CompileError)\n\t\tce.ProcErr = err.Error()\n\t\tce.Info, err = ioutil.ReadAll(stderr)\n\t\tif err != nil {\n\t\t\tce.Info = []byte(err.Error())\n\t\t}\n\t\treturn ce\n\t}\n\treturn nil\n}", "func (svc *Compiler) Compile(source string, filename string) (*CompileResult, error) {\n\tsvc.ctx.Global().Set(\"source\", source)\n\tsvc.ctx.Global().Set(\"filename\", filename)\n\tval, err := svc.ctx.RunScript(\"compile(source, { filename });\", \"compile_call\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"%+v\", err)\n\t}\n\tresult := CompileResult{}\n\terr = json.Unmarshal([]byte(val.String()), &result)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &result, nil\n}", "func compile(pack *godata.GoPackage) bool {\n\tvar argc int\n\tvar argv []string\n\tvar argvFilled int\n\tvar objDir = \"\" //outputDirPrefix + getObjDir();\n\n\t// check for recursive dependencies\n\tif pack.InProgress {\n\t\tlogger.Error(\"Found a recurisve dependency in %s. This is not supported in Go.\\n\", pack.Name)\n\t\tpack.HasErrors = true\n\t\tpack.InProgress = false\n\t\treturn false\n\t}\n\n\tpack.InProgress = true\n\n\t// first compile all dependencies\n\tfor _, idep := range *pack.Depends {\n\t\tdep := idep.(*godata.GoPackage)\n\t\tif dep.HasErrors {\n\t\t\tpack.HasErrors = true\n\t\t\tpack.InProgress = false\n\t\t\treturn false\n\t\t}\n\n\t\tif !dep.Compiled &&\n\t\t\t(dep.Type == godata.LOCAL_PACKAGE ||\n\t\t\t\tdep.Type == godata.UNKNOWN_PACKAGE && dep.Files.Len() > 0) {\n\t\t\tif !compile(dep) {\n\t\t\t\tpack.HasErrors = true\n\t\t\t\tpack.InProgress = false\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\n\t// cgo files (the ones which import \"C\") can't be compiled\n\t// at the moment. They need to be compiled by hand into .a files.\n\tif pack.HasCGOFiles() {\n\t\tif pack.HasExistingAFile() {\n\t\t\tpack.Compiled = true\n\t\t\tpack.InProgress = false\n\t\t\treturn true\n\t\t} else {\n\t\t\tlogger.Error(\"Can't compile cgo files. 
Please manually compile them.\\n\")\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\n\t// check if this package has any files (if not -> error)\n\tif pack.Files.Len() == 0 && pack.Type == godata.LOCAL_PACKAGE {\n\t\tlogger.Error(\"No files found for package %s.\\n\", pack.Name)\n\t\tos.Exit(1)\n\t}\n\n\t// if the outputDirPrefix points to something, subdirectories\n\t// need to be created if they don't already exist\n\toutputFile := objDir + pack.OutputFile\n\tif strings.Index(outputFile, \"/\") != -1 {\n\t\tpath := outputFile[0:strings.LastIndex(outputFile, \"/\")]\n\t\tdir, err := os.Stat(path)\n\t\tif err != nil {\n\t\t\terr = os.MkdirAll(path, rootPathPerm)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Error(\"Could not create output path %s: %s\\n\", path, err)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t} else if !dir.IsDirectory() {\n\t\t\tlogger.Error(\"File found in %s instead of a directory.\\n\", path)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\n\t// before compiling, remove any .a file\n\t// this is done because the compiler/linker looks for .a files\n\t// before it looks for .[568] files\n\tif !*flagKeepAFiles {\n\t\tif err := os.Remove(outputFile + \".a\"); err == nil {\n\t\t\tlogger.Debug(\"Removed file %s.a.\\n\", outputFile)\n\t\t}\n\t}\n\n\t// construct compiler command line arguments\n\tif pack.Name != \"main\" {\n\t\tlogger.Info(\"Compiling %s...\\n\", pack.Name)\n\t} else {\n\t\tlogger.Info(\"Compiling %s (%s)...\\n\", pack.Name, pack.OutputFile)\n\t}\n\n\targc = pack.Files.Len() + 3\n\tif *flagIncludePaths != \"\" {\n\t\targc += 2 * (strings.Count(*flagIncludePaths, \",\") + 1)\n\t}\n\tif pack.NeedsLocalSearchPath() || objDir != \"\" {\n\t\targc += 2\n\t}\n\tif pack.Name == \"main\" {\n\t\targc += 2\n\t}\n\targv = make([]string, argc*2)\n\n\targv[argvFilled] = compilerBin\n\targvFilled++\n\targv[argvFilled] = \"-o\"\n\targvFilled++\n\targv[argvFilled] = outputFile + objExt\n\targvFilled++\n\n\tif *flagIncludePaths != \"\" {\n\t\tfor _, includePath := range 
strings.Split(*flagIncludePaths, \",\", -1) {\n\t\t\targv[argvFilled] = \"-I\"\n\t\t\targvFilled++\n\t\t\targv[argvFilled] = includePath\n\t\t\targvFilled++\n\t\t}\n\t}\n\t// \tfor _, arg := range argv {\n\t// \t\tlogger.Info(arg)\n\t// \t\tlogger.Info(\" \")\n\t// \t}\n\t// \tlogger.Info(\"\\n\")\n\n\tif pack.NeedsLocalSearchPath() || objDir != \"\" {\n\t\targv[argvFilled] = \"-I\"\n\t\targvFilled++\n\t\tif objDir != \"\" {\n\t\t\targv[argvFilled] = objDir\n\t\t} else {\n\t\t\targv[argvFilled] = \".\"\n\t\t}\n\t\targvFilled++\n\t}\n\tif pack.Name == \"main\" {\n\t\targv[argvFilled] = \"-I\"\n\t\targvFilled++\n\t\targv[argvFilled] = \".\"\n\t\targvFilled++\n\t}\n\n\tfor i := 0; i < pack.Files.Len(); i++ {\n\t\tgf := pack.Files.At(i).(*godata.GoFile)\n\t\targv[argvFilled] = gf.Filename\n\t\targvFilled++\n\t}\n\n\tlogger.Info(\" %s\\n\", getCommandline(argv[0:argvFilled]))\n\tcmd, err := exec.Run(compilerBin, argv[0:argvFilled], os.Environ(), rootPath,\n\t\texec.DevNull, exec.PassThrough, exec.PassThrough)\n\tif err != nil {\n\t\tlogger.Error(\"%s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\twaitmsg, err := cmd.Wait(0)\n\tif err != nil {\n\t\tlogger.Error(\"Compiler execution error (%s), aborting compilation.\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tif waitmsg.ExitStatus() != 0 {\n\t\tpack.HasErrors = true\n\t\tpack.InProgress = false\n\t\treturn false\n\t}\n\n\t// it should now be compiled\n\tpack.Compiled = true\n\tpack.InProgress = false\n\n\treturn true\n}", "func compileAndRun(code []byte) (string, error) {\n\tconst name = \"tmp\"\n\tos.RemoveAll(name)\n\terr := os.Mkdir(name, 0700)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// write src\n\terr = ioutil.WriteFile(name+\"/main.go\", code, 0700)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// compile\n\tcmd := exec.Command(\"sh\", \"-c\", \"go build\")\n\tcmd.Dir = name\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"%v: %s\", err, out)\n\t}\n\n\t// execute\n\tcmd = 
exec.Command(\"sh\", \"-c\", \"./\"+name)\n\tcmd.Dir = name\n\tout, err = cmd.CombinedOutput()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"%v: %s\", err, out)\n\t}\n\n\tos.RemoveAll(name)\n\treturn string(out), nil\n}", "func (c *client) Compile(v interface{}) (*pipeline.Build, *library.Pipeline, error) {\n\tp, data, err := c.Parse(v, c.repo.GetPipelineType(), new(yaml.Template))\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// create the library pipeline object from the yaml configuration\n\t_pipeline := p.ToPipelineLibrary()\n\t_pipeline.SetData(data)\n\t_pipeline.SetType(c.repo.GetPipelineType())\n\n\t// validate the yaml configuration\n\terr = c.Validate(p)\n\tif err != nil {\n\t\treturn nil, _pipeline, err\n\t}\n\n\t// create map of templates for easy lookup\n\ttemplates := mapFromTemplates(p.Templates)\n\n\tevent := c.build.GetEvent()\n\taction := c.build.GetEventAction()\n\n\t// if the build has an event action, concatenate event and event action for matching\n\tif !strings.EqualFold(action, \"\") {\n\t\tevent = event + \":\" + action\n\t}\n\n\t// create the ruledata to purge steps\n\tr := &pipeline.RuleData{\n\t\tBranch: c.build.GetBranch(),\n\t\tComment: c.comment,\n\t\tEvent: event,\n\t\tPath: c.files,\n\t\tRepo: c.repo.GetFullName(),\n\t\tTag: strings.TrimPrefix(c.build.GetRef(), \"refs/tags/\"),\n\t\tTarget: c.build.GetDeploy(),\n\t}\n\n\tswitch {\n\tcase p.Metadata.RenderInline:\n\t\tnewPipeline, err := c.compileInline(p, c.TemplateDepth)\n\t\tif err != nil {\n\t\t\treturn nil, _pipeline, err\n\t\t}\n\t\t// validate the yaml configuration\n\t\terr = c.Validate(newPipeline)\n\t\tif err != nil {\n\t\t\treturn nil, _pipeline, err\n\t\t}\n\n\t\tif len(newPipeline.Stages) > 0 {\n\t\t\treturn c.compileStages(newPipeline, _pipeline, map[string]*yaml.Template{}, r)\n\t\t}\n\n\t\treturn c.compileSteps(newPipeline, _pipeline, map[string]*yaml.Template{}, r)\n\tcase len(p.Stages) > 0:\n\t\treturn c.compileStages(p, _pipeline, templates, 
r)\n\tdefault:\n\t\treturn c.compileSteps(p, _pipeline, templates, r)\n\t}\n}", "func buildAndCopyProgram(src io.Reader) error {\n\t// FIXME: BuildProgram should probably be in some other package,\n\t// so that it can be used by both the compiler tests and the\n\t// command line client.\n\td, err := ioutil.TempDir(\"\", \"langbuild\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tif debug {\n\t\tlog.Println(\"Using temporary directory\", d, \"(WARNING: will not automatically delete in debug mode)\")\n\t}\n\tif !debug {\n\t\tdefer os.RemoveAll(d)\n\t}\n\texe, err := codegen.BuildProgram(d, src)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif exe == \"\" {\n\t\treturn fmt.Errorf(\"No executable built.\")\n\t}\n\tcwd, err := os.Getwd()\n\tif err != nil {\n\t\treturn err\n\t}\n\tname := path.Base(cwd)\n\tif name == \".\" || name == \"\" || name == \"/\" {\n\t\tlog.Fatal(\"Could not determine appropriate executable name.\")\n\t}\n\treturn copyFile(d+\"/\"+exe, \"./\"+name)\n}", "func (t *TargetBuilder) Debug(extraJtagCmd string, reset bool, noGDB bool, elfFileOverride string) error {\n\tif err := t.PrepBuild(); err != nil {\n\t\treturn err\n\t}\n\n\tvar elfBase string // Everything except \".elf\"\n\n\tif elfFileOverride != \"\" {\n\t\t// The debug script appends \".elf\" to the basename. 
Make sure we can strip\n\t\t// the extension here and the script will reconstruct the original\n\t\t// filename.\n\t\telfBase = strings.TrimSuffix(elfFileOverride, \".elf\")\n\t\tif elfBase == elfFileOverride {\n\t\t\treturn util.FmtNewtError(\n\t\t\t\t\"invalid elf filename: must end in \\\".elf\\\": filename=%s\",\n\t\t\t\telfFileOverride)\n\t\t}\n\t}\n\n\tif t.LoaderBuilder == nil {\n\t\tif elfBase == \"\" {\n\t\t\telfBase = t.AppBuilder.AppBinBasePath()\n\t\t}\n\t\treturn t.debugApp(extraJtagCmd, reset, noGDB, elfBase)\n\t} else {\n\t\tif elfBase == \"\" {\n\t\t\telfBase = t.LoaderBuilder.AppBinBasePath()\n\t\t}\n\t\treturn t.debugLoader(extraJtagCmd, reset, noGDB, elfBase)\n\t}\n}", "func (c *Compiler) Compile(expr string) (*runtime.Program, error) {\n\tprogAST, err := parser.Parse(expr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, pass := range context.Passes {\n\t\terr = progAST.RunPass(c.ctx, pass)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tprog := c.ctx.Builder.Build()\n\tprog.ResultType = progAST.Type()\n\treturn prog, nil\n}", "func ExecBuild(makefileTmp, buildProjectCmdSrc, buildProjectCmdTrg string, extensions []string, buildProjectCmdMode, buildProjectCmdMtar, buildProjectCmdPlatform string, buildProjectCmdStrict bool, wdGetter func() (string, error), wdExec func([][]string, bool) error, useDefaultMbt bool) error {\n\t// Generate build script\n\terr := tpl.ExecuteMake(buildProjectCmdSrc, \"\", extensions, makefileTmp, buildProjectCmdMode, wdGetter, useDefaultMbt)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif buildProjectCmdTrg == \"\" {\n\t\terr = wdExec([][]string{{buildProjectCmdSrc, \"make\", \"-f\", makefileTmp, \"p=\" + buildProjectCmdPlatform, \"mtar=\" + buildProjectCmdMtar, \"strict=\" + strconv.FormatBool(buildProjectCmdStrict), \"mode=\" + buildProjectCmdMode}}, false)\n\t} else {\n\t\terr = wdExec([][]string{{buildProjectCmdSrc, \"make\", \"-f\", makefileTmp, \"p=\" + buildProjectCmdPlatform, \"mtar=\" + 
buildProjectCmdMtar, `t=\"` + buildProjectCmdTrg + `\"`, \"strict=\" + strconv.FormatBool(buildProjectCmdStrict), \"mode=\" + buildProjectCmdMode}}, false)\n\t}\n\t// Remove temporary Makefile\n\tremoveError := os.Remove(filepath.Join(buildProjectCmdSrc, filepath.FromSlash(makefileTmp)))\n\tif removeError != nil {\n\t\tremoveError = errors.Wrapf(removeError, removeFailedMsg, makefileTmp)\n\t}\n\n\tif err != nil {\n\t\tif removeError != nil {\n\t\t\tlogs.Logger.Error(removeError)\n\t\t}\n\t\treturn errors.Wrap(err, execFailedMsg)\n\t}\n\treturn removeError\n}", "func Compile(ctx context.Context, cln *client.Client, w io.Writer, mod *ast.Module, targets []codegen.Target) (solver.Request, error) {\n\terr := checker.SemanticPass(mod)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = linter.Lint(ctx, mod)\n\tif err != nil {\n\t\tfor _, span := range diagnostic.Spans(err) {\n\t\t\tfmt.Fprintln(w, span.Pretty(ctx))\n\t\t}\n\t}\n\n\terr = checker.Check(mod)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresolver, err := module.NewResolver(cln)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcg := codegen.New(cln, resolver)\n\tif solver.ConcurrencyLimiter(ctx) == nil {\n\t\tctx = solver.WithConcurrencyLimiter(ctx, semaphore.NewWeighted(defaultMaxConcurrency))\n\t}\n\treturn cg.Generate(ctx, mod, targets)\n}", "func (c *client) CompileLite(v interface{}, template, substitute bool) (*yaml.Build, *library.Pipeline, error) {\n\tp, data, err := c.Parse(v, c.repo.GetPipelineType(), new(yaml.Template))\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// create the library pipeline object from the yaml configuration\n\t_pipeline := p.ToPipelineLibrary()\n\t_pipeline.SetData(data)\n\t_pipeline.SetType(c.repo.GetPipelineType())\n\n\tif p.Metadata.RenderInline {\n\t\tnewPipeline, err := c.compileInline(p, c.TemplateDepth)\n\t\tif err != nil {\n\t\t\treturn nil, _pipeline, err\n\t\t}\n\t\t// validate the yaml configuration\n\t\terr = c.Validate(newPipeline)\n\t\tif err 
!= nil {\n\t\t\treturn nil, _pipeline, err\n\t\t}\n\n\t\tp = newPipeline\n\t}\n\n\tif template {\n\t\t// create map of templates for easy lookup\n\t\ttemplates := mapFromTemplates(p.Templates)\n\n\t\tswitch {\n\t\tcase len(p.Stages) > 0:\n\t\t\t// inject the templates into the steps\n\t\t\tp, err = c.ExpandStages(p, templates, nil)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, _pipeline, err\n\t\t\t}\n\n\t\t\tif substitute {\n\t\t\t\t// inject the substituted environment variables into the steps\n\t\t\t\tp.Stages, err = c.SubstituteStages(p.Stages)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, _pipeline, err\n\t\t\t\t}\n\t\t\t}\n\t\tcase len(p.Steps) > 0:\n\t\t\t// inject the templates into the steps\n\t\t\tp, err = c.ExpandSteps(p, templates, nil, c.TemplateDepth)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, _pipeline, err\n\t\t\t}\n\n\t\t\tif substitute {\n\t\t\t\t// inject the substituted environment variables into the steps\n\t\t\t\tp.Steps, err = c.SubstituteSteps(p.Steps)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, _pipeline, err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// validate the yaml configuration\n\terr = c.Validate(p)\n\tif err != nil {\n\t\treturn nil, _pipeline, err\n\t}\n\n\treturn p, _pipeline, nil\n}", "func main() {\n\tlog.SetFlags(0)\n\tif err := precompile(); err != nil {\n\t\tlog.Printf(\"error: %v\", err)\n\t}\n}", "func compileBytecodeAndRun(program *ast.RootNode) object.Object {\n\tcomp := compiler.New()\n\n\terr := comp.Compile(program)\n\tif err != nil {\n\t\tfmt.Printf(\"compiler error: %s\", err)\n\t}\n\n\tvm := vm.New(comp.Bytecode())\n\n\terr = vm.Run()\n\tif err != nil {\n\t\tfmt.Printf(\"vm error: %s\", err)\n\t}\n\n\treturn vm.LastPoppedStackElement()\n}", "func Compile(rootPath string, minify bool) (string, error) {\n\topts := api.BuildOptions{\n\t\tEntryPoints: []string{path.Join(rootPath, \"index.js\")},\n\t\tOutfile: \"script.js\",\n\t\tBundle: true,\n\t\tWrite: false,\n\t\tLogLevel: api.LogLevelInfo,\n\t}\n\tif minify 
{\n\t\topts.MinifyWhitespace = true\n\t\topts.MinifyIdentifiers = false\n\t\topts.MinifySyntax = true\n\t}\n\tres := api.Build(opts)\n\tif len(res.Errors) > 0 {\n\t\treturn \"\", errors.New(\"failed to compile web js\")\n\t}\n\treturn string(res.OutputFiles[0].Contents), nil\n}", "func compile_plugin(filename string, tag string) {\n out := globals.tmp_directory + \"/\" + tag\n cmd := \"go\"\n arg1 := \"build\"\n arg2 := \"-o\"\n arg3 := out\n arg4 := filename\n o, err := exec.Command(cmd, arg1, arg2, arg3, arg4).Output()\n if err != nil {\n println(err.Error())\n println(o)\n return\n }\n}", "func (runAll RunAll) Compile() []string {\n\tcommands := make([]string, len(runAll.Runs))\n\n\tfor i, run := range runAll.Runs {\n\t\tcommands[i] = run.Compile()[0]\n\t}\n\n\treturn []string{strings.Join(commands, \" && \")}\n}", "func Build(target *prog.Target, lang, src string) (string, error) {\n\tbin, err := ioutil.TempFile(\"\", \"syzkaller\")\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to create temp file: %v\", err)\n\t}\n\tbin.Close()\n\tsysTarget := targets.List[target.OS][target.Arch]\n\tcompiler := sysTarget.CCompilerPrefix + \"gcc\"\n\tif _, err := exec.LookPath(compiler); err != nil {\n\t\treturn \"\", NoCompilerErr\n\t}\n\tflags := []string{\n\t\t\"-x\", lang, \"-Wall\", \"-Werror\", \"-O1\", \"-g\", \"-o\", bin.Name(),\n\t\tsrc, \"-pthread\",\n\t}\n\tflags = append(flags, sysTarget.CrossCFlags...)\n\tif sysTarget.PtrSize == 4 {\n\t\t// We do generate uint64's for syscall arguments that overflow longs on 32-bit archs.\n\t\tflags = append(flags, \"-Wno-overflow\")\n\t}\n\tout, err := exec.Command(compiler, append(flags, \"-static\")...).CombinedOutput()\n\tif err != nil {\n\t\t// Some distributions don't have static libraries.\n\t\tout, err = exec.Command(compiler, flags...).CombinedOutput()\n\t}\n\tif err != nil {\n\t\tos.Remove(bin.Name())\n\t\tdata, _ := ioutil.ReadFile(src)\n\t\treturn \"\", fmt.Errorf(\"failed to build 
program:\\n%s\\n%s\\ncompiler invocation: %v %v\\n\",\n\t\t\tdata, out, compiler, flags)\n\t}\n\treturn bin.Name(), nil\n}", "func compileJs(filename string, externs []string, includes []string, w io.Writer, level Optimization) error {\n\t// output pipe\n\torp, owp, err := os.Pipe()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer orp.Close()\n\tdefer owp.Close()\n\n\tvar cp, jp *os.Process\n\tswitch level {\n\tcase None:\n\t\tcp, err = cpp(filename, includes, owp)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\towp.Close()\n\tcase Basic, Advanced:\n\t\tirp, iwp, err := os.Pipe()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer irp.Close()\n\t\tdefer iwp.Close()\n\n\t\tcp, err = cpp(filename, includes, iwp)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tiwp.Close()\n\n\t\tjp, err = jsc(irp, owp, externs, pathToJsc(), level)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tirp.Close()\n\t\towp.Close()\n\t}\n\n\t_, err = io.Copy(w, orp)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = waitFor(cp, jp)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (Internal) GrpcDebug() error {\n\tfmt.Println(\"\\n=====> Running protoc...\\n\")\n\terr := os.Chdir(filepath.Join(srcDir(), \"agent/proto\"))\n\tif err != nil {\n\t\treturn trace.ConvertSystemError(err)\n\t}\n\tprotoFiles, err := filepath.Glob(\"debug/*.proto\")\n\tif err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\targs := []string{fmt.Sprint(\"-I=.:\", os.Getenv(\"PROTO_INCLUDE\")), \"--gofast_out=plugins=grpc,Mgogo.proto=github.com/gogo/protobuf/gogoproto:.\"}\n\treturn trace.Wrap(sh.RunV(\n\t\t\"protoc\",\n\t\tappend(args, protoFiles...)...,\n\t))\n}", "func (p *Qlang) SafeExec(code []byte, fname string) (err error) {\n\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\tswitch v := e.(type) {\n\t\t\tcase string:\n\t\t\t\terr = errors.New(v)\n\t\t\tcase error:\n\t\t\t\terr = v\n\t\t\tdefault:\n\t\t\t\tpanic(e)\n\t\t\t}\n\t\t}\n\t}()\n\n\terr = p.Exec(code, 
fname)\n\treturn\n}", "func (c *Compiler) Compile(conf *yaml.Config) *backend.Config {\n\tconfig := new(backend.Config)\n\n\t// create a default volume\n\tconfig.Volumes = append(config.Volumes, &backend.Volume{\n\t\tName: fmt.Sprintf(\"%s_default\", c.prefix),\n\t\tDriver: \"local\",\n\t})\n\n\t// create a default network\n\tconfig.Networks = append(config.Networks, &backend.Network{\n\t\tName: fmt.Sprintf(\"%s_default\", c.prefix),\n\t\tDriver: \"bridge\",\n\t})\n\n\t// overrides the default workspace paths when specified\n\t// in the YAML file.\n\tif len(conf.Workspace.Base) != 0 {\n\t\tc.base = conf.Workspace.Base\n\t}\n\tif len(conf.Workspace.Path) != 0 {\n\t\tc.path = conf.Workspace.Path\n\t}\n\n\t// add default clone step\n\tif c.local == false && len(conf.Clone.Containers) == 0 {\n\t\tcontainer := &yaml.Container{\n\t\t\tName: \"clone\",\n\t\t\tImage: \"crun/git:latest\",\n\t\t\tVargs: map[string]interface{}{\"depth\": \"0\"},\n\t\t}\n\t\tswitch c.metadata.Sys.Arch {\n\t\tcase \"linux/arm\":\n\t\t\tcontainer.Image = \"crun/git:linux-arm\"\n\t\tcase \"linux/arm64\":\n\t\t\tcontainer.Image = \"crun/git:linux-arm64\"\n\t\t}\n\t\tname := fmt.Sprintf(\"%s_clone\", c.prefix)\n\t\tstep := c.createProcess(name, container, \"clone\")\n\n\t\tstage := new(backend.Stage)\n\t\tstage.Name = name\n\t\tstage.Alias = \"clone\"\n\t\tstage.Steps = append(stage.Steps, step)\n\n\t\tconfig.Stages = append(config.Stages, stage)\n\t} else if c.local == false {\n\t\tfor i, container := range conf.Clone.Containers {\n\t\t\tif !container.Constraints.Match(c.metadata) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tstage := new(backend.Stage)\n\t\t\tstage.Name = fmt.Sprintf(\"%s_clone_%v\", c.prefix, i)\n\t\t\tstage.Alias = container.Name\n\n\t\t\tname := fmt.Sprintf(\"%s_clone_%d\", c.prefix, i)\n\t\t\tstep := c.createProcess(name, container, \"clone\")\n\t\t\tstage.Steps = append(stage.Steps, step)\n\n\t\t\tconfig.Stages = append(config.Stages, stage)\n\t\t}\n\t}\n\n\t// c.setupCache2(conf, 
config)\n\tc.RestoreCache(conf, config)\n\n\t// add services steps\n\tif len(conf.Services.Containers) != 0 {\n\t\tstage := new(backend.Stage)\n\t\tstage.Name = fmt.Sprintf(\"%s_services\", c.prefix)\n\t\tstage.Alias = \"services\"\n\n\t\tfor i, container := range conf.Services.Containers {\n\t\t\tif !container.Constraints.Match(c.metadata) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tname := fmt.Sprintf(\"%s_services_%d\", c.prefix, i)\n\t\t\tstep := c.createProcess(name, container, \"services\")\n\t\t\tstage.Steps = append(stage.Steps, step)\n\n\t\t}\n\t\tconfig.Stages = append(config.Stages, stage)\n\t}\n\n\t// add pipeline steps. 1 pipeline step per stage, at the moment\n\tvar stage *backend.Stage\n\tvar group string\n\tfor i, container := range conf.Pipeline.Containers {\n\t\t//Skip if local and should not run local\n\t\tif c.local && !container.Constraints.Local.Bool() {\n\t\t\tcontinue\n\t\t}\n\n\t\tif !container.Constraints.Match(c.metadata) {\n\t\t\tcontinue\n\t\t}\n\n\t\tif stage == nil || group != container.Group || container.Group == \"\" {\n\t\t\tgroup = container.Group\n\n\t\t\tstage = new(backend.Stage)\n\t\t\tstage.Name = fmt.Sprintf(\"%s_stage_%v\", c.prefix, i)\n\t\t\tstage.Alias = container.Name\n\t\t\tconfig.Stages = append(config.Stages, stage)\n\t\t}\n\n\t\tname := fmt.Sprintf(\"%s_step_%d\", c.prefix, i)\n\t\tstep := c.createProcess(name, container, \"pipeline\")\n\t\tstage.Steps = append(stage.Steps, step)\n\t}\n\n\t// c.setupCacheRebuild2(conf, config)\n\tc.SaveCache(conf, config)\n\n\treturn config\n}", "func (l *loader) compile(name string) (string, error) {\n // Copy the file to the objects directory with a different name\n // each time, to avoid retrieving the cached version.\n // Apparently the cache key is the path of the file compiled and\n // there's no way to invalidate it.\n\n f, err := ioutil.ReadFile(filepath.Join(l.pluginsDir, name + \".go\"))\n if err != nil {\n return \"\", fmt.Errorf(\"Cannot read %s.go: %v\", name, err)\n }\n\n 
name = fmt.Sprintf(\"%d.go\", rand.Int())\n srcPath := filepath.Join(l.objectsDir, name)\n if err := ioutil.WriteFile(srcPath, f, 0666); err != nil {\n return \"\", fmt.Errorf(\"Cannot write %s: %v\", name, err)\n }\n\n objectPath := srcPath[:len(srcPath)-3] + \".so\"\n\n cmd := exec.Command(\"go\", \"build\", \"-buildmode=plugin\", \"-o=\"+objectPath, srcPath)\n cmd.Stderr = os.Stderr\n cmd.Stdout = os.Stdout\n if err := cmd.Run(); err != nil {\n return \"\", fmt.Errorf(\"Cannot compile %s: %v\", name, err)\n }\n\n return objectPath, nil\n}", "func (state *State) CompileAndRun(code, filename string) (ret *Value, err error) {\n\tscript := NewScript()\n\t\n\tval, err := state.CompileToValue(code, NewPosition(1, 1, filename))\n\tif err != nil {\n\t\treturn\n\t}\n\t\n\tret, err = state.Run(script, val)\n\treturn\n}", "func (self *Build) exec(moduleLabel core.Label, fileType core.FileType) error {\n\tthread := createThread(self, moduleLabel, fileType)\n\n\tsourceData, err := self.sourceFileReader(moduleLabel)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to execute %v: read failed: %v\", moduleLabel, err)\n\t}\n\n\t_, err = starlark.ExecFile(thread, moduleLabel.String(), sourceData,\n\t\tbuiltins.InitialGlobals(fileType))\n\treturn err\n}", "func (p *PrecompiledTemplate) Execute(content map[string]string, lang string) (string, string, error) {\n\n\tvar bodyBuffer bytes.Buffer\n\tvar subject string\n\tvar err error\n\tcontextParts := p.fillEscapedParts(content)\n\tp.fillAndLocalize(lang, content, contextParts)\n\n\tif subject, err = p.fillAndLocalizeSubject(lang, contextParts); err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"models: failure to generate subject %s\", strconv.Quote(p.name.String()))\n\t}\n\n\tif err := p.precompiledBody.Execute(&bodyBuffer, content); err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"models: failure to execute body template %s with content\", strconv.Quote(p.name.String()))\n\t}\n\n\treturn subject, bodyBuffer.String(), nil\n}", 
"func (app *application) Execute(script linkers.Script) (middle.Program, error) {\n\tlangRef := script.Language()\n\tlang := langRef.Language()\n\tlangApp := lang.Application()\n\tinVariable := langRef.Input()\n\toutVariable := langRef.Output()\n\tcode := script.Code()\n\n\tcodeValue, err := app.computableBuilder.Create().WithString(code).Now()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinput := map[string]computable.Value{\n\t\tinVariable: codeValue,\n\t}\n\n\tlinkedProgram, err := app.programBuilder.Create().WithApplication(langApp).Now()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinterpreter, err := app.interpreterBuilder.Create().WithProgram(linkedProgram).Now()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !interpreter.IsApplication() {\n\t\treturn nil, errors.New(\"the interpreter was expected to be an application interpreter\")\n\t}\n\n\tstackFrame, err := interpreter.Application().Execute(input)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcomputedCodeValue, err := stackFrame.Current().Fetch(outVariable)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !computedCodeValue.IsString() {\n\t\tstr := fmt.Sprintf(\"the output variable (%s) was expected to contain code and therefore be a string\", outVariable)\n\t\treturn nil, errors.New(str)\n\t}\n\n\tpOutputCode := computedCodeValue.String()\n\tprogramIns, err := app.parser.ExecuteScript(*pOutputCode)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif parsedProgram, ok := programIns.(parsers.Program); ok {\n\t\treturn app.middleAdapter.ToProgram(parsedProgram)\n\t}\n\n\treturn nil, errors.New(\"the parsed compiled output was expected to contain a parsers.Program instance\")\n}", "func (ev *Evaler) Compile(n *parse.Chunk) (Op, error) {\n\treturn compile(makeScope(ev.global), n)\n}", "func Compile(value bool) TestOptionsFunc {\n\treturn func(_ *testing.T, test *Test) { test.Options.Compile = &value }\n}", "func (c *Compiler) Compile(ctx context.Context, node ast.StmtNode) 
(ast.Statement, error) {\n\tast.SetFlag(node)\n\tif _, ok := node.(*ast.UpdateStmt); ok {\n\t\tsVars := variable.GetSessionVars(ctx)\n\t\tsVars.InUpdateStmt = true\n\t\tdefer func() {\n\t\t\tsVars.InUpdateStmt = false\n\t\t}()\n\t}\n\n\tis := sessionctx.GetDomain(ctx).InfoSchema()\n\tif err := plan.Preprocess(node, is, ctx); err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\t// Validate should be after NameResolve.\n\tif err := plan.Validate(node, false); err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tp, err := plan.Optimize(ctx, node, is)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\t_, isDDL := node.(ast.DDLNode)\n\tsa := &statement{\n\t\tis: is,\n\t\tplan: p,\n\t\ttext: node.Text(),\n\t\tisDDL: isDDL,\n\t}\n\treturn sa, nil\n}", "func (vm *VM) Compile(source string, chunk *Chunk) (int, bool) {\n\tvar scanner Scanner\n\tvar parser Parser\n\tscanner.Init(source)\n\tparser.scanner = &scanner\n\tparser.chunk = chunk\n\tparser.backpatch = map[string][]Label{}\n\tparser.labels = map[string]byte{}\n\tparser.advance()\n\tfor parser.previous.Type != EOF {\n\t\tparser.statement()\n\t\tif parser.errorState {\n\t\t\tparser.synchronize()\n\t\t}\n\t}\n\tparser.consume(EOF, \"Expected EOF.\")\n\tparser.emitHalt()\n\tparser.checkLabels()\n\treturn parser.size, !parser.hasError\n}", "func (i *Interpreter) run(w *eval.World, path_orEmpty string, sourceCode string) error {\n\tvar code eval.Code\n\tvar vars []string\n\tvar err error\n\n\tfileSet := token.NewFileSet()\n\tif len(path_orEmpty) > 0 {\n\t\tfileSet.AddFile(path_orEmpty, fileSet.Base(), len(sourceCode))\n\t}\n\n\tcode, vars, err = i.compile(w, fileSet, sourceCode)\n\ti.tryToAddVars(w, fileSet, vars)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresult, err := code.Run()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif result != nil {\n\t\tfmt.Fprintf(stdout, \"%s\\n\", result)\n\t}\n\n\treturn nil\n}", "func compileCheckout(ctx context.Context, checkoutPath, lunchTarget, mmmaTargets, 
logFilePrefix, pathToCompileScript string) (bool, string, error) {\n\tcheckoutBase := path.Base(checkoutPath)\n\tsklog.Infof(\"Started compiling %s\", checkoutBase)\n\t// Create metric and send it to a timer.\n\tcompileTimesMetric := metrics2.GetFloat64Metric(fmt.Sprintf(\"android_compile_time_%s\", checkoutBase))\n\tdefer timer.NewWithMetric(fmt.Sprintf(\"Time taken to compile %s:\", checkoutBase), compileTimesMetric).Stop()\n\n\t// Execute the compile script pointing it to the checkout.\n\tcommand := exec.Command(\"bash\", pathToCompileScript, checkoutPath, lunchTarget, mmmaTargets)\n\tlogFile, err := ioutil.TempFile(*workdir, logFilePrefix)\n\tdefer util.Remove(logFile.Name())\n\tif err != nil {\n\t\treturn false, \"\", fmt.Errorf(\"Could not create log file\")\n\t}\n\tcommand.Stdout = io.MultiWriter(logFile, os.Stdout)\n\tcommand.Stderr = command.Stdout\n\n\t// Execute the command and determine if it was successful\n\tcompileSuccess := (command.Run() == nil)\n\n\t// Put the log file in Google Storage.\n\ttarget := bucketHandle.Object(filepath.Base(logFile.Name()))\n\twriter := target.NewWriter(ctx)\n\twriter.ObjectAttrs.ContentType = \"text/plain\"\n\t// Make uploaded logs readable by google.com domain.\n\twriter.ObjectAttrs.ACL = []storage.ACLRule{{Entity: \"domain-google.com\", Role: storage.RoleReader}}\n\tdefer util.Close(writer)\n\n\tdata, err := ioutil.ReadFile(logFile.Name())\n\tcompileLog := string(data)\n\t// Write the logs to Google storage.\n\tif _, err := io.WriteString(writer, compileLog); err != nil {\n\t\treturn compileSuccess, \"\", fmt.Errorf(\"Could not write %s to google storage: %s\", logFile.Name(), err)\n\t}\n\t// Write to logs to sklog as well.\n\tsklog.Infof(\"Compilation logs for %s on %s:\", logFilePrefix, checkoutBase)\n\tsklog.Infof(\"\\n---------------------------------------------------\\n%s\\n---------------------------------------------------\\n\", compileLog)\n\n\treturn compileSuccess, 
fmt.Sprintf(\"https://storage.cloud.google.com/%s/%s\", COMPILE_TASK_LOGS_BUCKET, filepath.Base(logFile.Name())), nil\n}", "func (i *Interpreter) compile(w *eval.World, fileSet *token.FileSet, sourceCode string) (code eval.Code, vars []string, err error) {\n\tvar statements []ast.Stmt\n\tvar declarations []ast.Decl\n\n\tvars_buffer := make(map[string]bool)\n\n\tstatements, err1 := parseStmtList(fileSet, sourceCode)\n\tif err1 == nil {\n\t\tfor _, s := range statements {\n\t\t\taddTopLevelVars(s, vars_buffer)\n\t\t}\n\t\tvars = make([]string, 0, len(vars_buffer))\n\t\tfor varName, _ := range vars_buffer {\n\t\t\tvars = append(vars, varName)\n\t\t}\n\n\t\tcode, err = w.CompileStmtList(fileSet, statements)\n\n\t\treturn code, vars, err\n\t}\n\n\tdeclarations, err2 := parseDeclList(fileSet, sourceCode)\n\tif err2 == nil {\n\t\tfor _, d := range declarations {\n\t\t\taddTopLevelVars(d, vars_buffer)\n\t\t}\n\t\tvars = make([]string, 0, len(vars_buffer))\n\t\tfor varName, _ := range vars_buffer {\n\t\t\tvars = append(vars, varName)\n\t\t}\n\n\t\tcode, err = w.CompileDeclList(fileSet, declarations)\n\n\t\treturn code, vars, err\n\t}\n\n\treturn nil, nil, err1\n}", "func (r *Eval) Run(ctx context.Context, script []byte) (Object, *Bytecode, error) {\n\tbytecode, err := Compile(script, r.Opts)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tbytecode.Main.NumParams = bytecode.Main.NumLocals\n\tr.Opts.Constants = bytecode.Constants\n\tr.fixOpPop(bytecode)\n\tr.VM.SetBytecode(bytecode)\n\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\n\tr.VM.modulesCache = r.ModulesCache\n\tret, err := r.run(ctx)\n\tr.ModulesCache = r.VM.modulesCache\n\tr.Locals = r.VM.GetLocals(r.Locals)\n\tr.VM.Clear()\n\n\tif err != nil {\n\t\treturn nil, bytecode, err\n\t}\n\treturn ret, bytecode, nil\n}", "func (c *Compiler) Compile(ctx context.Context, files ...string) (linker.Files, error) {\n\tif len(files) == 0 {\n\t\treturn nil, nil\n\t}\n\n\tctx, cancel := 
context.WithCancel(ctx)\n\tdefer cancel()\n\n\tpar := c.MaxParallelism\n\tif par <= 0 {\n\t\tpar = runtime.GOMAXPROCS(-1)\n\t\tcpus := runtime.NumCPU()\n\t\tif par > cpus {\n\t\t\tpar = cpus\n\t\t}\n\t}\n\n\th := reporter.NewHandler(c.Reporter)\n\n\te := executor{\n\t\tc: c,\n\t\th: h,\n\t\ts: semaphore.NewWeighted(int64(par)),\n\t\tcancel: cancel,\n\t\tsym: &linker.Symbols{},\n\t\tresCache: &options.FeaturesResolverCache{},\n\t\tresults: map[string]*result{},\n\t}\n\n\t// We lock now and create all tasks under lock to make sure that no\n\t// async task can create a duplicate result. For example, if files\n\t// contains both \"foo.proto\" and \"bar.proto\", then there is a race\n\t// after we start compiling \"foo.proto\" between this loop and the\n\t// async compilation task to create the result for \"bar.proto\". But\n\t// we need to know if the file is directly requested for compilation,\n\t// so we need this loop to define the result. So this loop holds the\n\t// lock the whole time so async tasks can't create a result first.\n\tresults := make([]*result, len(files))\n\tfunc() {\n\t\te.mu.Lock()\n\t\tdefer e.mu.Unlock()\n\t\tfor i, f := range files {\n\t\t\tresults[i] = e.compileLocked(ctx, f, true)\n\t\t}\n\t}()\n\n\tdescs := make([]linker.File, len(files))\n\tvar firstError error\n\tfor i, r := range results {\n\t\tselect {\n\t\tcase <-r.ready:\n\t\tcase <-ctx.Done():\n\t\t\treturn nil, ctx.Err()\n\t\t}\n\t\tif r.err != nil {\n\t\t\tif firstError == nil {\n\t\t\t\tfirstError = r.err\n\t\t\t}\n\t\t}\n\t\tdescs[i] = r.res\n\t}\n\n\tif err := h.Error(); err != nil {\n\t\treturn descs, err\n\t}\n\t// this should probably never happen; if any task returned an\n\t// error, h.Error() should be non-nil\n\treturn descs, firstError\n}", "func handleCompile(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\n\tvar source []byte\n\tif strings.HasPrefix(r.Header.Get(\"Content-Type\"), \"text/plain\") {\n\t\t// Read the 
source from the POST request.\n\t\tvar err error\n\t\tsource, err = ioutil.ReadAll(r.Body)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusUnprocessableEntity)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\t// Read the source from a form parameter.\n\t\tsource = []byte(r.FormValue(\"code\"))\n\t}\n\t// Hash the source code, used for the build cache.\n\tsourceHashRaw := sha256.Sum256([]byte(source))\n\tsourceHash := hex.EncodeToString(sourceHashRaw[:])\n\n\tformat := r.FormValue(\"format\")\n\tswitch format {\n\tcase \"\", \"wasm\":\n\t\t// Run code in the browser.\n\t\tformat = \"wasm\"\n\tcase \"elf\", \"hex\", \"uf2\":\n\t\t// Build a firmware that can be flashed directly to a development board.\n\tdefault:\n\t\t// Unrecognized format. Disallow to be sure (might introduce security\n\t\t// issues otherwise).\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// Attempt to serve directly from the directory with cached files.\n\tfilename := filepath.Join(cacheDir, \"build-\"+r.FormValue(\"target\")+\"-\"+sourceHash+\".\"+format)\n\tfp, err := os.Open(filename)\n\tif err == nil {\n\t\t// File was already cached! 
Serve it directly.\n\t\tdefer fp.Close()\n\t\tsendCompiledResult(w, fp, format)\n\t\treturn\n\t}\n\n\t// Create a new compiler job, which will be executed in a single goroutine\n\t// (to avoid overloading the system).\n\tjob := compilerJob{\n\t\tSource: source,\n\t\tSourceHash: sourceHash,\n\t\tTarget: r.FormValue(\"target\"),\n\t\tFormat: format,\n\t\tContext: r.Context(),\n\t\tResultFile: make(chan string),\n\t\tResultErrors: make(chan []byte),\n\t}\n\t// Send the job for execution.\n\tcompilerChan <- job\n\t// See how well that went, when it finishes.\n\tselect {\n\tcase filename := <-job.ResultFile:\n\t\t// Succesful compilation.\n\t\tfp, err := os.Open(filename)\n\t\tif err != nil {\n\t\t\tlog.Println(\"could not open compiled file:\", err)\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tdefer fp.Close()\n\t\tsendCompiledResult(w, fp, format)\n\tcase buf := <-job.ResultErrors:\n\t\t// Failed compilation.\n\t\tw.Write(buf)\n\t}\n}", "func (c *Compiler) Compile(node ast.Node) error {\n\tswitch node := node.(type) {\n\tcase *ast.Program:\n\t\tfor _, s := range node.Statements {\n\t\t\terr := c.Compile(s)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\tcase *ast.ExpressionStatement:\n\t\terr := c.Compile(node.Expression)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.emit(operation.Pop)\n\n\tcase *ast.InfixExpression:\n\t\t// Convert LessThan operations to GreaterThan ones\n\t\tif node.Operator == \"<\" {\n\n\t\t\t// First compile the right node, then the left node, unlike other infixes\n\t\t\terr := c.Compile(node.Right)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\terr = c.Compile(node.Left)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tc.emit(operation.GreaterThan)\n\t\t\treturn nil\n\t\t}\n\n\t\terr := c.Compile(node.Left)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = c.Compile(node.Right)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tswitch node.Operator 
{\n\t\tcase \"+\":\n\t\t\tc.emit(operation.Add)\n\t\tcase \"-\":\n\t\t\tc.emit(operation.Sub)\n\t\tcase \"*\":\n\t\t\tc.emit(operation.Mul)\n\t\tcase \"/\":\n\t\t\tc.emit(operation.Div)\n\t\tcase \">\":\n\t\t\tc.emit(operation.GreaterThan)\n\t\tcase \"==\":\n\t\t\tc.emit(operation.Equal)\n\t\tcase \"!=\":\n\t\t\tc.emit(operation.NotEqual)\n\t\tcase \"=\":\n\t\t\tswitch node.Left.(type) {\n\t\t\t// Index assignment\n\t\t\tcase *ast.IndexExpression:\n\t\t\t\tie := node.Left.(*ast.IndexExpression)\n\n\t\t\t\t// Compile identifier\n\t\t\t\terr := c.Compile(ie.Left)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\t// Compile the index\n\t\t\t\terr = c.Compile(ie.Index)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\t// Compile the value\n\t\t\t\terr = c.Compile(node.Right)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tc.emit(operation.SetIndex)\n\n\t\t\t// Identifier assignment\n\t\t\tcase *ast.Identifier:\n\t\t\t\t// Get the variable\n\t\t\t\tident := node.Left.(*ast.Identifier)\n\t\t\t\tsymbol, ok := c.symbols.Resolve(ident.Value)\n\t\t\t\tif !ok {\n\t\t\t\t\treturn fmt.Errorf(\"Variable %s is undefined\", node.Left)\n\t\t\t\t}\n\n\t\t\t\t// Compile the value to be assigned\n\t\t\t\terr := c.Compile(node.Right)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tswitch symbol.Scope {\n\t\t\t\tcase symbols.GlobalScope:\n\t\t\t\t\tc.emit(operation.SetGlobal, symbol.Index)\n\t\t\t\tcase symbols.LocalScope:\n\t\t\t\t\tc.emit(operation.SetLocal, symbol.Index)\n\t\t\t\tcase symbols.FreeScope:\n\t\t\t\t\tc.emit(operation.SetFree, symbol.Index)\n\t\t\t\t}\n\t\t\t}\n\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"unknown operator %s\", node.Operator)\n\t\t}\n\n\tcase *ast.PrefixExpression:\n\t\terr := c.Compile(node.Right)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tswitch node.Operator {\n\t\tcase \"!\":\n\t\t\tc.emit(operation.Bang)\n\t\tcase 
\"-\":\n\t\t\tc.emit(operation.Minus)\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"unknown operator %s\", node.Operator)\n\t\t}\n\n\tcase *ast.IntegerLiteral:\n\t\tinteger := &data.Integer{Value: node.Value}\n\t\tc.emit(operation.Constant, c.addConstant(integer))\n\n\tcase *ast.Boolean:\n\t\tif node.Value {\n\t\t\tc.emit(operation.True)\n\t\t} else {\n\t\t\tc.emit(operation.False)\n\t\t}\n\n\tcase *ast.StringLiteral:\n\t\tstr := &data.String{Value: node.Value}\n\t\tc.emit(operation.Constant, c.addConstant(str))\n\n\tcase *ast.ArrayLiteral:\n\t\tfor _, el := range node.Elements {\n\t\t\terr := c.Compile(el)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tc.emit(operation.Array, len(node.Elements))\n\n\tcase *ast.HashLiteral:\n\t\tkeys := []ast.Expression{}\n\t\tfor k := range node.Pairs {\n\t\t\tkeys = append(keys, k)\n\t\t}\n\t\t// Sort the keys as Go doesn't guarantee key/val order on iteration\n\t\tsort.Slice(keys, func(i, j int) bool {\n\t\t\treturn keys[i].String() < keys[j].String()\n\t\t})\n\t\tfor _, k := range keys {\n\t\t\terr := c.Compile(k)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\terr = c.Compile(node.Pairs[k])\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tc.emit(operation.Hash, len(node.Pairs)*2)\n\n\tcase *ast.IndexExpression:\n\t\terr := c.Compile(node.Left)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = c.Compile(node.Index)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.emit(operation.Index)\n\n\tcase *ast.IfExpression:\n\t\terr := c.Compile(node.Condition)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Emit a `JumpNotTruthy` with a temporary operand\n\t\tjumpNotTruthyPos := c.emit(operation.JumpNotTruthy, 9999)\n\t\terr = c.Compile(node.Consequent)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif c.isEmitted(operation.Pop) {\n\t\t\tc.preventPop()\n\t\t}\n\n\t\t// Emit a `Jump` with a temporary operand\n\t\tjumpPos := c.emit(operation.Jump, 9999)\n\t\tafterConsequentPos := 
len(c.currentInstructions())\n\t\tc.changeOperand(jumpNotTruthyPos, afterConsequentPos)\n\t\tif node.Alternate == nil {\n\t\t\tc.emit(operation.Null)\n\t\t} else {\n\t\t\terr := c.Compile(node.Alternate)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif c.isEmitted(operation.Pop) {\n\t\t\t\tc.preventPop()\n\t\t\t}\n\t\t}\n\n\t\tafterAlternatePos := len(c.currentInstructions())\n\t\tc.changeOperand(jumpPos, afterAlternatePos)\n\n\tcase *ast.LetStatement:\n\t\tsymbol := c.symbols.Define(node.Name.Value)\n\t\terr := c.Compile(node.Value)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif symbol.Scope == symbols.GlobalScope {\n\t\t\tc.emit(operation.SetGlobal, symbol.Index)\n\t\t} else {\n\t\t\tc.emit(operation.SetLocal, symbol.Index)\n\t\t}\n\n\tcase *ast.Identifier:\n\t\tsymbol, ok := c.symbols.Resolve(node.Value)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Variable %s is undefined\", node.Value)\n\t\t}\n\t\tc.loadSymbol(symbol)\n\n\tcase *ast.BlockStatement:\n\t\tfor _, s := range node.Statements {\n\t\t\terr := c.Compile(s)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\tcase *ast.FunctionLiteral:\n\t\tc.enterScope()\n\t\tfor _, p := range node.Parameters {\n\t\t\tc.symbols.Define(p.Value)\n\t\t}\n\t\terr := c.Compile(node.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// If a pop was emitted, replace it with a return value (implicit return)\n\t\tif c.isEmitted(operation.Pop) {\n\t\t\tc.changeEmittedTo(operation.ReturnValue)\n\t\t}\n\n\t\t// If no return value was emitted, emit a return instead (no return - empty body)\n\t\tif !c.isEmitted(operation.ReturnValue) {\n\t\t\tc.emit(operation.Return)\n\t\t}\n\n\t\tfreeSymbols := c.symbols.Free\n\t\tlocalCount := c.symbols.DefinitionCount\n\t\tinstructions := c.leaveScope()\n\n\t\tfor _, s := range freeSymbols {\n\t\t\tc.loadSymbol(s)\n\t\t}\n\n\t\tcompiled := &data.CompiledFunction{\n\t\t\tInstructions: instructions,\n\t\t\tLocalCount: localCount,\n\t\t\tParamCount: 
len(node.Parameters),\n\t\t}\n\n\t\tfnIndex := c.addConstant(compiled)\n\t\tc.emit(operation.Closure, fnIndex, len(freeSymbols))\n\n\tcase *ast.ReturnStatement:\n\t\terr := c.Compile(node.ReturnValue)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.emit(operation.ReturnValue)\n\n\tcase *ast.CallExpression:\n\t\terr := c.Compile(node.Function)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, a := range node.Arguments {\n\t\t\terr := c.Compile(a)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tc.emit(operation.Call, len(node.Arguments))\n\n\t}\n\n\treturn nil\n}", "func executeBuild() {\n\tfmt.Println(\"Building ...\")\n}", "func (ic *Context) Exec() error {\n\tdefer ic.Finalize()\n\treturn ic.VM.Run()\n}", "func (m *GoRoutines) Exec(c context.Context, name string, fn func(ctx context.Context)) {\n\tg := &GoRoutine{\n\t\tctx: c,\n\t\tName: name,\n\t\tFunc: fn,\n\t\tActive: true,\n\t\tRestart: false,\n\t}\n\tm.exec(g)\n}", "func (s *Server) Compiled(ctx context.Context, request *proto.Request) (*proto.Response, error) {\n\t// extract the code URL, args, and stdin from request\n\tcodeURL, args, stdin := request.GetCodeURL(), request.GetArgs(), request.GetStdin()\n\n\t/*\n\t\tCode has to be downloaded in\n\t\t/home/${whoami}/remote/ruby/code-#{time.Now()}.rb\n\t*/\n\n\t// get home directory of current user\n\tcurrentUser, err := user.Current()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// get and format current time for every request\n\tt := time.Now().Format(\"20060102150405\")\n\n\t// generate string for destination (in UNIX based systems)\n\tdestinationString := fmt.Sprintf(\"%s/rpc/ruby/code-%s.rb\", currentUser.HomeDir, t)\n\n\t// download file in the provided destination\n\twget.Wget(codeURL, destinationString)\n\n\t/*\n\t\tIf no arguments were provided, only leave the\n\t\tdestinationString in the args slice otherwise\n\t\tappend location of file to arguments list\n\t*/\n\tif args[0] == \"\" {\n\t\targs = 
[]string{destinationString}\n\t} else {\n\t\targs = append(args, destinationString)\n\t}\n\n\t// get Command struct instance by passing command name and arguments\n\tcmd := exec.Command(\"ruby\", args...)\n\n\t// provide stdin to command\n\tcmd.Stdin = bytes.NewReader(stdin)\n\n\t// store cmd.Stdout in a Bytes buffer\n\tvar Stdout bytes.Buffer\n\tcmd.Stdout = &Stdout\n\n\t// run the command\n\terr = cmd.Run()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// delete the code file\n\terr = os.Remove(destinationString)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\t// return full response\n\treturn &proto.Response{Body: Stdout.Bytes()}, nil\n}", "func (gen *Generator) Exec() (err error) {\n\tif err := gen.Prompt(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := gen.Extends(); err != nil {\n\t\treturn err\n\t}\n\n\tif !gen.Options.PerformUpgrade {\n\t\t// run scripts in config.run_after array.\n\t\tif err := gen.RunBefore(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err := filepath.Walk(gen.Template.Files, gen.WalkFiles); err != nil {\n\t\treturn err\n\t}\n\n\tif err := gen.Project.SaveState(); err != nil {\n\t\treturn err\n\t}\n\n\tif !gen.Options.PerformUpgrade {\n\t\t// run scripts in config.run_after array.\n\t\tif err := gen.RunAfter(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn err\n}", "func (be Batch) Compile() error {\n\tswitch len(be.errors) {\n\tcase 0:\n\t\treturn nil\n\tcase 1:\n\t\treturn be.errors[0]\n\tdefault:\n\t\treturn be\n\t}\n}", "func (c *Compiler) Compile(givenAst *ast.Ast) (bleveQuery.Query, error) {\n\tq, err := compile(givenAst)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn q, nil\n}", "func (p *Probe) IsRuntimeCompiled() bool {\n\treturn p.runtimeCompiled\n}", "func TestCompileForInterpreter(t *testing.T) {\n\tabs, err := filepath.Abs(filepath.FromSlash(\"../examples/main.wasm\")) // Get absolute path to test WASM file\n\n\tif err != nil { // Check for errors\n\t\tt.Fatal(err) // 
Panic\n\t}\n\n\ttestSourceFile, err := ioutil.ReadFile(abs) // Read test WASM file\n\n\tif err != nil { // Check for errors\n\t\tt.Fatal(err) // Panic\n\t}\n\n\tmodule, err := LoadModule(testSourceFile) // Load module\n\n\tif err != nil { // Check for errors\n\t\tt.Fatal(err) // Panic\n\t}\n\n\tsimpleGasPolicy := SimpleGasPolicy{GasPerInstruction: 1} // Init simple gas policy\n\n\tinterpreterCompiled, err := module.CompileForInterpreter(&simpleGasPolicy) // Compile for interpreter\n\n\tif err != nil { // Check for errors\n\t\tt.Fatal(err) // Panic\n\t}\n\n\tt.Log(interpreterCompiled) // Log success\n}", "func (t Trigger) Compile() (*Trigger, error) {\n\treturn compileTrigger(t)\n}", "func (ev *Evaler) Compile(name, text string, n *parse.Chunk) (Op, error) {\n\treturn compile(name, text, makeScope(ev.global), n)\n}", "func (g *Gate) Compile(c *Compiler) {\n\tif g.Dead || g.Compiled {\n\t\treturn\n\t}\n\tg.Compiled = true\n\tswitch g.Op {\n\tcase circuit.INV:\n\t\tc.compiled = append(c.compiled, circuit.Gate{\n\t\t\tInput0: circuit.Wire(g.A.ID()),\n\t\t\tOutput: circuit.Wire(g.O.ID()),\n\t\t\tOp: g.Op,\n\t\t})\n\n\tdefault:\n\t\tc.compiled = append(c.compiled, circuit.Gate{\n\t\t\tInput0: circuit.Wire(g.A.ID()),\n\t\t\tInput1: circuit.Wire(g.B.ID()),\n\t\t\tOutput: circuit.Wire(g.O.ID()),\n\t\t\tOp: g.Op,\n\t\t})\n\t}\n}", "func Exec(ctx context.Context, args Args) error {\n\tprojectURL := DatoNotificationURL(args.ProjectID)\n\tif args.Pipeline.Build.Status == \"success\" {\n\t\tvar resultMessage = []byte(`{ \"status\": \"success\" }`)\n\t\thttp.Post(projectURL, \"application/json\", bytes.NewBuffer(resultMessage))\n\t\tfmt.Printf(\"Sent success webhook to DatoCMS for project %s\\n\", args.ProjectID)\n\t} else {\n\t\tvar resultMessage = []byte(`{ \"status\": \"error\" }`)\n\t\thttp.Post(projectURL, \"application/json\", bytes.NewBuffer(resultMessage))\n\t\tfmt.Printf(\"Send failure webhook to DatoCMS for project %s\\n\", args.ProjectID)\n\t}\n\n\treturn nil\n}", 
"func New(language string) (Compiler, error) {\n\tif len(language) <= 0 {\n\t\treturn nil, compiler_error.ErrEmptyLanguage\n\t}\n\n\tcnst := constructor(language)\n\tif cnst == nil {\n\t\treturn nil, fmt.Errorf(\"invalid compiler language: %s\", language)\n\t}\n\n\tcmp, err := cnst()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to create compiler: %s\", err)\n\t}\n\n\treturn cmp, nil\n}", "func try_compile_plugin(tag string) {\n filename := \"../plugins/\" + tag + \".go\"\n if file_exists(filename) {\n compile_plugin(filename, tag)\n }\n}", "func (f *RemoteRuntime) Exec(ctx context.Context, req *kubeapi.ExecRequest) (*kubeapi.ExecResponse, error) {\n\treturn f.RuntimeService.Exec(ctx, req)\n}", "func Compile(source string) error {\n\tpage := buildPage()\n\n\tcreateRSS(page)\n\tcreateHome(page)\n\tcreateIssues(page)\n\n\tCopyDir(\"themes/yeo/assets\", \"public/assets\")\n\tCopyDir(\"static/\", \"public/\")\n\n\treturn nil\n}", "func Compile(input []byte) Program {\n\ttokens := Tokenize(input)\n\t//remove whitespace tokens - they're unnecessary past this point\n\tvar newTokens []Token\n\tfor _, token := range tokens {\n\t\tif token.tokType != Whitespace {\n\t\t\tnewTokens = append(newTokens, token)\n\t\t}\n\t}\n\ttokens = newTokens\n\n\t//TokenDebugPrint(tokens)\n\n\t//Split the stream of tokens into statements\n\tvar curStatement Statement\n\tvar statements []Statement\n\tfor _, token := range tokens {\n\t\tif token.tokType == Punctuation && token.value == \".\" {\n\t\t\tstatements = append(statements, curStatement)\n\t\t\tcurStatement = Statement{}\n\t\t} else {\n\t\t\t//no need to carry the period past here\n\t\t\tcurStatement = append(curStatement, token)\n\t\t}\n\t}\n\treturn statements\n}", "func (script BuildScript) Exec(ctx context.Context, buildContext *BuildContext) error {\n\tn := len(script)\n\tfor i, s := range script {\n\t\tif buildContext.Stderr != nil {\n\t\t\tfmt.Fprintf(buildContext.Stderr, \"[%d/%d] %s\\n\", i+1, n, s)\n\t\t}\n\t\tif err 
:= s.Exec(ctx, buildContext); err != nil {\n\t\t\treturn fmt.Errorf(\"exec step %d: %s: %w\", i, s, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (c *Compiler) Compile(modules map[string]*Module) {\n\t// TODO(tsandall): should the modules be deep copied?\n\tc.Modules = modules\n\tc.compile()\n}", "func buildExecutable() {\n\tvar executables []string\n\tvar execFilled int\n\n\t// check if there's a main package:\n\tif goPackages.GetMainCount() == 0 {\n\t\tlogger.Error(\"No main package found.\\n\")\n\t\tos.Exit(1)\n\t}\n\n\t// multiple main, no command file from command line and no -a -> error\n\tif (goPackages.GetMainCount() > 1) && (flag.NArg() == 0) && !*flagBuildAll {\n\t\tlogger.Error(\"Multiple files found with main function.\\n\")\n\t\tlogger.ErrorContinue(\"Please specify one or more as command line parameter or\\n\")\n\t\tlogger.ErrorContinue(\"run gobuild with -a. Available main files are:\\n\")\n\t\tfor _, fn := range goPackages.GetMainFilenames() {\n\t\t\tlogger.ErrorContinue(\"\\t %s\\n\", fn)\n\t\t}\n\t\tos.Exit(1)\n\t}\n\n\t// compile all needed packages\n\tif flag.NArg() > 0 {\n\t\tif *flagRunExec {\n\t\t\texecutables = make([]string, flag.NArg())\n\t\t}\n\t\tfor _, fn := range flag.Args() {\n\t\t\tmainPack, exists := goPackages.GetMain(fn, !*flagSingleMainFile)\n\t\t\tif !exists {\n\t\t\t\tlogger.Error(\"File %s not found.\\n\", fn)\n\t\t\t\treturn // or os.Exit?\n\t\t\t}\n\n\t\t\tif compile(mainPack) {\n\t\t\t\t// link everything together\n\t\t\t\tif link(mainPack) {\n\t\t\t\t\tif *flagRunExec {\n\t\t\t\t\t\texecutables[execFilled] = outputDirPrefix + mainPack.OutputFile\n\t\t\t\t\t\texecFilled++\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tlinkErrors = true\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlogger.Error(\"Can't link executable because of compile errors.\\n\")\n\t\t\t\tcompileErrors = true\n\t\t\t}\n\t\t}\n\t} else {\n\t\tif *flagRunExec {\n\t\t\texecutables = make([]string, goPackages.GetMainCount())\n\t\t}\n\t\tfor _, mainPack := range 
goPackages.GetMainPackages(!*flagSingleMainFile) {\n\n\t\t\tif compile(mainPack) {\n\t\t\t\tif link(mainPack) {\n\t\t\t\t\tif *flagRunExec {\n\t\t\t\t\t\texecutables[execFilled] = outputDirPrefix + mainPack.OutputFile\n\t\t\t\t\t\texecFilled++\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tlinkErrors = true\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlogger.Error(\"Can't link executable because of compile errors.\\n\")\n\t\t\t\tcompileErrors = true\n\t\t\t}\n\t\t}\n\t}\n\n\tif *flagRunExec && !linkErrors && !compileErrors {\n\t\tfor i := 0; i < execFilled; i++ {\n\t\t\trunExec([]string{executables[i]})\n\t\t}\n\t}\n}", "func setupCompileBuilder(lc *fs_tool.LifeCycle, sdk pb.Sdk, executorConfig *environment.ExecutorConfig) *executors.CompileBuilder {\n\tfilePath := lc.GetAbsoluteExecutableFilePath()\n\tval := setupValidators(sdk, filePath)\n\n\tcompileBuilder := executors.NewExecutorBuilder().\n\t\tWithValidator().\n\t\tWithSdkValidators(val).\n\t\tWithCompiler()\n\n\tswitch sdk {\n\tcase pb.Sdk_SDK_JAVA:\n\t\tworkingDir := lc.GetAbsoluteExecutableFilesFolderPath()\n\n\t\tcompileBuilder = compileBuilder.\n\t\t\tWithCommand(executorConfig.CompileCmd).\n\t\t\tWithArgs(executorConfig.CompileArgs).\n\t\t\tWithFileName(filePath).\n\t\t\tWithWorkingDir(workingDir)\n\t}\n\treturn compileBuilder\n}", "func Compile(spec *openapi.Spec, options ...Option) (*protobuf.Package, error) {\n\tc := newCompileCtx(spec, options...)\n\tc.pushParent(c.pkg)\n\n\tif c.annotate {\n\t\tc.addImport(\"google/api/annotations.proto\")\n\t}\n\n\tif err := c.compileGlobalOptions(spec.GlobalOptions); err != nil {\n\t\treturn nil, errors.Wrap(err, `failed to compile global options`)\n\t}\n\n\t// compile all definitions\n\tif err := c.compileDefinitions(spec.Definitions); err != nil {\n\t\treturn nil, errors.Wrap(err, `failed to compile definitions`)\n\t}\n\tif err := c.compileParameters(spec.Parameters); err != nil {\n\t\treturn nil, errors.Wrap(err, `failed to compile parameters`)\n\t}\n\tif err := 
c.compileResponses(spec.Responses); err != nil {\n\t\treturn nil, errors.Wrap(err, `failed to compile global responses`)\n\t}\n\n\tp2, err := protobuf.Resolve(c.pkg, c.getTypeFromReference)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, `failed to resolve references`)\n\t}\n\t*(c.pkg) = *(p2.(*protobuf.Package))\n\n\t// compile extensions\n\tc.phase = phaseCompileExtensions\n\tfor _, ext := range spec.Extensions {\n\t\te, err := c.compileExtension(ext)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, `failed to compile extension`)\n\t\t}\n\t\tc.pkg.AddType(e)\n\t}\n\n\t// compile the paths\n\tif !c.skipRpcs {\n\t\tc.phase = phaseCompilePaths\n\t\tif err := c.compilePaths(spec.Paths); err != nil {\n\t\t\treturn nil, errors.Wrap(err, `failed to compile paths`)\n\t\t}\n\t}\n\n\treturn c.pkg, nil\n}", "func TestCompile(t *testing.T) {\n\treturn // avoid usually\n\tcode, _ := translateSymbols(testCode)\n\terr := ioutil.WriteFile(\"/tmp/testcode.go\", code, 0666)\n\tif err != nil {\n\t\tt.Errorf(\"write file failed: %v\", err)\n\t}\n}", "func Compile(constraint string) (go_version.Constraints, error) {\n\treturn go_version.NewConstraint(constraint)\n}", "func (c *Cmds) Compile() {\n\tvar cmp compiler\n\tcmp.compile(c.parseTree)\n\tc.prog = cmp.prog()\n\treturn\n}", "func exec(fl FileStructure) {\n\tdirBuff := loadTemplate(fl.TargetDir, fl.DataSource)\n\tdirName := string(dirBuff)\n\n\tif fl.Skip {\n\t\tgoto execChild\n\t}\n\n\tif _, err := os.Stat(dirName); os.IsExist(err) {\n\t\tpanic(err)\n\t}\n\n\tif fl.IsDir {\n\t\t_, err := os.Stat(dirName)\n\t\tif os.IsNotExist(err) {\n\t\t\tfmt.Printf(\"creating %s...\\n\", dirName)\n\t\t\tif errDir := os.Mkdir(dirName, 0700); errDir != nil {\n\t\t\t\tfmt.Println(\"mkdir err:\", errDir)\n\t\t\t\tpanic(errDir)\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif fl.FileName != \"\" {\n\t\tvar buff []byte\n\t\tif fl.FromTemplate {\n\t\t\tif fl.Source != \"\" {\n\t\t\t\tbuff = loadTemplate(fl.Source, fl.DataSource)\n\t\t\t} else 
{\n\t\t\t\tlastDir := filepath.Dir(fl.TargetDir)\n\t\t\t\tbuff = defaultDataSource(lastDir[strings.LastIndex(lastDir, \"/\")+1:])\n\t\t\t}\n\t\t} else {\n\t\t\tbuff = []byte(fl.Source)\n\t\t}\n\t\tif len(dirName) > 0 {\n\t\t\tdirName = strings.TrimSuffix(dirName, \"/\")\n\t\t\tif err := ioutil.WriteFile(dirName+\"/\"+fl.FileName, buff, 0644); err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t} else {\n\t\t\tif err := ioutil.WriteFile(fl.FileName, buff, 0644); err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t}\n\n\t}\n\nexecChild:\n\tfor _, child := range fl.Childs {\n\t\tchild.TargetDir = dirName + child.TargetDir\n\t\texec(child)\n\t}\n}", "func (c *CachedCompiler) Compiler(rawQuery string) (*gojq.Code, error) {\n\tcode, found := c.cache.Get(rawQuery)\n\tif found {\n\t\treturn code.(*gojq.Code), nil\n\t}\n\tcode, err := c.compiler(rawQuery)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc.cache.Add(rawQuery, code)\n\treturn code.(*gojq.Code), nil\n}", "func (c *client) compileInline(p *yaml.Build, depth int) (*yaml.Build, error) {\n\tnewPipeline := *p\n\tnewPipeline.Templates = yaml.TemplateSlice{}\n\n\t// return if max template depth has been reached\n\tif depth == 0 {\n\t\tretErr := fmt.Errorf(\"max template depth of %d exceeded\", c.TemplateDepth)\n\n\t\treturn nil, retErr\n\t}\n\n\tfor _, template := range p.Templates {\n\t\tbytes, err := c.getTemplate(template, template.Name)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tformat := template.Format\n\n\t\t// set the default format to golang if the user did not define anything\n\t\tif template.Format == \"\" {\n\t\t\tformat = constants.PipelineTypeGo\n\t\t}\n\n\t\tparsed, _, err := c.Parse(bytes, format, template)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// if template parsed contains a template reference, recurse with decremented depth\n\t\tif len(parsed.Templates) > 0 && parsed.Metadata.RenderInline {\n\t\t\tparsed, err = c.compileInline(parsed, depth-1)\n\t\t\tif err != nil 
{\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\tswitch {\n\t\tcase len(parsed.Environment) > 0:\n\t\t\tfor key, value := range parsed.Environment {\n\t\t\t\tnewPipeline.Environment[key] = value\n\t\t\t}\n\n\t\t\tfallthrough\n\t\tcase len(parsed.Stages) > 0:\n\t\t\t// ensure all templated steps inside stages have template prefix\n\t\t\tfor stgIndex, newStage := range parsed.Stages {\n\t\t\t\tparsed.Stages[stgIndex].Name = fmt.Sprintf(\"%s_%s\", template.Name, newStage.Name)\n\n\t\t\t\tfor index, newStep := range newStage.Steps {\n\t\t\t\t\tparsed.Stages[stgIndex].Steps[index].Name = fmt.Sprintf(\"%s_%s\", template.Name, newStep.Name)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tnewPipeline.Stages = append(newPipeline.Stages, parsed.Stages...)\n\n\t\t\tfallthrough\n\t\tcase len(parsed.Steps) > 0:\n\t\t\t// ensure all templated steps have template prefix\n\t\t\tfor index, newStep := range parsed.Steps {\n\t\t\t\tparsed.Steps[index].Name = fmt.Sprintf(\"%s_%s\", template.Name, newStep.Name)\n\t\t\t}\n\n\t\t\tnewPipeline.Steps = append(newPipeline.Steps, parsed.Steps...)\n\n\t\t\tfallthrough\n\t\tcase len(parsed.Services) > 0:\n\t\t\tnewPipeline.Services = append(newPipeline.Services, parsed.Services...)\n\t\t\tfallthrough\n\t\tcase len(parsed.Secrets) > 0:\n\t\t\tnewPipeline.Secrets = append(newPipeline.Secrets, parsed.Secrets...)\n\t\tdefault:\n\t\t\t//nolint:lll // ignore long line length due to error message\n\t\t\treturn nil, fmt.Errorf(\"empty template %s provided: template must contain secrets, services, stages or steps\", template.Name)\n\t\t}\n\n\t\tif len(newPipeline.Stages) > 0 && len(newPipeline.Steps) > 0 {\n\t\t\t//nolint:lll // ignore long line length due to error message\n\t\t\treturn nil, fmt.Errorf(\"invalid template %s provided: templates cannot mix stages and steps\", template.Name)\n\t\t}\n\t}\n\n\treturn &newPipeline, nil\n}", "func DebugScript(script string) *exec.Cmd {\n\tscriptPath := filepath.Join(rootPath, \"debug.js\")\n\tif err := 
ioutil.WriteFile(scriptPath, []byte(script), 0644); err != nil {\n\t\tpanic(err)\n\t}\n\tpath := filepath.Join(rootPath, \"node_modules\", \".bin\", \"node-debug\")\n\tif _, err := exec.LookPath(path); err != nil {\n\t\tfmt.Print(\"Installing node-inspector...\")\n\t\tInstallPackages(\"node-inspector\")\n\t\tfmt.Println(\" done\")\n\t}\n\tcmd := exec.Command(path, \"debug.js\")\n\tcmd.Dir = rootPath\n\treturn cmd\n}", "func (e *ExpressionWithSeverity) compile() error {\n\tr, err := regexp.Compile(e.Expression)\n\tif err != nil {\n\t\treturn err\n\t}\n\te.compiled = r\n\treturn nil\n}", "func (c *Context) Exec(filename string) error {\n\tf := C.CString(filename)\n\tdefer C.free(unsafe.Pointer(f))\n\n\t_, err := C.context_exec(c.context, f)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error executing script '%s' in context\", filename)\n\t}\n\n\treturn nil\n}", "func NewCompiler() *Compiler {\n\n\tc := &Compiler{\n\t\tModules: map[string]*Module{},\n\t\tExports: newExports(),\n\t\tGlobals: map[*Module]map[Var]Value{},\n\t\tRuleGraph: map[*Rule]map[*Rule]struct{}{},\n\t\tModuleTree: NewModuleTree(nil),\n\t\tRuleTree: NewRuleTree(nil),\n\t}\n\n\tc.stages = []stage{\n\t\tstage{c.setExports, \"setExports\"},\n\t\tstage{c.setGlobals, \"setGlobals\"},\n\t\tstage{c.setModuleTree, \"setModuleTree\"},\n\t\tstage{c.setRuleTree, \"setRuleTree\"},\n\t\tstage{c.checkSafetyHead, \"checkSafetyHead\"},\n\t\tstage{c.checkSafetyBody, \"checkSafetyBody\"},\n\t\tstage{c.checkBuiltinArgs, \"checkBuiltinArgs\"},\n\t\tstage{c.resolveAllRefs, \"resolveAllRefs\"},\n\t\tstage{c.setRuleGraph, \"setRuleGraph\"},\n\t\tstage{c.checkRecursion, \"checkRecursion\"},\n\t}\n\n\treturn c\n}", "func CompileOptionUsed(optName string) bool {\n\tcOptName := C.CString(optName)\n\tdefer C.free(unsafe.Pointer(cOptName))\n\treturn C.sqlite3_compileoption_used(cOptName) == 1\n}", "func (b *Builder) CompileAction(mode, depMode BuildMode, p *load.Package) *Action {\n\tvetOnly := mode&ModeVetOnly != 0\n\tmode &^= 
ModeVetOnly\n\n\tif mode != ModeBuild && (p.Internal.Local || p.Module != nil) && p.Target == \"\" {\n\t\t// Imported via local path or using modules. No permanent target.\n\t\tmode = ModeBuild\n\t}\n\tif mode != ModeBuild && p.Name == \"main\" {\n\t\t// We never install the .a file for a main package.\n\t\tmode = ModeBuild\n\t}\n\n\t// Construct package build action.\n\ta := b.cacheAction(\"build\", p, func() *Action {\n\t\ta := &Action{\n\t\t\tMode: \"build\",\n\t\t\tPackage: p,\n\t\t\tFunc: (*Builder).build,\n\t\t\tObjdir: b.NewObjdir(),\n\t\t}\n\n\t\tif p.Error == nil || !p.Error.IsImportCycle {\n\t\t\tfor _, p1 := range p.Internal.Imports {\n\t\t\t\ta.Deps = append(a.Deps, b.CompileAction(depMode, depMode, p1))\n\t\t\t}\n\t\t}\n\n\t\tif p.Standard {\n\t\t\tswitch p.ImportPath {\n\t\t\tcase \"builtin\", \"unsafe\":\n\t\t\t\t// Fake packages - nothing to build.\n\t\t\t\ta.Mode = \"built-in package\"\n\t\t\t\ta.Func = nil\n\t\t\t\treturn a\n\t\t\t}\n\n\t\t\t// gccgo standard library is \"fake\" too.\n\t\t\tif cfg.BuildToolchainName == \"gccgo\" {\n\t\t\t\t// the target name is needed for cgo.\n\t\t\t\ta.Mode = \"gccgo stdlib\"\n\t\t\t\ta.Target = p.Target\n\t\t\t\ta.Func = nil\n\t\t\t\treturn a\n\t\t\t}\n\t\t}\n\n\t\treturn a\n\t})\n\n\t// Find the build action; the cache entry may have been replaced\n\t// by the install action during (*Builder).installAction.\n\tbuildAction := a\n\tswitch buildAction.Mode {\n\tcase \"build\", \"built-in package\", \"gccgo stdlib\":\n\t\t// ok\n\tcase \"build-install\":\n\t\tbuildAction = a.Deps[0]\n\tdefault:\n\t\tpanic(\"lost build action: \" + buildAction.Mode)\n\t}\n\tbuildAction.needBuild = buildAction.needBuild || !vetOnly\n\n\t// Construct install action.\n\tif mode == ModeInstall || mode == ModeBuggyInstall {\n\t\ta = b.installAction(a, mode)\n\t}\n\n\treturn a\n}", "func backgroundCompiler(ch chan compilerJob) {\n\tn := 0\n\tfor job := range ch {\n\t\tn++\n\t\terr := job.Run()\n\t\tif err != nil {\n\t\t\tbuf := 
&bytes.Buffer{}\n\t\t\tbuf.WriteString(err.Error())\n\t\t\tjob.ResultErrors <- buf.Bytes()\n\t\t}\n\t\tif n%100 == 1 {\n\t\t\tcleanupCompileCache()\n\t\t}\n\t}\n}", "func (p *PipelineBuild) Translate(lang string) {\n\tfor ks := range p.Stages {\n\t\tfor kj := range p.Stages[ks].PipelineBuildJobs {\n\t\t\tp.Stages[ks].PipelineBuildJobs[kj].Translate(lang)\n\t\t}\n\t}\n}", "func (ep EntryPoint) Compile() []string {\n\treturn quoteAll(ep.Command)\n}", "func (cx *Context) Exec(source string) (err error) {\n\treturn cx.exec(source, \"exec\")\n}", "func (tm *CompilationTelemetry) CompilationEnabled() bool {\n\treturn tm.compilationEnabled\n}", "func withGradleStaticCompilation(p *Project, build procedure) procedure {\n\tbuildFile := filepath.Join(p.Path, \"build.gradle\")\n\n\treturn withFileRestoration(buildFile, func() error {\n\t\t// Append extra configuration to build.gradle\n\t\tfrom, err := os.Open(pathExtraBuildGradle)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tto, err := os.OpenFile(buildFile, os.O_APPEND|os.O_WRONLY, 0644)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif _, err := io.Copy(to, from); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Close files\n\t\terr = from.Close()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = to.Close()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Execute build function\n\t\treturn build()\n\t})\n}", "func (c *client) compileStages(p *yaml.Build, _pipeline *library.Pipeline, tmpls map[string]*yaml.Template, r *pipeline.RuleData) (*pipeline.Build, *library.Pipeline, error) {\n\tvar err error\n\n\t// check if the pipeline disabled the clone\n\tif p.Metadata.Clone == nil || *p.Metadata.Clone {\n\t\t// inject the clone stage\n\t\tp, err = c.CloneStage(p)\n\t\tif err != nil {\n\t\t\treturn nil, _pipeline, err\n\t\t}\n\t}\n\n\t// inject the init stage\n\tp, err = c.InitStage(p)\n\tif err != nil {\n\t\treturn nil, _pipeline, err\n\t}\n\n\t// inject the templates into the stages\n\tp, err = 
c.ExpandStages(p, tmpls, r)\n\tif err != nil {\n\t\treturn nil, _pipeline, err\n\t}\n\n\tif c.ModificationService.Endpoint != \"\" {\n\t\t// send config to external endpoint for modification\n\t\tp, err = c.modifyConfig(p, c.build, c.repo)\n\t\tif err != nil {\n\t\t\treturn nil, _pipeline, err\n\t\t}\n\t}\n\n\t// validate the yaml configuration\n\terr = c.Validate(p)\n\tif err != nil {\n\t\treturn nil, _pipeline, err\n\t}\n\n\t// Create some default global environment inject vars\n\t// these are used below to overwrite to an empty\n\t// map if they should not be injected into a container\n\tenvGlobalServices, envGlobalSecrets, envGlobalSteps := p.Environment, p.Environment, p.Environment\n\n\tif !p.Metadata.HasEnvironment(\"services\") {\n\t\tenvGlobalServices = make(raw.StringSliceMap)\n\t}\n\n\tif !p.Metadata.HasEnvironment(\"secrets\") {\n\t\tenvGlobalSecrets = make(raw.StringSliceMap)\n\t}\n\n\tif !p.Metadata.HasEnvironment(\"steps\") {\n\t\tenvGlobalSteps = make(raw.StringSliceMap)\n\t}\n\n\t// inject the environment variables into the services\n\tp.Services, err = c.EnvironmentServices(p.Services, envGlobalServices)\n\tif err != nil {\n\t\treturn nil, _pipeline, err\n\t}\n\n\t// inject the environment variables into the secrets\n\tp.Secrets, err = c.EnvironmentSecrets(p.Secrets, envGlobalSecrets)\n\tif err != nil {\n\t\treturn nil, _pipeline, err\n\t}\n\n\t// inject the environment variables into the stages\n\tp.Stages, err = c.EnvironmentStages(p.Stages, envGlobalSteps)\n\tif err != nil {\n\t\treturn nil, _pipeline, err\n\t}\n\n\t// inject the substituted environment variables into the stages\n\tp.Stages, err = c.SubstituteStages(p.Stages)\n\tif err != nil {\n\t\treturn nil, _pipeline, err\n\t}\n\n\t// inject the scripts into the stages\n\tp.Stages, err = c.ScriptStages(p.Stages)\n\tif err != nil {\n\t\treturn nil, _pipeline, err\n\t}\n\n\t// create executable representation\n\tbuild, err := c.TransformStages(r, p)\n\tif err != nil {\n\t\treturn nil, 
_pipeline, err\n\t}\n\n\treturn build, _pipeline, nil\n}", "func RunCompileTask(ctx context.Context, g *gsFileLocation, task *ac_util.CompileTask, pathToCompileScript string) error {\n\t// Blocking call to wait for an available checkout.\n\tcheckoutPath := <-AvailableCheckoutsChan\n\tdefer addToCheckoutsChannel(checkoutPath)\n\n\t// Step 1: Find an available Android checkout and update the CompileTask\n\t// with the checkout. This is done for the UI and for easier debugging.\n\tdatastoreKey := ac_util.GetTaskDSKey(task.LunchTarget, task.Issue, task.PatchSet)\n\ttask.Checkout = path.Base(checkoutPath)\n\tif err := UpdateCompileTask(ctx, g, task); err != nil {\n\t\treturn fmt.Errorf(\"Could not update compile task with Key %s: %s\", datastoreKey.Name, err)\n\t}\n\n\tskiaPath := filepath.Join(checkoutPath, \"external\", \"skia\")\n\tskiaCheckout, err := git.NewCheckout(ctx, ANDROID_SKIA_URL, path.Dir(skiaPath))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to create GitDir from %s: %s\", skiaPath, err)\n\t}\n\n\t// Step 2: Make sure the Skia checkout within Android is clean. We do this\n\t// before updating to be extra careful and make the server more robust.\n\tif err := cleanSkiaCheckout(ctx, skiaCheckout, checkoutPath); err != nil {\n\t\treturn fmt.Errorf(\"Error when cleaning Skia checkout: %s\", err)\n\t}\n\n\t// Step 3: Update the Android checkout.\n\tif err := updateCheckout(ctx, checkoutPath, false); err != nil {\n\t\tac_util.UpdateCheckoutSyncFailureMetric(true, path.Base(checkoutPath))\n\t\treturn fmt.Errorf(\"Error when updating checkout in %s: %s\", checkoutPath, err)\n\t}\n\tac_util.UpdateCheckoutSyncFailureMetric(false, path.Base(checkoutPath))\n\n\t// Step 4: Get contents of SkUserConfigManual.h. 
We will use this after\n\t// updating Skia to master/origin and before compiling.\n\tuserConfigContent, err := ioutil.ReadFile(filepath.Join(skiaPath, android_skia_checkout.SkUserConfigManualRelPath))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not read from %s: %s\", android_skia_checkout.SkUserConfigManualRelPath, err)\n\t}\n\n\t// Add origin remote that points to the Skia repo.\n\tif err := skiaCheckout.AddRemote(ctx, \"origin\", SKIA_REPO_URL); err != nil {\n\t\treturn fmt.Errorf(\"Error when adding origin remote: %s\", err)\n\t}\n\t// Fetch origin without updating checkout\n\tif err := skiaCheckout.Fetch(ctx); err != nil {\n\t\treturn fmt.Errorf(\"Error when fetching origin: %s\", err)\n\t}\n\n\t// Step 5: Create a branch and have it track origin/master.\n\tif _, err := skiaCheckout.Git(ctx, \"checkout\", \"-b\", TRY_BRANCH_NAME, \"-t\", \"origin/master\"); err != nil {\n\t\treturn fmt.Errorf(\"Error when creating %s in %s: %s\", TRY_BRANCH_NAME, skiaPath, err)\n\t}\n\n\t// Step 6: If it is a trybot run then apply the patch else apply the hash.\n\ttrybotRun := (task.Hash == \"\")\n\tif trybotRun {\n\t\t// Apply Patch.\n\t\tif err := applyPatch(ctx, skiaCheckout, task.Issue, task.PatchSet); err != nil {\n\t\t\treturn fmt.Errorf(\"Could not apply the patch with issue %d and patchset %d: %s\", task.Issue, task.PatchSet, err)\n\t\t}\n\t\t// Check to see if patch is from origin/master.\n\t\tfromMaster, err := checkPatchFromMasterBranch(task.Issue)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Could not check if commit is from origin/master: %s\", err)\n\t\t}\n\t\ttask.IsMasterBranch = fromMaster\n\t\tif !task.IsMasterBranch {\n\t\t\tif err := UpdateCompileTask(ctx, g, task); err != nil {\n\t\t\t\treturn fmt.Errorf(\"Could not update compile task with Key %s: %s\", datastoreKey.Name, err)\n\t\t\t}\n\t\t\tsklog.Infof(\"Patch with issue %d and patchset %d is not on master branch.\", task.Issue, task.PatchSet)\n\t\t\treturn nil\n\t\t}\n\t\t// Rebase the 
checkout after applying the patch.\n\t\tif _, err := skiaCheckout.Git(ctx, \"rebase\"); err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to rebase in %s: %s\", skiaCheckout.Dir(), err)\n\t\t}\n\t} else {\n\t\t// Checkout the specified Skia hash.\n\t\t// TODO(rmistry): This has lots of problems, the non-trybot bot could fail if\n\t\t// Android tree is red. Maybe non-trybot path should not be supported?\n\t\tif _, err := skiaCheckout.Git(ctx, \"checkout\", task.Hash); err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to checkout Skia hash %s: %s\", task.Hash, err)\n\t\t}\n\t\t// Check to see if hash is from origin/master.\n\t\tfromMaster, err := checkCommitFromMasterBranch(ctx, skiaCheckout)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Could not check if commit is from origin/master: %s\", err)\n\t\t}\n\t\ttask.IsMasterBranch = fromMaster\n\t\tif !task.IsMasterBranch {\n\t\t\tif err := UpdateCompileTask(ctx, g, task); err != nil {\n\t\t\t\treturn fmt.Errorf(\"Could not update compile task with ID %d: %s\", datastoreKey.ID, err)\n\t\t\t}\n\t\t\tsklog.Infof(\"Hash %s is not on master branch.\", task.Hash)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t// Step 7: Prepare the Skia checkout for compilation: Run gn_to_bp.py and\n\t// create SkUserConfigManual.h from Step 5.\n\tif err := prepareSkiaCheckoutForCompile(ctx, userConfigContent, skiaCheckout); err != nil {\n\t\treturn fmt.Errorf(\"Could not prepare Skia checkout for compile: %s\", err)\n\t}\n\n\t// Step 8: Do the with patch or with hash compilation and update CompileTask\n\t// with link to logs and whether it was successful.\n\twithPatchSuccess, gsWithPatchLink, err := compileCheckout(ctx, checkoutPath, task.LunchTarget, task.MMMATargets, fmt.Sprintf(\"%s_withpatch_\", datastoreKey.Name), pathToCompileScript)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error when compiling checkout withpatch at %s: %s\", checkoutPath, err)\n\t}\n\ttask.WithPatchSucceeded = withPatchSuccess\n\ttask.WithPatchLog = gsWithPatchLink\n\tif err := 
UpdateCompileTask(ctx, g, task); err != nil {\n\t\treturn fmt.Errorf(\"Could not update compile task with Key %s: %s\", datastoreKey.Name, err)\n\t}\n\n\t// Step 9: If the compilation failed and if it is a trybot run then verify\n\t// that the tree is not broken by building at Skia HEAD. Update CompileTask\n\t// with link to logs and whether the no patch run was successful.\n\tif !withPatchSuccess && trybotRun {\n\t\t// If this failed then check to see if a build without the patch will succeed.\n\t\tif err := resetSkiaCheckout(ctx, skiaCheckout, \"origin/master\"); err != nil {\n\t\t\treturn fmt.Errorf(\"Error when resetting Skia checkout: %s\", err)\n\t\t}\n\t\t// Checkout origin/master.\n\t\tif _, err := skiaCheckout.Git(ctx, \"checkout\", \"origin/master\"); err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to checkout origin/master: %s\", err)\n\t\t}\n\t\tif err := prepareSkiaCheckoutForCompile(ctx, userConfigContent, skiaCheckout); err != nil {\n\t\t\treturn fmt.Errorf(\"Could not prepare Skia checkout for compile: %s\", err)\n\t\t}\n\t\t// Do the no patch compilation.\n\t\tnoPatchSuccess, gsNoPatchLink, err := compileCheckout(ctx, checkoutPath, task.LunchTarget, task.MMMATargets, fmt.Sprintf(\"%s_nopatch_\", datastoreKey.Name), pathToCompileScript)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error when compiling checkout nopatch at %s: %s\", checkoutPath, err)\n\t\t}\n\t\tac_util.UpdateAndroidTreeBrokenMetric(!noPatchSuccess)\n\t\ttask.NoPatchSucceeded = noPatchSuccess\n\t\ttask.NoPatchLog = gsNoPatchLink\n\t\tif err := UpdateCompileTask(ctx, g, task); err != nil {\n\t\t\treturn fmt.Errorf(\"Could not update compile task with Key %s: %s\", datastoreKey.Name, err)\n\t\t}\n\t} else {\n\t\t// The with patch run succeeded. 
Mark the android_compile_tree_broken metric accordingly.\n\t\tac_util.UpdateAndroidTreeBrokenMetric(false)\n\t}\n\n\treturn nil\n}", "func (c *FileCompiler) Compile(ctx context.Context) error {\n\tf := c.ast\n\n\tpkgPath := c.pkg.PkgPath\n\toutputFilePath := translateGoFilePathToTypescriptFilePath(pkgPath, filepath.Base(c.fullPath))\n\toutputFilePathAbs := filepath.Join(c.compilerConfig.OutputPathRoot, outputFilePath)\n\n\tif err := os.MkdirAll(filepath.Dir(outputFilePathAbs), 0755); err != nil {\n\t\treturn err\n\t}\n\n\tof, err := os.OpenFile(outputFilePathAbs, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer of.Close()\n\n\tc.codeWriter = NewTSCodeWriter(of)\n\tgoWriter := NewGoToTSCompiler(c.codeWriter)\n\tgoWriter.WriteDecls(f.Decls)\n\n\treturn nil\n}" ]
[ "0.5822461", "0.5743868", "0.53007513", "0.5181535", "0.51645374", "0.5097689", "0.500081", "0.47883692", "0.47864297", "0.47329405", "0.46677637", "0.4661549", "0.4639248", "0.4619214", "0.45559263", "0.454181", "0.45335022", "0.44954574", "0.44724452", "0.444016", "0.4402881", "0.43955794", "0.43866083", "0.43865132", "0.43644357", "0.4335648", "0.43243995", "0.43232846", "0.43073818", "0.42877987", "0.4277036", "0.42608327", "0.42518166", "0.42227098", "0.42194158", "0.4219287", "0.42158926", "0.41815606", "0.41797018", "0.41747665", "0.41717955", "0.41687647", "0.41686103", "0.41437912", "0.41420242", "0.41324502", "0.41019902", "0.40899515", "0.40752614", "0.40741888", "0.406555", "0.4062664", "0.405427", "0.40434763", "0.40387774", "0.40377194", "0.4036238", "0.40351674", "0.4034241", "0.40342012", "0.40184638", "0.4013705", "0.40017194", "0.40016097", "0.39970806", "0.39791313", "0.39790973", "0.39450645", "0.39414775", "0.39212412", "0.39212218", "0.3895796", "0.38795027", "0.38723847", "0.3865191", "0.38565382", "0.3853601", "0.38459736", "0.3844965", "0.3844652", "0.38416922", "0.38362393", "0.38360542", "0.38357198", "0.38323653", "0.38224512", "0.38158414", "0.38142112", "0.38141963", "0.38055474", "0.38045222", "0.37962887", "0.3795799", "0.37942263", "0.3779316", "0.37789539", "0.3775603", "0.37672135", "0.3748366", "0.37410235" ]
0.77606845
0
CompileByName will compile solution if not yet compiled. This method will search the language and solution by its name and then call Compile method. This method will return an error if the language or solution with it's name doesn't exist.
func (cptool *CPTool) CompileByName(ctx context.Context, languageName string, solutionName string, debug bool) (CompilationResult, error) { start := time.Now() language, err := cptool.GetLanguageByName(languageName) if err != nil { return CompilationResult{}, err } if cptool.logger != nil { cptool.logger.Println(logger.VERBOSE, "Compiling using language:", language.Name) } solution, err := cptool.GetSolution(solutionName, language) if err != nil { return CompilationResult{}, err } if cptool.logger != nil { cptool.logger.Println(logger.VERBOSE, "Compiling solution:", solution.Name) } result, err := cptool.Compile(ctx, solution, debug) if err != nil { return result, err } result.Duration = time.Since(start) return result, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (cptool *CPTool) Compile(ctx context.Context, solution Solution, debug bool) (CompilationResult, error) {\n\tlanguage := solution.Language\n\tif debug && !language.Debuggable {\n\t\treturn CompilationResult{}, ErrLanguageNotDebuggable\n\t}\n\n\ttargetDir := cptool.getCompiledDirectory(solution, debug)\n\tcptool.fs.MkdirAll(targetDir, os.ModePerm)\n\n\ttargetPath := cptool.getCompiledTarget(solution, debug)\n\tif cptool.logger != nil {\n\t\tcptool.logger.Println(logger.VERBOSE, \"Compiling to: \", targetPath)\n\t}\n\n\tinfo, err := cptool.fs.Stat(targetPath)\n\tif err == nil {\n\t\tcompiledTime := info.ModTime()\n\t\tif compiledTime.After(solution.LastUpdated) {\n\t\t\treturn CompilationResult{\n\t\t\t\tSkipped: true,\n\t\t\t\tTargetPath: targetPath,\n\t\t\t}, nil\n\t\t}\n\t}\n\n\tcommandPath := language.CompileScript\n\tif debug {\n\t\tcommandPath = language.DebugScript\n\t}\n\tif cptool.logger != nil {\n\t\tcptool.logger.Println(logger.VERBOSE, \"Compiling using script: \", commandPath)\n\t}\n\n\tcmd := cptool.exec.CommandContext(ctx, commandPath, solution.Path, targetPath)\n\tstderr, err := cmd.StderrPipe()\n\tif err != nil {\n\t\treturn CompilationResult{}, err\n\t}\n\n\terr = cmd.Start()\n\tif err != nil {\n\t\treturn CompilationResult{}, err\n\t}\n\n\tcompilationError, err := ioutil.ReadAll(stderr)\n\tif err != nil {\n\t\treturn CompilationResult{}, err\n\t}\n\n\terr = cmd.Wait()\n\tif err != nil {\n\t\tif cptool.logger != nil {\n\t\t\tcptool.logger.Print(logger.VERBOSE, \"Compilation script execution giving error result\")\n\t\t}\n\t\treturn CompilationResult{ErrorMessage: string(compilationError)}, err\n\t}\n\n\treturn CompilationResult{\n\t\tSkipped: false,\n\t\tTargetPath: targetPath,\n\t}, nil\n}", "func FromName(name string) (*Project, error) {\n\tif name == \"\" {\n\t\treturn nil, errors.New(\"name is empty\")\n\t}\n\tdir := workdir.ProjectDir(name)\n\tif !fileExists(dir) {\n\t\treturn nil, errors.New(\"project directory does not 
exist\")\n\t}\n\treturn &Project{Name: name}, nil\n}", "func (c *ProjectService) GetByName(name string) (*Project, *http.Response, error) {\n\tproject := new(Project)\n\tapiError := new(APIError)\n\tpath := fmt.Sprintf(\"byName/%s\", name)\n\tresp, err := c.sling.New().Get(path).Receive(project, apiError)\n\treturn project, resp, relevantError(err, *apiError)\n}", "func New(language string) (Compiler, error) {\n\tif len(language) <= 0 {\n\t\treturn nil, compiler_error.ErrEmptyLanguage\n\t}\n\n\tcnst := constructor(language)\n\tif cnst == nil {\n\t\treturn nil, fmt.Errorf(\"invalid compiler language: %s\", language)\n\t}\n\n\tcmp, err := cnst()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to create compiler: %s\", err)\n\t}\n\n\treturn cmp, nil\n}", "func Compile(input string, ops ...Option) (*vm.Program, error) {\n\tconfig := &conf.Config{\n\t\tOperators: make(map[string][]string),\n\t\tConstExprFns: make(map[string]reflect.Value),\n\t\tOptimize: true,\n\t}\n\n\tfor _, op := range ops {\n\t\top(config)\n\t}\n\n\tif err := config.Check(); err != nil {\n\t\treturn nil, err\n\t}\n\n\ttree, err := parser.Parse(input)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t_, err = checker.Check(tree, config)\n\n\t// If we have a patch to apply, it may fix out error and\n\t// second type check is needed. 
Otherwise it is an error.\n\tif err != nil && len(config.Visitors) == 0 {\n\t\treturn nil, err\n\t}\n\n\t// Patch operators before Optimize, as we may also mark it as ConstExpr.\n\tcompiler.PatchOperators(&tree.Node, config)\n\n\tif len(config.Visitors) >= 0 {\n\t\tfor _, v := range config.Visitors {\n\t\t\tast.Walk(&tree.Node, v)\n\t\t}\n\t\t_, err = checker.Check(tree, config)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif config.Optimize {\n\t\terr = optimizer.Optimize(&tree.Node, config)\n\t\tif err != nil {\n\t\t\tif fileError, ok := err.(*file.Error); ok {\n\t\t\t\treturn nil, fileError.Bind(tree.Source)\n\t\t\t}\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tprogram, err := compiler.Compile(tree, config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn program, nil\n}", "func (p rProjects) ByName(name, owner string) (*schema.Project, error) {\n\tvar project schema.Project\n\trql := model.Projects.T().GetAllByIndex(\"name\", name).Filter(r.Row.Field(\"owner\").Eq(owner))\n\tif err := model.Projects.Qs(p.session).Row(rql, &project); err != nil {\n\t\treturn nil, mcerr.ErrNotFound\n\t}\n\treturn &project, nil\n}", "func Compile(ctx context.Context, cln *client.Client, w io.Writer, mod *ast.Module, targets []codegen.Target) (solver.Request, error) {\n\terr := checker.SemanticPass(mod)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = linter.Lint(ctx, mod)\n\tif err != nil {\n\t\tfor _, span := range diagnostic.Spans(err) {\n\t\t\tfmt.Fprintln(w, span.Pretty(ctx))\n\t\t}\n\t}\n\n\terr = checker.Check(mod)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresolver, err := module.NewResolver(cln)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcg := codegen.New(cln, resolver)\n\tif solver.ConcurrencyLimiter(ctx) == nil {\n\t\tctx = solver.WithConcurrencyLimiter(ctx, semaphore.NewWeighted(defaultMaxConcurrency))\n\t}\n\treturn cg.Generate(ctx, mod, targets)\n}", "func (s projectService) GetByName(name string) (*Project, error) {\n\tif 
isEmpty(name) {\n\t\treturn nil, createInvalidParameterError(OperationGetByName, ParameterName)\n\t}\n\n\terr := validateInternalState(s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcollection, err := s.GetAll()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, item := range collection {\n\t\tif item.Name == name {\n\t\t\treturn item, nil\n\t\t}\n\t}\n\n\treturn nil, createItemNotFoundError(s.getName(), OperationGetByName, name)\n}", "func (ev *Evaler) Compile(name, text string, n *parse.Chunk) (Op, error) {\n\treturn compile(name, text, makeScope(ev.global), n)\n}", "func (c *Compiler) CompileOne(query Body) (Body, error) {\n\n\tkey := string(Wildcard.Value.(Var))\n\n\tmod := &Module{\n\t\tPackage: &Package{\n\t\t\tPath: Ref{DefaultRootDocument},\n\t\t\tLocation: query.Loc(),\n\t\t},\n\t\tRules: []*Rule{\n\t\t\t&Rule{\n\t\t\t\tName: Var(key),\n\t\t\t\tBody: query,\n\t\t\t\tLocation: query.Loc(),\n\t\t\t},\n\t\t},\n\t}\n\n\tc.Modules[key] = mod\n\tc.compile()\n\n\tif c.Failed() {\n\t\treturn nil, c.Errors[0]\n\t}\n\n\treturn c.Modules[key].Rules[0].Body, nil\n}", "func ProjectByName(name string) (*models.Project, error) {\n\tvar db = context.Config.Get(\"db.database\")\n\n\t// get mongodb session\n\tsession := context.DBSession.Copy()\n\tdefer session.Close()\n\n\tvar project = new(models.Project)\n\tcollection := session.DB(db).C(\"projects\")\n\tif err := collection.Find(bson.M{\"name\": name}).One(project); err != nil {\n\t\treturn nil, err\n\t}\n\treturn project, nil\n}", "func (mem *Mem) Compile() error {\n\tchanged := true\n\tvisited := make(map[int]bool)\n\tfor changed {\n\t\tchanged = false\n\t\tfor state := range mem.patterns {\n\t\t\tif visited[state] {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tvisited[state] = true\n\t\t\tif err := compile(mem, state); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tchanged = true\n\t\t}\n\t}\n\treturn nil\n}", "func (domain *Domain) GetSolution(name string) (*Solution, error) {\n\t// determine 
solution\n\tdomain.SolutionsX.RLock()\n\tsolution, ok := domain.Solutions[name]\n\tdomain.SolutionsX.RUnlock()\n\n\tif !ok {\n\t\treturn nil, errors.New(\"solution not found\")\n\t}\n\n\t// success\n\treturn solution, nil\n}", "func Compile(resolved *resolver.ResolvedProgram) (compiledProg *Program, err error) {\n\tdefer func() {\n\t\t// The compiler uses panic with a *compileError to signal compile\n\t\t// errors internally, and they're caught here. This avoids the\n\t\t// need to check errors everywhere.\n\t\tif r := recover(); r != nil {\n\t\t\t// Convert to compileError or re-panic\n\t\t\terr = r.(*compileError)\n\t\t}\n\t}()\n\n\tp := &Program{}\n\n\t// Reuse identical constants across entire program.\n\tindexes := constantIndexes{\n\t\tnums: make(map[float64]int),\n\t\tstrs: make(map[string]int),\n\t\tregexes: make(map[string]int),\n\t}\n\n\t// Compile functions. For functions called before they're defined or\n\t// recursive functions, we have to set most p.Functions data first, then\n\t// compile Body afterward.\n\tp.Functions = make([]Function, len(resolved.Functions))\n\tfor i, astFunc := range resolved.Functions {\n\t\tarrays := make([]bool, len(astFunc.Params))\n\t\tnumArrays := 0\n\t\tfor j, param := range astFunc.Params {\n\t\t\t_, info, _ := resolved.LookupVar(astFunc.Name, param)\n\t\t\tif info.Type == resolver.Array {\n\t\t\t\tarrays[j] = true\n\t\t\t\tnumArrays++\n\t\t\t}\n\t\t}\n\t\tcompiledFunc := Function{\n\t\t\tName: astFunc.Name,\n\t\t\tParams: astFunc.Params,\n\t\t\tArrays: arrays,\n\t\t\tNumScalars: len(astFunc.Params) - numArrays,\n\t\t\tNumArrays: numArrays,\n\t\t}\n\t\tp.Functions[i] = compiledFunc\n\t}\n\tfor i, astFunc := range resolved.Functions {\n\t\tc := compiler{resolved: resolved, program: p, indexes: indexes, funcName: astFunc.Name}\n\t\tc.stmts(astFunc.Body)\n\t\tp.Functions[i].Body = c.finish()\n\t}\n\n\t// Compile BEGIN blocks.\n\tfor _, stmts := range resolved.Begin {\n\t\tc := compiler{resolved: resolved, program: p, 
indexes: indexes}\n\t\tc.stmts(stmts)\n\t\tp.Begin = append(p.Begin, c.finish()...)\n\t}\n\n\t// Compile pattern-action blocks.\n\tfor _, action := range resolved.Actions {\n\t\tvar pattern [][]Opcode\n\t\tswitch len(action.Pattern) {\n\t\tcase 0:\n\t\t\t// Always considered a match\n\t\tcase 1:\n\t\t\tc := compiler{resolved: resolved, program: p, indexes: indexes}\n\t\t\tc.expr(action.Pattern[0])\n\t\t\tpattern = [][]Opcode{c.finish()}\n\t\tcase 2:\n\t\t\tc := compiler{resolved: resolved, program: p, indexes: indexes}\n\t\t\tc.expr(action.Pattern[0])\n\t\t\tpattern = append(pattern, c.finish())\n\t\t\tc = compiler{resolved: resolved, program: p, indexes: indexes}\n\t\t\tc.expr(action.Pattern[1])\n\t\t\tpattern = append(pattern, c.finish())\n\t\t}\n\t\tvar body []Opcode\n\t\tif len(action.Stmts) > 0 {\n\t\t\tc := compiler{resolved: resolved, program: p, indexes: indexes}\n\t\t\tc.stmts(action.Stmts)\n\t\t\tbody = c.finish()\n\t\t}\n\t\tp.Actions = append(p.Actions, Action{\n\t\t\tPattern: pattern,\n\t\t\tBody: body,\n\t\t})\n\t}\n\n\t// Compile END blocks.\n\tfor _, stmts := range resolved.End {\n\t\tc := compiler{resolved: resolved, program: p, indexes: indexes}\n\t\tc.stmts(stmts)\n\t\tp.End = append(p.End, c.finish()...)\n\t}\n\n\t// Build slices that map indexes to names (for variables and functions).\n\t// These are only used for disassembly, but set them up here.\n\tresolved.IterVars(\"\", func(name string, info resolver.VarInfo) {\n\t\tif info.Type == resolver.Array {\n\t\t\tfor len(p.arrayNames) <= info.Index {\n\t\t\t\tp.arrayNames = append(p.arrayNames, \"\")\n\t\t\t}\n\t\t\tp.arrayNames[info.Index] = name\n\t\t} else {\n\t\t\tfor len(p.scalarNames) <= info.Index {\n\t\t\t\tp.scalarNames = append(p.scalarNames, \"\")\n\t\t\t}\n\t\t\tp.scalarNames[info.Index] = name\n\t\t}\n\t})\n\tresolved.IterFuncs(func(name string, info resolver.FuncInfo) {\n\t\tfor len(p.nativeFuncNames) <= info.Index {\n\t\t\tp.nativeFuncNames = append(p.nativeFuncNames, 
\"\")\n\t\t}\n\t\tp.nativeFuncNames[info.Index] = name\n\t})\n\n\treturn p, nil\n}", "func (l *loader) compile(name string) (string, error) {\n // Copy the file to the objects directory with a different name\n // each time, to avoid retrieving the cached version.\n // Apparently the cache key is the path of the file compiled and\n // there's no way to invalidate it.\n\n f, err := ioutil.ReadFile(filepath.Join(l.pluginsDir, name + \".go\"))\n if err != nil {\n return \"\", fmt.Errorf(\"Cannot read %s.go: %v\", name, err)\n }\n\n name = fmt.Sprintf(\"%d.go\", rand.Int())\n srcPath := filepath.Join(l.objectsDir, name)\n if err := ioutil.WriteFile(srcPath, f, 0666); err != nil {\n return \"\", fmt.Errorf(\"Cannot write %s: %v\", name, err)\n }\n\n objectPath := srcPath[:len(srcPath)-3] + \".so\"\n\n cmd := exec.Command(\"go\", \"build\", \"-buildmode=plugin\", \"-o=\"+objectPath, srcPath)\n cmd.Stderr = os.Stderr\n cmd.Stdout = os.Stdout\n if err := cmd.Run(); err != nil {\n return \"\", fmt.Errorf(\"Cannot compile %s: %v\", name, err)\n }\n\n return objectPath, nil\n}", "func (svc *Compiler) Compile(source string, filename string) (*CompileResult, error) {\n\tsvc.ctx.Global().Set(\"source\", source)\n\tsvc.ctx.Global().Set(\"filename\", filename)\n\tval, err := svc.ctx.RunScript(\"compile(source, { filename });\", \"compile_call\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"%+v\", err)\n\t}\n\tresult := CompileResult{}\n\terr = json.Unmarshal([]byte(val.String()), &result)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &result, nil\n}", "func (dp *DummyProject) Get(name string) *project.Project {\n\tfor _, p := range projects {\n\t\tif p.Name == name {\n\t\t\treturn p\n\t\t}\n\t}\n\n\treturn nil\n}", "func (_BREMFactory *BREMFactoryCaller) GetProjectByName(opts *bind.CallOpts, _projectName string) (common.Address, error) {\n\tvar (\n\t\tret0 = new(common.Address)\n\t)\n\tout := ret0\n\terr := _BREMFactory.contract.Call(opts, out, \"getProjectByName\", 
_projectName)\n\treturn *ret0, err\n}", "func (s *Service) FindByName(name string) ([]*entity.Project, error) {\n\treturn s.repo.FindByName(name)\n}", "func (repo *repo) GetProjectByName(projectName string) (*models.Project, error) {\n\tlog.Debugf(\"GetProject - projectName: %s\", projectName)\n\ttableName := fmt.Sprintf(\"cla-%s-projects\", repo.stage)\n\n\t// This is the key we want to match\n\tcondition := expression.Key(\"project_name_lower\").Equal(expression.Value(strings.ToLower(projectName)))\n\n\t// Use the builder to create the expression\n\texpr, err := expression.NewBuilder().WithKeyCondition(condition).WithProjection(buildProjection()).Build()\n\tif err != nil {\n\t\tlog.Warnf(\"error building expression for Project query, projectName: %s, error: %v\",\n\t\t\tprojectName, err)\n\t\treturn nil, err\n\t}\n\n\t// Assemble the query input parameters\n\tqueryInput := &dynamodb.QueryInput{\n\t\tKeyConditionExpression: expr.KeyCondition(),\n\t\tExpressionAttributeNames: expr.Names(),\n\t\tExpressionAttributeValues: expr.Values(),\n\t\tProjectionExpression: expr.Projection(),\n\t\tTableName: aws.String(tableName),\n\t\tIndexName: aws.String(\"project-name-lower-search-index\"),\n\t}\n\n\t// Make the DynamoDB Query API call\n\tresults, queryErr := repo.dynamoDBClient.Query(queryInput)\n\tif queryErr != nil {\n\t\tlog.Warnf(\"error retrieving project by projectName: %s, error: %v\", projectName, queryErr)\n\t\treturn nil, queryErr\n\t}\n\n\t// Should only have one result\n\tif *results.Count > 1 {\n\t\tlog.Warnf(\"Project scan by name returned more than one result using projectName: %s\", projectName)\n\t}\n\n\t// Didn't find it...\n\tif *results.Count == 0 {\n\t\tlog.Debugf(\"Project scan by name returned no results using projectName: %s\", projectName)\n\t\treturn nil, nil\n\t}\n\n\t// Found it...\n\tvar dbModel DBProjectModel\n\terr = dynamodbattribute.UnmarshalMap(results.Items[0], &dbModel)\n\tif err != nil {\n\t\tlog.Warnf(\"error unmarshalling db project 
model, error: %+v\", err)\n\t\treturn nil, err\n\t}\n\n\t// Convert the database model to an API response model\n\treturn repo.buildProjectModel(dbModel), nil\n}", "func Compile(ctx context.Context, cli *client.Client, image string, lang string,\n\tpath string) (int, string) {\n\n\teval := \"\"\n\tswitch lang {\n\tcase \"cpp\":\n\t\teval = \"g++ -w -O2 /tests/data/a.cpp -o /tests/data/a.out 2>&1\"\n\t}\n\n\tresp, err := cli.ContainerCreate(ctx, &container.Config{\n\t\tImage: image,\n\t\tCmd: []string{\"/bin/bash\", \"-c\", eval},\n\t}, &container.HostConfig{\n\t\tMounts: []mount.Mount{\n\t\t\tmount.Mount{\n\t\t\t\tType: mount.TypeBind,\n\t\t\t\tSource: path,\n\t\t\t\tTarget: \"/tests/data\",\n\t\t\t},\n\t\t},\n\t}, nil, nil, \"\")\n\n\tif err != nil {\n\t\treturn 2, err.Error()\n\t}\n\n\tout, err := runContainer(ctx, cli, resp.ID)\n\tif err != nil {\n\t\treturn 2, err.Error()\n\t}\n\n\tr, _ := regexp.Compile(\"error\")\n\tif r.MatchString(out) {\n\t\treturn 0, out\n\t}\n\treturn 1, \"\"\n}", "func Compile(input string) (*Regexp, error) {\n\treturn Compile2(input, 0)\n}", "func (_BREM *BREMCaller) GetProjectByName(opts *bind.CallOpts, _projectName string) (common.Address, error) {\n\tvar (\n\t\tret0 = new(common.Address)\n\t)\n\tout := ret0\n\terr := _BREM.contract.Call(opts, out, \"getProjectByName\", _projectName)\n\treturn *ret0, err\n}", "func (s *projectService) getProjectByName(projectName, owner, user string) (*schema.Project, error) {\n\tproj, err := s.projects.ByName(projectName, owner)\n\tswitch {\n\tcase err != nil:\n\t\treturn nil, err\n\tcase !s.access.AllowedByOwner(proj.ID, user):\n\t\treturn nil, app.ErrNoAccess\n\tdefault:\n\t\treturn proj, nil\n\t}\n}", "func (pi PackageIndex) PackageByName(name string) (*Package, bool) {\n\tpkg, ok := pi.nameMap[name]\n\treturn pkg, ok\n}", "func (courseRepo *CourseRepositoryImpl) CourseByName(name string) (*models.course, []error) {\n\tcourse := models.Course{}\n\terrs := courseRepo.conn.Find(&course, \"name=?\", 
name).GetErrors()\n\tif len(errs) > 0 {\n\t\treturn nil, errs\n\t}\n\treturn &course, errs\n}", "func (be Batch) Compile() error {\n\tswitch len(be.errors) {\n\tcase 0:\n\t\treturn nil\n\tcase 1:\n\t\treturn be.errors[0]\n\tdefault:\n\t\treturn be\n\t}\n}", "func (c *Compiler) Compile(ctx context.Context, files ...string) (linker.Files, error) {\n\tif len(files) == 0 {\n\t\treturn nil, nil\n\t}\n\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\n\tpar := c.MaxParallelism\n\tif par <= 0 {\n\t\tpar = runtime.GOMAXPROCS(-1)\n\t\tcpus := runtime.NumCPU()\n\t\tif par > cpus {\n\t\t\tpar = cpus\n\t\t}\n\t}\n\n\th := reporter.NewHandler(c.Reporter)\n\n\te := executor{\n\t\tc: c,\n\t\th: h,\n\t\ts: semaphore.NewWeighted(int64(par)),\n\t\tcancel: cancel,\n\t\tsym: &linker.Symbols{},\n\t\tresCache: &options.FeaturesResolverCache{},\n\t\tresults: map[string]*result{},\n\t}\n\n\t// We lock now and create all tasks under lock to make sure that no\n\t// async task can create a duplicate result. For example, if files\n\t// contains both \"foo.proto\" and \"bar.proto\", then there is a race\n\t// after we start compiling \"foo.proto\" between this loop and the\n\t// async compilation task to create the result for \"bar.proto\". But\n\t// we need to know if the file is directly requested for compilation,\n\t// so we need this loop to define the result. 
So this loop holds the\n\t// lock the whole time so async tasks can't create a result first.\n\tresults := make([]*result, len(files))\n\tfunc() {\n\t\te.mu.Lock()\n\t\tdefer e.mu.Unlock()\n\t\tfor i, f := range files {\n\t\t\tresults[i] = e.compileLocked(ctx, f, true)\n\t\t}\n\t}()\n\n\tdescs := make([]linker.File, len(files))\n\tvar firstError error\n\tfor i, r := range results {\n\t\tselect {\n\t\tcase <-r.ready:\n\t\tcase <-ctx.Done():\n\t\t\treturn nil, ctx.Err()\n\t\t}\n\t\tif r.err != nil {\n\t\t\tif firstError == nil {\n\t\t\t\tfirstError = r.err\n\t\t\t}\n\t\t}\n\t\tdescs[i] = r.res\n\t}\n\n\tif err := h.Error(); err != nil {\n\t\treturn descs, err\n\t}\n\t// this should probably never happen; if any task returned an\n\t// error, h.Error() should be non-nil\n\treturn descs, firstError\n}", "func (mr *MockProjectServiceIfaceMockRecorder) GetProjectByName(name interface{}, opts ...interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\tvarargs := append([]interface{}{name}, opts...)\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"GetProjectByName\", reflect.TypeOf((*MockProjectServiceIface)(nil).GetProjectByName), varargs...)\n}", "func (mr *MockProjectManagerServiceServerMockRecorder) GetProjectByName(arg0, arg1 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"GetProjectByName\", reflect.TypeOf((*MockProjectManagerServiceServer)(nil).GetProjectByName), arg0, arg1)\n}", "func FindProjectByName(project *model.Project) (projects []model.Project, err error) {\n\tif projects, err = project.FindByName(); err != nil {\n\t\treturn nil, errors.New(\"project \" + project.Name + \" not found.\")\n\t}\n\tif len(projects) == 0 {\n\t\treturn nil, errors.New(\"no project found\")\n\t}\n\treturn projects, nil\n}", "func (c *CachedCompiler) Compiler(rawQuery string) (*gojq.Code, error) {\n\tcode, found := c.cache.Get(rawQuery)\n\tif found {\n\t\treturn code.(*gojq.Code), nil\n\t}\n\tcode, err := 
c.compiler(rawQuery)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc.cache.Add(rawQuery, code)\n\treturn code.(*gojq.Code), nil\n}", "func Solve(input string) (*solutions.Solution, error) {\n\tinitMem := interpreter.ParseMem(input)\n\treturn &solutions.Solution{Part1: part1(initMem), Part2: part2(initMem)}, nil\n}", "func (s *CommunityActionTemplateService) GetByName(name string) (*CommunityActionTemplate, error) {\n\tif internal.IsEmpty(name) {\n\t\treturn nil, internal.CreateInvalidParameterError(\"GetByName\", \"name\")\n\t}\n\n\tif err := services.ValidateInternalState(s); err != nil {\n\t\treturn nil, err\n\t}\n\n\tcollection, err := s.GetAll()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, item := range collection {\n\t\tif strings.EqualFold(item.Name, name) {\n\t\t\treturn item, nil\n\t\t}\n\t}\n\n\treturn nil, internal.CreateResourceNotFoundError(s.GetName(), constants.ParameterName, name)\n}", "func Get(name string) (Project, error) {\n\tproj, ok := projects[name]\n\tif !ok {\n\t\treturn nil, ErrProjectNotFound\n\t}\n\treturn proj, nil\n}", "func Compile(constraint string) (go_version.Constraints, error) {\n\treturn go_version.NewConstraint(constraint)\n}", "func (cp *Compiler) Compile(source, destination string) error {\n\tvar err error\n\n\tvar gccer = exec.Command(cp.Path, append(cp.Args, []string{source, \"-o\", destination}...)...)\n\n\tvar stderr = bytes.NewBuffer(make([]byte, 65536))\n\tgccer.Stderr = stderr\n\n\tif err = gccer.Run(); err != nil {\n\t\tvar ce = new(types.CompileError)\n\t\tce.ProcErr = err.Error()\n\t\tce.Info, err = ioutil.ReadAll(stderr)\n\t\tif err != nil {\n\t\t\tce.Info = []byte(err.Error())\n\t\t}\n\t\treturn ce\n\t}\n\treturn nil\n}", "func Compile(spec *openapi.Spec, options ...Option) (*protobuf.Package, error) {\n\tc := newCompileCtx(spec, options...)\n\tc.pushParent(c.pkg)\n\n\tif c.annotate {\n\t\tc.addImport(\"google/api/annotations.proto\")\n\t}\n\n\tif err := c.compileGlobalOptions(spec.GlobalOptions); err 
!= nil {\n\t\treturn nil, errors.Wrap(err, `failed to compile global options`)\n\t}\n\n\t// compile all definitions\n\tif err := c.compileDefinitions(spec.Definitions); err != nil {\n\t\treturn nil, errors.Wrap(err, `failed to compile definitions`)\n\t}\n\tif err := c.compileParameters(spec.Parameters); err != nil {\n\t\treturn nil, errors.Wrap(err, `failed to compile parameters`)\n\t}\n\tif err := c.compileResponses(spec.Responses); err != nil {\n\t\treturn nil, errors.Wrap(err, `failed to compile global responses`)\n\t}\n\n\tp2, err := protobuf.Resolve(c.pkg, c.getTypeFromReference)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, `failed to resolve references`)\n\t}\n\t*(c.pkg) = *(p2.(*protobuf.Package))\n\n\t// compile extensions\n\tc.phase = phaseCompileExtensions\n\tfor _, ext := range spec.Extensions {\n\t\te, err := c.compileExtension(ext)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, `failed to compile extension`)\n\t\t}\n\t\tc.pkg.AddType(e)\n\t}\n\n\t// compile the paths\n\tif !c.skipRpcs {\n\t\tc.phase = phaseCompilePaths\n\t\tif err := c.compilePaths(spec.Paths); err != nil {\n\t\t\treturn nil, errors.Wrap(err, `failed to compile paths`)\n\t\t}\n\t}\n\n\treturn c.pkg, nil\n}", "func (_BREMFactory *BREMFactoryCallerSession) GetProjectByName(_projectName string) (common.Address, error) {\n\treturn _BREMFactory.Contract.GetProjectByName(&_BREMFactory.CallOpts, _projectName)\n}", "func Compile(ctx context.Context, ui *ui.UI, discovered *discover.Discovered) (*Binaries, error) {\n\tegrp, ctx := errgroup.WithContext(ctx)\n\tbinaries := &Binaries{}\n\tif discovered.Local != nil {\n\t\tegrp.Go(func() error {\n\t\t\tpath, err := compile(ctx, ui, discovered.Local)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tbinaries.Local = Binary{Path: path}\n\t\t\treturn nil\n\t\t})\n\t}\n\tif discovered.Plan != nil {\n\t\tegrp.Go(func() error {\n\t\t\tpath, err := compile(ctx, ui, discovered.Plan)\n\t\t\tif err != nil {\n\t\t\t\treturn 
err\n\t\t\t}\n\n\t\t\tbinaries.Plan = Binary{Path: path}\n\t\t\treturn nil\n\t\t})\n\t}\n\n\tif err := egrp.Wait(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn binaries, nil\n}", "func (mr *MockProjectManagerServiceClientMockRecorder) GetProjectByName(ctx, in interface{}, opts ...interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\tvarargs := append([]interface{}{ctx, in}, opts...)\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"GetProjectByName\", reflect.TypeOf((*MockProjectManagerServiceClient)(nil).GetProjectByName), varargs...)\n}", "func Solve(unsolved string) (result string, errors error) {\n\treturn goku.SolveDirect(unsolved)\n}", "func (a *evalActivation) ResolveName(name string) (any, bool) {\n\tv, found := a.vars[name]\n\tif !found {\n\t\treturn nil, false\n\t}\n\tswitch obj := v.(type) {\n\tcase func() ref.Val:\n\t\tif resolved, found := a.lazyVars[name]; found {\n\t\t\treturn resolved, true\n\t\t}\n\t\tlazy := obj()\n\t\ta.lazyVars[name] = lazy\n\t\treturn lazy, true\n\tcase func() any:\n\t\tif resolved, found := a.lazyVars[name]; found {\n\t\t\treturn resolved, true\n\t\t}\n\t\tlazy := obj()\n\t\ta.lazyVars[name] = lazy\n\t\treturn lazy, true\n\tdefault:\n\t\treturn obj, true\n\t}\n}", "func (r *Regexps) Compile(pattern string) (*regexp.Regexp, error) {\n\tr.Lock()\n\tdefer r.Unlock()\n\tif r.items[pattern] != nil {\n\t\treturn r.items[pattern], nil\n\t}\n\tre, err := regexp.Compile(pattern)\n\tif err == nil {\n\t\tr.items[pattern] = re\n\t}\n\treturn re, err\n}", "func (c *Compiler) Compile(givenAst *ast.Ast) (bleveQuery.Query, error) {\n\tq, err := compile(givenAst)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn q, nil\n}", "func Solve(input string) (*solutions.Solution, error) {\n\ts := parse(input)\n\treturn &solutions.Solution{Part1: part1(s), Part2: part2(s)}, nil\n}", "func (svc *PipelineService) Compile(org, repo, ref string, opt *PipelineOptions) (*yaml.Build, *Response, error) {\n\t// set the API endpoint path 
we send the request to\n\tu := fmt.Sprintf(\"/api/v1/pipelines/%s/%s/%s/compile\", org, repo, ref)\n\n\t// add optional arguments if supplied\n\tu, err := addOptions(u, opt)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// yaml Build type we want to return\n\tv := new(yaml.Build)\n\n\t// send request using client\n\tresp, err := svc.client.Call(\"POST\", u, nil, v)\n\n\treturn v, resp, err\n}", "func Solve(input string) (*solutions.Solution, error) {\n\tinitMem := interpreter.ParseMem(input)\n\tch1, ch2 := make(chan int), make(chan int)\n\n\tgo part1(initMem, ch1)\n\tgo part2(initMem, ch2)\n\treturn &solutions.Solution{Part1: <-ch1, Part2: <-ch2}, nil\n}", "func (runAll RunAll) Compile() []string {\n\tcommands := make([]string, len(runAll.Runs))\n\n\tfor i, run := range runAll.Runs {\n\t\tcommands[i] = run.Compile()[0]\n\t}\n\n\treturn []string{strings.Join(commands, \" && \")}\n}", "func (vm *VM) Compile(source string, chunk *Chunk) (int, bool) {\n\tvar scanner Scanner\n\tvar parser Parser\n\tscanner.Init(source)\n\tparser.scanner = &scanner\n\tparser.chunk = chunk\n\tparser.backpatch = map[string][]Label{}\n\tparser.labels = map[string]byte{}\n\tparser.advance()\n\tfor parser.previous.Type != EOF {\n\t\tparser.statement()\n\t\tif parser.errorState {\n\t\t\tparser.synchronize()\n\t\t}\n\t}\n\tparser.consume(EOF, \"Expected EOF.\")\n\tparser.emitHalt()\n\tparser.checkLabels()\n\treturn parser.size, !parser.hasError\n}", "func CompileQuery(q string) (*Compiler, Body, error) {\n\n\tparsed, err := ParseBody(q)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tkey := string(Wildcard.Value.(Var))\n\n\tmod := &Module{\n\t\tPackage: &Package{\n\t\t\tPath: Ref{DefaultRootDocument},\n\t\t\tLocation: parsed.Loc(),\n\t\t},\n\t\tRules: []*Rule{\n\t\t\t&Rule{\n\t\t\t\tName: Var(key),\n\t\t\t\tBody: parsed,\n\t\t\t\tLocation: parsed.Loc(),\n\t\t\t},\n\t\t},\n\t}\n\tmods := map[string]*Module{\n\t\tkey: mod,\n\t}\n\n\tc := NewCompiler()\n\n\tif c.Compile(mods); 
c.Failed() {\n\t\treturn nil, nil, c.Errors[0]\n\t}\n\n\treturn c, c.Modules[key].Rules[0].Body, nil\n}", "func gcTranslate(gcname string, m map[string]string) string {\n\tswitch gcname {\n\tcase \"Langara\",\"L\", \"1\":\n\t\treturn m[\"Langara\"]\n\tcase \"Fraserview\", \"F\", \"2\":\n\t\treturn m[\"Fraserview\"]\n\tcase \"McCleery\"\t, \"M\", \"3\":\n\t\treturn m[\"McCleery\"]\n\tcase \"All\":\n\t\treturn \"1,2,3\"\n\t}\n\treturn \"no courses options recognized\"\n}", "func (_BREMFactory *BREMFactorySession) GetProjectByName(_projectName string) (common.Address, error) {\n\treturn _BREMFactory.Contract.GetProjectByName(&_BREMFactory.CallOpts, _projectName)\n}", "func (ctx *Context) GetTemplateByName(name string) (*wfv1.Template, error) {\n\tctx.log.Debug(\"Getting the template by name\")\n\n\ttmpl := ctx.tmplBase.GetTemplateByName(name)\n\tif tmpl == nil {\n\t\treturn nil, errors.Errorf(errors.CodeNotFound, \"template %s not found\", name)\n\t}\n\treturn tmpl.DeepCopy(), nil\n}", "func (s Scope) Project(names Names) (Scope, error) {\n\tresult := EmptyScope\n\tfor e := names.Enumerator(); e.MoveNext(); {\n\t\tname := e.Current()\n\t\tif expr, found := s.Get(name); found {\n\t\t\tresult = result.With(name, expr)\n\t\t} else {\n\t\t\treturn Scope{}, errors.Errorf(\n\t\t\t\t\"name %q not found in scope.Project\", name)\n\t\t}\n\t}\n\treturn result, nil\n}", "func (*bzlLibraryLang) Resolve(c *config.Config, ix *resolve.RuleIndex, rc *repo.RemoteCache, r *rule.Rule, importsRaw interface{}, from label.Label) {\n\timports := importsRaw.([]string)\n\n\tr.DelAttr(\"deps\")\n\n\tif len(imports) == 0 {\n\t\treturn\n\t}\n\n\tdeps := make([]string, 0, len(imports))\n\tfor _, imp := range imports {\n\t\timpLabel, err := label.Parse(imp)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"%s: import of %q is invalid: %v\", from.String(), imp, err)\n\t\t\tcontinue\n\t\t}\n\n\t\t// the index only contains absolute labels, not relative\n\t\timpLabel = impLabel.Abs(from.Repo, 
from.Pkg)\n\n\t\tif impLabel.Repo == \"bazel_tools\" {\n\t\t\t// The @bazel_tools repo is tricky because it is a part of the \"shipped\n\t\t\t// with bazel\" core library for interacting with the outside world.\n\t\t\t// This means that it can not depend on skylib. Fortunately there is a\n\t\t\t// fairly simple workaround for this, which is that you can add those\n\t\t\t// bzl files as `deps` entries.\n\t\t\tdeps = append(deps, imp)\n\t\t\tcontinue\n\t\t}\n\n\t\tif impLabel.Repo != \"\" || !c.IndexLibraries {\n\t\t\t// This is a dependency that is external to the current repo, or indexing\n\t\t\t// is disabled so take a guess at what the target name should be.\n\t\t\tdeps = append(deps, strings.TrimSuffix(imp, fileType))\n\t\t\tcontinue\n\t\t}\n\n\t\tres := resolve.ImportSpec{\n\t\t\tLang: languageName,\n\t\t\tImp: impLabel.String(),\n\t\t}\n\t\tmatches := ix.FindRulesByImport(res, languageName)\n\n\t\tif len(matches) == 0 {\n\t\t\tlog.Printf(\"%s: %q (%s) was not found in dependency index. Skipping. 
This may result in an incomplete deps section and require manual BUILD file intervention.\\n\", from.String(), imp, impLabel.String())\n\t\t}\n\n\t\tfor _, m := range matches {\n\t\t\tdepLabel := m.Label\n\t\t\tdepLabel = depLabel.Rel(from.Repo, from.Pkg)\n\t\t\tdeps = append(deps, depLabel.String())\n\t\t}\n\t}\n\n\tsort.Strings(deps)\n\tif len(deps) > 0 {\n\t\tr.SetAttr(\"deps\", deps)\n\t}\n}", "func (r *RegexLexer) maybeCompile() (err error) {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\tif r.compiled {\n\t\treturn nil\n\t}\n\tfor state, rules := range r.rules {\n\t\tfor i, rule := range rules {\n\t\t\tif rule.Regexp == nil {\n\t\t\t\tpattern := \"(?:\" + rule.Pattern + \")\"\n\t\t\t\tif rule.flags != \"\" {\n\t\t\t\t\tpattern = \"(?\" + rule.flags + \")\" + pattern\n\t\t\t\t}\n\t\t\t\tpattern = `\\G` + pattern\n\t\t\t\trule.Regexp, err = regexp2.Compile(pattern, 0)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"failed to compile rule %s.%d: %s\", state, i, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\nrestart:\n\tseen := map[LexerMutator]bool{}\n\tfor state := range r.rules {\n\t\tfor i := 0; i < len(r.rules[state]); i++ {\n\t\t\trule := r.rules[state][i]\n\t\t\tif compile, ok := rule.Mutator.(LexerMutator); ok {\n\t\t\t\tif seen[compile] {\n\t\t\t\t\treturn fmt.Errorf(\"saw mutator %T twice; this should not happen\", compile)\n\t\t\t\t}\n\t\t\t\tseen[compile] = true\n\t\t\t\tif err := compile.MutateLexer(r.rules, state, i); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\t// Process the rules again in case the mutator added/removed rules.\n\t\t\t\t//\n\t\t\t\t// This sounds bad, but shouldn't be significant in practice.\n\t\t\t\tgoto restart\n\t\t\t}\n\t\t}\n\t}\n\tr.compiled = true\n\treturn nil\n}", "func (c *Compiler) Compile(expr string) (*runtime.Program, error) {\n\tprogAST, err := parser.Parse(expr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, pass := range context.Passes {\n\t\terr = progAST.RunPass(c.ctx, pass)\n\t\tif err != nil 
{\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tprog := c.ctx.Builder.Build()\n\tprog.ResultType = progAST.Type()\n\treturn prog, nil\n}", "func Compile(code string, ext vm.Externals) (vm.Program, parser.Messages) {\n\tinput := antlr.NewInputStream(code)\n\treturn compile(input, ext)\n}", "func Compile(m ast.Module) (llvm.Module, error) {\n\tif err := validate.Validate(m); err != nil {\n\t\treturn llvm.Module{}, err\n\t}\n\n\treturn newModuleGenerator().Generate(canonicalize.Canonicalize(m))\n}", "func CompileAll(mroPaths []string, checkSrcPath bool) (int, []*syntax.Ast, error) {\n\tfileNames := make([]string, 0, len(mroPaths)*3)\n\tfor _, mroPath := range mroPaths {\n\t\tfpaths, _ := util.Readdirnames(mroPath)\n\t\tfor _, f := range fpaths {\n\t\t\tif strings.HasSuffix(f, \".mro\") && !strings.HasPrefix(f, \"_\") {\n\t\t\t\tfileNames = append(fileNames, path.Join(mroPath, f))\n\t\t\t}\n\t\t}\n\t}\n\tasts := make([]*syntax.Ast, 0, len(fileNames))\n\tvar parser syntax.Parser\n\tfor _, fpath := range fileNames {\n\t\tif _, _, ast, err := parser.Compile(fpath, mroPaths, checkSrcPath); err != nil {\n\t\t\treturn 0, nil, err\n\t\t} else {\n\t\t\tasts = append(asts, ast)\n\t\t}\n\t}\n\treturn len(fileNames), asts, nil\n}", "func noCompile(runCommandTemplate []string, include func(string) bool, language apipb.LanguageGroup) compileFunc {\n\treturn func(program *apipb.Program, outputBase util.FileBase) (*Compilation, error) {\n\t\tvar filteredPaths []string\n\t\tfor _, file := range program.Sources {\n\t\t\tif include(file.Path) {\n\t\t\t\tfilteredPaths = append(filteredPaths, file.Path)\n\t\t\t}\n\t\t}\n\t\tif len(filteredPaths) == 0 {\n\t\t\treturn &Compilation{CompilerErrors: \"No valid source files found\"}, nil\n\t\t}\n\n\t\trunCommand := substituteFiles(runCommandTemplate, filteredPaths)\n\t\treturn &Compilation{\n\t\t\tProgram: &apipb.CompiledProgram{\n\t\t\t\tProgramRoot: outputBase.Path(),\n\t\t\t\tRunCommand: runCommand,\n\t\t\t\tLanguage: language,\n\t\t\t}}, 
nil\n\t}\n}", "func (service *ProjectService) TryLoadModFrom(names []string) error {\n\tloaded := world.LoadFiles(false, names)\n\n\tresourcesToTake := loaded.Resources\n\tisSavegame := false\n\tif (len(resourcesToTake) == 0) && (len(loaded.Savegames) == 1) {\n\t\tresourcesToTake = loaded.Savegames\n\t\tisSavegame = true\n\t}\n\tif len(resourcesToTake) == 0 {\n\t\treturn errNoResourcesFound\n\t}\n\tvar locs []*world.LocalizedResources\n\tmodPath := \"\"\n\n\tfor location := range resourcesToTake {\n\t\tif (len(modPath) == 0) || (len(location.DirPath) < len(modPath)) {\n\t\t\tmodPath = location.DirPath\n\t\t}\n\t}\n\n\tfor location, viewer := range resourcesToTake {\n\t\tlang := ids.LocalizeFilename(location.Name)\n\t\ttemplate := location.Name\n\t\tif isSavegame {\n\t\t\ttemplate = string(ids.Archive)\n\t\t}\n\t\tloc := &world.LocalizedResources{\n\t\t\tFile: location,\n\t\t\tTemplate: template,\n\t\t\tLanguage: lang,\n\t\t}\n\t\tfor _, id := range viewer.IDs() {\n\t\t\tview, err := viewer.View(id)\n\t\t\tif err != nil {\n\t\t\t\t// optimistic ignore\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t_ = loc.Store.Put(id, view)\n\t\t}\n\t\tlocs = append(locs, loc)\n\t}\n\n\tservice.setActiveMod(modPath, locs, loaded.ObjectProperties, loaded.TextureProperties)\n\treturn nil\n}", "func try_compile_plugin(tag string) {\n filename := \"../plugins/\" + tag + \".go\"\n if file_exists(filename) {\n compile_plugin(filename, tag)\n }\n}", "func (j jumpSvc) Jump(name string) (string, error) {\n\tif paths, ok := j.names[name]; ok {\n\t\tvar path string\n\t\tpath = paths[0]\n\t\tif len(path) > 1 {\n\t\t\t// TODO: Prompt for choice\n\t\t}\n\n\t\treturn path, nil\n\t}\n\n\treturn \"\", errors.New(\"directory does not exist\")\n}", "func ValidateProjectName(name string) error {\n\tif name == \"\" {\n\t\treturn ErrProjectNameMissing\n\t}\n\treturn ValidateName(name)\n}", "func Find(c *goproject.Config, tplName string) (*Template, error) {\n\t// search for custom templates first, they will take 
precedence over base templates\n\tfor _, t := range c.CustomTemplates {\n\t\tif t.Name == tplName {\n\t\t\treturn &Template{name: t.Name, path: t.Path}, nil\n\t\t}\n\t}\n\n\t// No custom template was found, try to find a default one that matches the name\n\ttplPath := path.Join(c.TemplatesPath, tplName)\n\n\t_, err := os.Stat(tplPath)\n\tif err == nil {\n\t\treturn &Template{name: tplName, path: tplPath}, err\n\t}\n\n\treturn nil, fmt.Errorf(\"unable to find template '%s' in default or custom template paths\", tplName)\n}", "func (categories *Categories) ReadCategoriesByName(categoryName, language string) ([]Category, error) {\n\n\tvariables := struct {\n\t\tCategoryName string\n\t\tLanguage string\n\t}{\n\t\tCategoryName: categoryName,\n\t\tLanguage: language}\n\n\tqueryTemplate, err := template.New(\"ReadCategoriesByName\").Parse(`{\n\t\t\t\tcategories(func: eq(categoryName@{{.Language}}, \"{{.CategoryName}}\"))\n\t\t\t\t@filter(eq(categoryIsActive, true)) {\n\t\t\t\t\tuid\n\t\t\t\t\tcategoryName: categoryName@{{.Language}}\n\t\t\t\t\tcategoryIsActive\n\t\t\t\t\tbelongs_to_company @filter(eq(companyIsActive, true)) {\n\t\t\t\t\t\tuid\n\t\t\t\t\t\tcompanyName: companyName@{{.Language}}\n\t\t\t\t\t\tcompanyIsActive\n\t\t\t\t\t\thas_category @filter(eq(categoryIsActive, true)) {\n\t\t\t\t\t\t\tuid\n\t\t\t\t\t\t\tcategoryName: categoryName@{{.Language}}\n\t\t\t\t\t\t\tcategoryIsActive\n\t\t\t\t\t\t\tbelong_to_company @filter(eq(companyIsActive, true)) {\n\t\t\t\t\t\t\t\tuid\n\t\t\t\t\t\t\t\tcompanyName: companyName@{{.Language}}\n\t\t\t\t\t\t\t\tcompanyIsActive\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\thas_product @filter(eq(productIsActive, true)) {\n\t\t\t\t\t\tuid\n\t\t\t\t\t\tproductName: productName@{{.Language}}\n\t\t\t\t\t\tproductIri\n\t\t\t\t\t\tpreviewImageLink\n\t\t\t\t\t\tproductIsActive\n\t\t\t\t\t\tbelongs_to_category @filter(eq(categoryIsActive, true)) {\n\t\t\t\t\t\t\tuid\n\t\t\t\t\t\t\tcategoryName: 
categoryName@{{.Language}}\n\t\t\t\t\t\t\tcategoryIsActive\n\t\t\t\t\t\t}\n\t\t\t\t\t\tbelongs_to_company @filter(eq(companyIsActive, true)) {\n\t\t\t\t\t\t\tuid\n\t\t\t\t\t\t\tcompanyName: companyName@{{.Language}}\n\t\t\t\t\t\t\tcompanyIsActive\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}`)\n\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn nil, ErrCategoriesByNameCanNotBeFound\n\t}\n\n\tqueryBuf := bytes.Buffer{}\n\terr = queryTemplate.Execute(&queryBuf, variables)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn nil, ErrCategoriesByNameCanNotBeFound\n\t}\n\n\ttransaction := categories.storage.Client.NewTxn()\n\tresponse, err := transaction.Query(context.Background(), queryBuf.String())\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn nil, ErrCategoriesByNameCanNotBeFound\n\t}\n\n\ttype categoriesInStorage struct {\n\t\tAllCategoriesFoundedByName []Category `json:\"categories\"`\n\t}\n\n\tvar foundedCategories categoriesInStorage\n\terr = json.Unmarshal(response.GetJson(), &foundedCategories)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn nil, ErrCategoriesByNameCanNotBeFound\n\t}\n\n\tif len(foundedCategories.AllCategoriesFoundedByName) == 0 {\n\t\treturn nil, ErrCategoriesByNameNotFound\n\t}\n\n\treturn foundedCategories.AllCategoriesFoundedByName, nil\n}", "func (i *CharacterSourceImporter) retrySearchByName(name string) (externalissuesource.CharacterSearchResult, error) {\n\tresultCh := make(chan externalissuesource.CharacterSearchResult, 1)\n\tdefer close(resultCh)\n\tresultErrCh := make(chan error, 1)\n\tdefer close(resultErrCh)\n\terr := retryURL(func() (string, error) {\n\t\ti.logger.Info(\"searching for character name\", zap.String(\"query\", name))\n\t\t// Gonna have to lock this resource to avoid race conditions.\n\t\t// Or I can just pass in a copy of the external source.\n\t\t// TODO: make more intuitive later.\n\t\ti.mu.Lock()\n\t\tresult, err := i.externalSource.SearchCharacter(name)\n\t\ti.mu.Unlock()\n\t\tif err != nil 
{\n\t\t\treturn name, err\n\t\t}\n\t\tresultCh <- result\n\t\treturn name, nil\n\t})\n\tif err != nil {\n\t\tresultErrCh <- err\n\t}\n\tfor {\n\t\tselect {\n\t\tcase errC := <-resultErrCh:\n\t\t\treturn externalissuesource.CharacterSearchResult{}, errC\n\t\tcase result := <-resultCh:\n\t\t\treturn result, nil\n\t\t}\n\t}\n}", "func (o *GetClusterTemplateByNameInWorkspaceParams) WithName(name string) *GetClusterTemplateByNameInWorkspaceParams {\n\to.SetName(name)\n\treturn o\n}", "func (lm *LanguageManagerImpl) CreateLanguage(name string, debug bool) (Language, error) {\n\treturn lm.createLanguage(name, true, debug)\n}", "func Build(name string, c *Config) error {\n\treturn defaultPlugin.Build(name, c)\n}", "func Compile(c *Clause) (ClauseFunc, error) {\n\treturn DefaultOps.Compile(c)\n}", "func (ops OpsSet) Compile(c *Clause) (ClauseFunc, error) {\n\tbf, ok := ops[c.Operator.Name]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"unrecognized operation %s\", c.Operator.Name)\n\t}\n\treturn bf(c.Arguments, ops)\n}", "func SolveFirst(s string) (res string, ok bool) {\n\tSolve(s, func(solution string) bool {\n\t\tok = true\n\t\tres = solution\n\t\treturn true\n\t})\n\treturn\n}", "func (c typeCompiler) compileLit(ptype parse.Type) vdl.TypeOrPending { //nolint:gocyclo\n\tswitch pt := ptype.(type) {\n\tcase *parse.TypeNamed:\n\t\t// Try to resolve the named type from the already-compiled packages in env.\n\t\tif def, matched := c.env.ResolveType(pt.Name, c.file); def != nil {\n\t\t\tif len(matched) < len(pt.Name) {\n\t\t\t\tc.env.Errorf(c.file, pt.Pos(), \"type %s invalid (%s unmatched)\", pt.Name, pt.Name[len(matched):])\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn def.Type\n\t\t}\n\t\t// Try to resolve the named type from the builders in this local package.\n\t\t// This resolve types that haven't been described yet, to handle arbitrary\n\t\t// ordering of types within the package, as well as cyclic types.\n\t\tif b, ok := c.builders[pt.Name]; ok {\n\t\t\t// If transExport is 
set, ensure all subtypes are exported. We only need\n\t\t\t// to check local names, since names resolved from the packages in env\n\t\t\t// could only be resolved if they were exported to begin with.\n\t\t\tif c.transExport && !b.def.Exported {\n\t\t\t\tc.env.Errorf(c.file, pt.Pos(), \"type %s must be transitively exported\", pt.Name)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn b.pending\n\t\t}\n\t\tc.env.Errorf(c.file, pt.Pos(), \"type %s undefined\", pt.Name)\n\t\treturn nil\n\tcase *parse.TypeList:\n\t\telem := c.compileLit(pt.Elem)\n\t\tif elem == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn c.vtb.List().AssignElem(elem)\n\tcase *parse.TypeSet:\n\t\tkey := c.compileLit(pt.Key)\n\t\tif key == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn c.vtb.Set().AssignKey(key)\n\tcase *parse.TypeMap:\n\t\tkey, elem := c.compileLit(pt.Key), c.compileLit(pt.Elem)\n\t\tif key == nil || elem == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn c.vtb.Map().AssignKey(key).AssignElem(elem)\n\tcase *parse.TypeOptional:\n\t\telem := c.compileLit(pt.Base)\n\t\tif elem == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn c.vtb.Optional().AssignElem(elem)\n\tdefault:\n\t\tc.env.Errorf(c.file, pt.Pos(), \"unnamed %s type invalid (type must be defined)\", ptype.Kind())\n\t\treturn nil\n\t}\n}", "func (m *manager) Get(idOrName interface{}) (*models.Project, error) {\n\tid, ok := idOrName.(int64)\n\tif ok {\n\t\treturn dao.GetProjectByID(id)\n\t}\n\tname, ok := idOrName.(string)\n\tif ok {\n\t\treturn dao.GetProjectByName(name)\n\t}\n\treturn nil, fmt.Errorf(\"invalid parameter: %v, should be ID(int64) or name(string)\", idOrName)\n}", "func LoadProject(name string) (*Project, error) {\n p := new(Project)\n err := Mongo.GetOne(\"project\", bson.M{\"name\": name}, p)\n return p, err\n}", "func (_BREM *BREMCallerSession) GetProjectByName(_projectName string) (common.Address, error) {\n\treturn _BREM.Contract.GetProjectByName(&_BREM.CallOpts, _projectName)\n}", "func (c *Cmds) Compile() {\n\tvar cmp 
compiler\n\tcmp.compile(c.parseTree)\n\tc.prog = cmp.prog()\n\treturn\n}", "func (s *Scanner) Compile() error {\n\tcompiler, err := yara.NewCompiler()\n\tif err != nil {\n\t\treturn err\n\t}\n\trulesStat, _ := os.Stat(s.rulesPath)\n\tif rulesStat.Mode().IsDir() {\n\t\terr = filepath.Walk(s.rulesPath, func(filePath string, fileInfo os.FileInfo, err error) error {\n\t\t\tfileName := fileInfo.Name()\n\t\t\tif (path.Ext(fileName) == \".yar\") || (path.Ext(fileName) == \".yara\") {\n\t\t\t\trulesFile, _ := os.Open(filePath)\n\t\t\t\tdefer rulesFile.Close()\n\t\t\t\terr = compiler.AddFile(rulesFile, \"\")\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t}\n\n\t// Collect and compile Yara rules.\n\ts.Rules, err = compiler.GetRules()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func Compile(ctx context.Context, targets []string) error {\n\tlog := logger.NewDefault(\"compile\")\n\tlog.SetLogLevel(logger.LevelInfo)\n\tif consts.IsDebugMode(ctx) {\n\t\tlog.SetLogLevel(logger.LevelDebug)\n\t}\n\n\tconfigManager, err := configmanager.NewConfigManager(log)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpluginManager, err := pluginmanager.NewPluginManager(pluginmanager.NewConfig(), log)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcompilerManager, err := compilermanager.NewCompilerManager(ctx, log, configManager, pluginManager)\n\tif err != nil {\n\t\treturn err\n\t}\n\tactionManager, err := actionmanager.NewActionManager(log)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tconfigItems, err := StepLookUpConfigs(ctx, targets, configManager)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := StepInstallProtoc(ctx, pluginManager, configItems); err != nil {\n\t\treturn err\n\t}\n\tif err := StepInstallRepositories(ctx, pluginManager, configItems); err != nil {\n\t\treturn err\n\t}\n\tif err := StepInstallPlugins(ctx, pluginManager, configItems); err != nil {\n\t\treturn err\n\t}\n\tif err := StepCompile(ctx, compilerManager, 
targets); err != nil {\n\t\treturn err\n\t}\n\n\tif !consts.IsDisableAction(ctx) {\n\t\tif err := StepPostAction(ctx, actionManager, configItems); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := StepPostShell(ctx, actionManager, configItems); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tdisplayWarn := false\n\t\tfor _, configItem := range configItems {\n\t\t\tif len(configItem.Config().PostActions) > 0 || configItem.Config().PostShell != \"\" {\n\t\t\t\tdisplayWarn = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif displayWarn {\n\t\t\tlog.LogWarn(nil, \"PostAction and PostShell is skipped. If you need to allow execution, please append '-p' to command flags to enable\")\n\t\t}\n\t}\n\n\tlog.LogInfo(nil, \"Good job! you are ready to go :)\")\n\treturn nil\n}", "func (r *resolver) lookupNamed(s *scope, a ast.TemplatedName) (sem.Named, error) {\n\ttarget := s.lookup(a.Name)\n\tif target == nil {\n\t\treturn nil, fmt.Errorf(\"%v cannot resolve '%v'\", a.Source, a.Name)\n\t}\n\n\t// Something with the given name was found...\n\tvar params []sem.TemplateParam\n\tvar ty sem.ResolvableType\n\tswitch target := target.object.(type) {\n\tcase *sem.Type:\n\t\tty = target\n\t\tparams = target.TemplateParams\n\tcase *sem.TypeMatcher:\n\t\tty = target\n\t\tparams = target.TemplateParams\n\tcase sem.TemplateParam:\n\t\tif len(a.TemplateArgs) != 0 {\n\t\t\treturn nil, fmt.Errorf(\"%v '%v' template parameters do not accept template arguments\", a.Source, a.Name)\n\t\t}\n\t\treturn target.(sem.Named), nil\n\tcase sem.Named:\n\t\treturn target, nil\n\tdefault:\n\t\tpanic(fmt.Errorf(\"unknown resolved type %T\", target))\n\t}\n\t// ... 
and that something takes template parameters\n\t// Check the number of templated name template arguments match the number of\n\t// templated parameters for the target.\n\targs := a.TemplateArgs\n\tif len(params) != len(args) {\n\t\treturn nil, fmt.Errorf(\"%v '%v' requires %d template arguments, but %d were provided\", a.Source, a.Name, len(params), len(args))\n\t}\n\n\t// Check templated name template argument kinds match the parameter kinds\n\tfor i, ast := range args {\n\t\tparam := params[i]\n\t\targ, err := r.lookupNamed(s, args[i])\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif err := checkCompatible(arg, param); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"%v %w\", ast.Source, err)\n\t\t}\n\t}\n\treturn ty, nil\n}", "func (games GameCollection) FindGameByName(name string) (Game, error) {\n\tfor _, game := range games {\n\t\tif strings.ToLower(game.Name) == strings.ToLower(name) {\n\t\t\treturn game, nil\n\t\t}\n\t}\n\n\treturn Game{}, errors.New(fmt.Sprintf(\"Could not find a game called %s\", name))\n}", "func (r *FileSystemRepository) FindByName(name string) (*Definition, error) {\n\tr.RLock()\n\tdefer r.RUnlock()\n\n\treturn r.findByName(name)\n}", "func (d PartOneSolver) Solve() string {\n\tlines, err := common.GetLines(\"day/eight/input.txt\")\n\tif err != nil {\n\t\tpanic(\"couldn't open input file for day eight\")\n\t}\n\n\tbootCode := NewBootCodeFromStringInstructions(lines)\n\tres, ok := getAccAtLoop(bootCode)\n\tif !ok {\n\t\tpanic(\"couldn't detect loop\")\n\t}\n\treturn strconv.Itoa(res)\n}", "func (app *AppBuilder) ProjectName(name string) *AppBuilder {\n\tapp.projectName = name\n\treturn app\n}", "func Solve(input string) (*solutions.Solution, error) {\n\tft := makeFlatTree(parseInput(input))\n\treturn &solutions.Solution{Part1: part1(ft), Part2: part2(ft)}, nil\n}", "func (p *PipelineBuild) Translate(lang string) {\n\tfor ks := range p.Stages {\n\t\tfor kj := range p.Stages[ks].PipelineBuildJobs 
{\n\t\t\tp.Stages[ks].PipelineBuildJobs[kj].Translate(lang)\n\t\t}\n\t}\n}", "func (t Trigger) Compile() (*Trigger, error) {\n\treturn compileTrigger(t)\n}", "func Greet(name string, lang string) string {\n\tvar result string\n\n\tif len(name) == 0 {\n\t\tresult = getSimpleGreet(lang)\n\t} else {\n\t\tresult = getGreetWithName(name, lang)\n\t}\n\n\treturn result\n}", "func (a *ctxEvalActivation) ResolveName(name string) (any, bool) {\n\tif name == \"#interrupted\" {\n\t\ta.interruptCheckCount++\n\t\tif a.interruptCheckCount%a.interruptCheckFrequency == 0 {\n\t\t\tselect {\n\t\t\tcase <-a.interrupt:\n\t\t\t\treturn true, true\n\t\t\tdefault:\n\t\t\t\treturn nil, false\n\t\t\t}\n\t\t}\n\t\treturn nil, false\n\t}\n\treturn a.parent.ResolveName(name)\n}", "func (p *PrecompiledTemplate) Name() TemplateName {\n\treturn p.name\n}", "func (*bzlLibraryLang) Name() string { return languageName }", "func (o SolutionOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Solution) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (p *Plugins) ByName(name string) *Plugin {\n\tfor _, plugin := range p.Plugins() {\n\t\tif plugin.Name == name {\n\t\t\treturn plugin\n\t\t}\n\t}\n\treturn nil\n}", "func CompileStructStrict(template string, maps interface{}) (string, error) {\n\twords := findWords.FindAllString(template, -1)\n\thasError := false\n\tfor _, source := range words {\n\t\tsourcetxt := source[1 : len(source)-1]\n\t\tparams := getParameters(sourcetxt)\n\t\ttarget := reflect.ValueOf(maps).FieldByName(params.Source)\n\t\tif target.IsValid() {\n\t\t\ttemplate = strings.ReplaceAll(template, source, params.BuildString(fmt.Sprintf(\"%v\", target)))\n\t\t} else {\n\t\t\thasError = true\n\t\t}\n\t}\n\tif hasError {\n\t\treturn template, fmt.Errorf(\"There are some invalid fields\")\n\t}\n\treturn template, nil\n}", "func (ctx CompilerContext) Compile(schPath string, applicableOnSameInstance bool) (*Schema, error) {\n\tvar stack []schemaRef\n\tif 
applicableOnSameInstance {\n\t\tstack = ctx.stack\n\t}\n\treturn ctx.c.compileRef(ctx.r, stack, schPath, ctx.res, ctx.r.url+ctx.res.floc+\"/\"+schPath)\n}", "func validateProjectName(s string) error {\n\treturn tokens.ValidateProjectName(s)\n}", "func (c *Compiler) Compile(ctx context.Context, node ast.StmtNode) (ast.Statement, error) {\n\tast.SetFlag(node)\n\tif _, ok := node.(*ast.UpdateStmt); ok {\n\t\tsVars := variable.GetSessionVars(ctx)\n\t\tsVars.InUpdateStmt = true\n\t\tdefer func() {\n\t\t\tsVars.InUpdateStmt = false\n\t\t}()\n\t}\n\n\tis := sessionctx.GetDomain(ctx).InfoSchema()\n\tif err := plan.Preprocess(node, is, ctx); err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\t// Validate should be after NameResolve.\n\tif err := plan.Validate(node, false); err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tp, err := plan.Optimize(ctx, node, is)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\t_, isDDL := node.(ast.DDLNode)\n\tsa := &statement{\n\t\tis: is,\n\t\tplan: p,\n\t\ttext: node.Text(),\n\t\tisDDL: isDDL,\n\t}\n\treturn sa, nil\n}" ]
[ "0.5584038", "0.50285786", "0.50141174", "0.49621782", "0.4875996", "0.48572075", "0.4854768", "0.4850871", "0.4748288", "0.46886098", "0.46881205", "0.4676761", "0.46507636", "0.4649425", "0.4638744", "0.4593714", "0.4560371", "0.45371085", "0.45201123", "0.4494501", "0.44933307", "0.44850707", "0.44822448", "0.44775862", "0.44110024", "0.44078076", "0.4395437", "0.43701237", "0.43678054", "0.43593353", "0.43354324", "0.43117195", "0.43080756", "0.42979133", "0.42758378", "0.4268082", "0.4262412", "0.42560968", "0.4252824", "0.42458814", "0.42277768", "0.42274433", "0.4223413", "0.42153773", "0.42105395", "0.42068347", "0.41984808", "0.4184285", "0.4184176", "0.41794407", "0.4158664", "0.41570944", "0.41565135", "0.41561377", "0.41540417", "0.415284", "0.4152795", "0.41483176", "0.4146716", "0.41423276", "0.41392666", "0.41358903", "0.4132496", "0.412806", "0.41193014", "0.41157925", "0.41122627", "0.41120094", "0.4105347", "0.41052726", "0.41020188", "0.40952808", "0.40926233", "0.40888453", "0.40828544", "0.40825447", "0.40774322", "0.40764832", "0.40648913", "0.40482104", "0.40470943", "0.40401393", "0.4039025", "0.40371352", "0.40348667", "0.40326685", "0.40228057", "0.40195358", "0.4016678", "0.40151006", "0.4011356", "0.40108365", "0.3993188", "0.39929143", "0.39892605", "0.39880246", "0.39835316", "0.39717528", "0.39566043", "0.3955192" ]
0.8204627
0
GetCompilationRootDir returns directory of all compiled solutions.
func (cptool *CPTool) GetCompilationRootDir() string { return path.Join(cptool.workingDirectory, ".cptool/solutions") }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func GetRootProjectDir() (string, error) {\n\twd, err := os.Getwd()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tfor !strings.HasSuffix(wd, \"git2consul-go\") {\n\t\tif wd == \"/\" {\n\t\t\treturn \"\", errors.New(`cannot find project directory, \"/\" reached`)\n\t\t}\n\t\twd = filepath.Dir(wd)\n\t}\n\treturn wd, nil\n}", "func GetGoRootDir() string {\n\troseDir := GetRosieDir()\n\treturn path.Join(roseDir, goDirName)\n}", "func (o *Project) GetRootDirectory() string {\n\tif o == nil || o.RootDirectory == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.RootDirectory\n}", "func getProjectRoot() (string, error) {\n\tdir, err := os.Getwd()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Error getting pwd: %s\", err)\n\t}\n\tfor {\n\t\tparent, name := filepath.Split(dir)\n\t\tif name == \"acr-builder\" {\n\t\t\tbreak\n\t\t}\n\t\tparent = filepath.Clean(parent)\n\t\tif parent == \"\" {\n\t\t\tpanic(\"no acr-builder directory find on pwd\")\n\t\t}\n\t\tdir = parent\n\t}\n\treturn dir, nil\n}", "func RootDir() string {\n\treturn environ.GetValueStrOrPanic(\"ROOT_DIR\")\n}", "func getProjectRoot(t *testing.T) (rootPath string) {\n\troot, err := os.Getwd()\n\trequire.NoError(t, err, \"could not get current working directory\")\n\tfor root != \"/\" { // Walk up path to find dir containing go.mod\n\t\tif _, err := os.Stat(filepath.Join(root, \"go.mod\")); os.IsNotExist(err) {\n\t\t\troot = filepath.Dir(root)\n\t\t} else {\n\t\t\treturn root\n\t\t}\n\t}\n\tt.Fatal(\"could not find project root\")\n\treturn\n}", "func ProjectRoot() (path string) {\n\t_, err := ioutil.ReadFile(RootConfigFile)\n\tif err != nil {\n\t\tpath = setroot.Set(RootConfigFile, GlobalConfigDir())\n\t} else {\n\t\tdata, err := ioutil.ReadFile(RootConfigFile)\n\t\tif err != nil {\n\t\t\tstatuser.Error(\"Failed to read from global config file\", err, 1)\n\t\t}\n\t\tglobalConfig := struct {\n\t\t\tPath string `yaml:\"path\"`\n\t\t}{}\n\t\terr = yaml.Unmarshal(data, &globalConfig)\n\t\tif 
err != nil {\n\t\t\tstatuser.Error(\"Failed to parse yaml from global config file\", err, 1)\n\t\t}\n\t\tpath = globalConfig.Path\n\t}\n\treturn path\n}", "func repoRoot() (string, error) {\n\trepoRootState.once.Do(func() {\n\t\tif wsDir := os.Getenv(\"BUILD_WORKSPACE_DIRECTORY\"); wsDir != \"\" {\n\t\t\trepoRootState.dir = wsDir\n\t\t\treturn\n\t\t}\n\t\tdir, err := os.Getwd()\n\t\tif err != nil {\n\t\t\trepoRootState.err = err\n\t\t\treturn\n\t\t}\n\t\tfor {\n\t\t\t_, err := os.Stat(filepath.Join(dir, \"WORKSPACE\"))\n\t\t\tif err == nil {\n\t\t\t\trepoRootState.dir = dir\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err != os.ErrNotExist {\n\t\t\t\trepoRootState.err = err\n\t\t\t\treturn\n\t\t\t}\n\t\t\tparent := filepath.Dir(dir)\n\t\t\tif parent == dir {\n\t\t\t\trepoRootState.err = errors.New(\"could not find workspace directory\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdir = parent\n\t\t}\n\t})\n\treturn repoRootState.dir, repoRootState.err\n}", "func RootDir() string {\n\t_, b, _, _ := runtime.Caller(0)\n\td := path.Join(path.Dir(b))\n\treturn filepath.Dir(d)\n}", "func (w *World) RootDir() string {\n\treturn w.rootdir\n}", "func GetConfigRootDir() string {\n\tconfigFile := viper.GetString(\"viper.config_file\")\n\tif configFile == \"\" {\n\t\tcwd, _ := os.Getwd()\n\t\treturn cwd\n\t}\n\n\treturn path.Dir(configFile)\n}", "func GetProjectRoot(wd string) (string, error) {\n\tsep := string(os.PathSeparator)\n\tparts := append(strings.SplitAfter(wd, sep), sep)\n\n\tfor i := len(parts) - 1; i >= 0; i-- {\n\t\tdir := filepath.Join(parts[:i]...)\n\t\tfile := filepath.Join(dir, projectFileName)\n\n\t\tif _, err := os.Stat(file); err == nil {\n\t\t\tlog.Debugf(\"Found project file at %s\", dir)\n\t\t\treturn dir, nil\n\t\t} else if os.IsNotExist(err) {\n\t\t\tcontinue\n\t\t} else {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\treturn \"\", ErrNoProject\n}", "func (c *Container) RootDirectory() (string, error) {\n\t// The root directory of this container's runtime.\n\trootDir := 
fmt.Sprintf(\"/var/run/docker/runtime-%s/moby\", c.runtime)\n\t_, err := os.Stat(rootDir)\n\tif err == nil {\n\t\treturn rootDir, nil\n\t}\n\t// In docker v20+, due to https://github.com/moby/moby/issues/42345 the\n\t// rootDir seems to always be the following.\n\tconst defaultDir = \"/var/run/docker/runtime-runc/moby\"\n\t_, derr := os.Stat(defaultDir)\n\tif derr == nil {\n\t\treturn defaultDir, nil\n\t}\n\n\treturn \"\", fmt.Errorf(\"cannot stat %q: %v or %q: %v\", rootDir, err, defaultDir, derr)\n}", "func TemplateRootDir() (string, error) {\n\tconfig, err := os.UserConfigDir()\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"failed to get UserConfigDir\")\n\t}\n\n\ttmplPath := filepath.Join(config, \"suborbital\", \"templates\")\n\n\tif os.Stat(tmplPath); err != nil {\n\t\tif errors.Is(err, os.ErrNotExist) {\n\t\t\tif err := os.MkdirAll(tmplPath, os.ModePerm); err != nil {\n\t\t\t\treturn \"\", errors.Wrap(err, \"failed to MkdirAll template directory\")\n\t\t\t}\n\t\t} else {\n\t\t\treturn \"\", errors.Wrap(err, \"failed to Stat template directory\")\n\t\t}\n\t}\n\n\treturn tmplPath, nil\n}", "func (c *config) BuildRoot() string {\n\trootPom := c.RootPOM()\n\tif rootPom == \"\" {\n\t\treturn \"\"\n\t}\n\treturn filepath.Dir(rootPom)\n}", "func rootDir() (string, error) {\n\tdir, err := os.Getwd()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tlastdir := \"\"\n\n\tfor lastdir != dir {\n\t\tif _, err = os.Stat(filepath.Join(dir, \".gong\")); err == nil {\n\t\t\treturn dir, nil\n\t\t}\n\n\t\tlastdir = dir\n\t\tdir = filepath.Dir(dir)\n\t}\n\n\treturn \"\", ErrNoRootDir\n}", "func ProjectDir() string {\n\treturn path.Dir(CallerDir())\n}", "func getContainerRootDir(rootDir, id string) string {\n\treturn filepath.Join(rootDir, containersDir, id)\n}", "func (pctx *processContext) ModuleRoot(ctx context.Context) (string, error) {\n\tfor dir, prevDir := pctx.workdir, \"\"; ; dir = filepath.Dir(dir) {\n\t\tif dir == prevDir {\n\t\t\treturn \"\", 
xerrors.Errorf(\"couldn't find a Go CDK project root at or above %s\", pctx.workdir)\n\t\t}\n\t\tprevDir = dir\n\t\tif _, err := os.Stat(filepath.Join(dir, \"go.mod\")); err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tif _, err := os.Stat(biomesRootDir(dir)); err != nil {\n\t\t\tcontinue\n\t\t}\n\t\treturn dir, nil\n\t}\n}", "func (c ContainerInfo) RootDir() string {\n\treturn fmt.Sprintf(\"/var/lib/lxc/%s\", c.Name)\n}", "func getRootPath() string {\n\tp, _ := filepath.Abs(\"../../\")\n\treturn p + string(filepath.Separator)\n}", "func (o TransferJobTransferSpecPosixDataSourcePtrOutput) RootDirectory() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *TransferJobTransferSpecPosixDataSource) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.RootDirectory\n\t}).(pulumi.StringPtrOutput)\n}", "func GetRootPath() string {\n\t_, filename, _, ok := runtime.Caller(0)\n\tif !ok {\n\t\tpanic(\"Cannot get file into for env\")\n\t}\n\n\treturn path.Dir(path.Dir(filename))\n}", "func repoRoot() string {\n\treturn filepath.Clean(trim(cmdOutput(\"git\", \"rev-parse\", \"--show-toplevel\")))\n}", "func (o TransferJobTransferSpecPosixDataSourceOutput) RootDirectory() pulumi.StringOutput {\n\treturn o.ApplyT(func(v TransferJobTransferSpecPosixDataSource) string { return v.RootDirectory }).(pulumi.StringOutput)\n}", "func ContainerRuntimeRootPath(sandboxID, containerID string) string {\n\treturn filepath.Join(VCStorePrefix, RunStoragePath(), sandboxID, containerID)\n}", "func rootDir(path string) string {\n\tif runtime.GOOS == \"windows\" {\n\t\treturn filepath.Join(`c:\\`, path)\n\t}\n\treturn filepath.Join(\"/\", path)\n}", "func ExecutableDir() string {\n\texecutable, _ := os.Executable()\n\treturn filepath.Dir(executable)\n}", "func WorkDir() string {\n\tworkDirOnce.Do(func() {\n\t\tworkDir = os.Getenv(\"GOGS_WORK_DIR\")\n\t\tif workDir != \"\" {\n\t\t\treturn\n\t\t}\n\n\t\tworkDir = filepath.Dir(AppPath())\n\t})\n\n\treturn workDir\n}", "func getRootDir() (string, 
error) {\n\t//TODO: fix this!! think it's a tad dodgy!\n\tpwd, _ := os.Getwd()\n\tlog.Printf(\"[DEBUG] getRootDir pwd is: %v\", pwd)\n\n\tb := strings.Contains(pwd, rootDirName)\n\tif !b {\n\t\treturn \"\", fmt.Errorf(\"could not find '%v' root directory in %v\", rootDirName, pwd)\n\t}\n\n\ts := strings.SplitAfter(pwd, rootDirName)\n\tlog.Printf(\"[DEBUG] path(s) after splitting: %v\\n\", s)\n\n\tif len(s) < 1 {\n\t\t//expect at least one result\n\t\treturn \"\", fmt.Errorf(\"could not split out '%v' from directory in %v\", rootDirName, pwd)\n\t}\n\n\tif !strings.HasSuffix(s[0], rootDirName) {\n\t\t//the first path should end with \"probr\"\n\t\treturn \"\", fmt.Errorf(\"first path after split (%v) does not end with '%v'\", s[0], rootDirName)\n\t}\n\n\treturn s[0], nil\n}", "func GetGitDirectory() string {\n\tcurrentDirectory, _ := os.Getwd()\n\tvar projectDirectory = \"\"\n\tdirectoryParts := strings.Split(currentDirectory, string(os.PathSeparator))\n\n\tfor projectDirectory == \"\" {\n\t\tif _, err := os.Stat(filepath.Join(currentDirectory, \"/.git\")); err == nil {\n\t\t\treturn currentDirectory\n\t\t}\n\n\t\tif directoryParts[0]+\"\\\\\" == currentDirectory || currentDirectory == \"/\" {\n\t\t\treturn \"\"\n\t\t}\n\n\t\tcurrentDirectory = filepath.Dir(currentDirectory)\n\t}\n\n\treturn \"\"\n}", "func (crypto LocalCryptomatorVault) GetRootDirectory() Directory {\n\tvar dir LocalDirectory\n\n\tdir.crypto = &crypto\n\tdir.decryptedPath = \"\"\n\tdir.uuid = \"\"\n\tdir.encryptedPath = crypto.getFilePath(dir.uuid)\n\n\tdir.updateDirectory()\n\n\treturn Directory(&dir)\n}", "func RootPath() string {\n\treturn configRootPath\n}", "func GetRootlessDir() string {\n\treturn rootlessDir\n}", "func GetRuntimeDir() (string, error) {\n\treturn \"\", errors.New(\"this function is not implemented for windows\")\n}", "func (o TransferJobTransferSpecPosixDataSinkPtrOutput) RootDirectory() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *TransferJobTransferSpecPosixDataSink) 
*string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.RootDirectory\n\t}).(pulumi.StringPtrOutput)\n}", "func (s *State) GetContainerRoot() (string, error) {\n\tspec, err := s.LoadSpec()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvar containerRoot string\n\tif spec.Root != nil {\n\t\tcontainerRoot = spec.Root.Path\n\t}\n\n\tif filepath.IsAbs(containerRoot) {\n\t\treturn containerRoot, nil\n\t}\n\n\treturn filepath.Join(s.Bundle, containerRoot), nil\n}", "func (o *Project) GetRootDirectoryOk() (*string, bool) {\n\tif o == nil || o.RootDirectory == nil {\n\t\treturn nil, false\n\t}\n\treturn o.RootDirectory, true\n}", "func (o TransferJobTransferSpecPosixDataSinkOutput) RootDirectory() pulumi.StringOutput {\n\treturn o.ApplyT(func(v TransferJobTransferSpecPosixDataSink) string { return v.RootDirectory }).(pulumi.StringOutput)\n}", "func ContainerConfigurationRootPath(sandboxID, containerID string) string {\n\treturn filepath.Join(VCStorePrefix, ConfigStoragePath(), sandboxID, containerID)\n}", "func WorkDir() (string, error) {\n\texecPath, err := ExecPath()\n\treturn path.Dir(strings.Replace(execPath, \"\\\\\", \"/\", -1)), err\n}", "func (l *Loader) AppRoot() string {\n\tif appRoot, ok := l.lookUp(l.EnvironmentPrefix() + _appRoot); ok {\n\t\treturn appRoot\n\t}\n\n\tif cwd, err := os.Getwd(); err != nil {\n\t\tpanic(fmt.Sprintf(\"Unable to get the current working directory: %q\", err.Error()))\n\t} else {\n\t\treturn cwd\n\t}\n}", "func ChdirProjectRoot() {\n\tpath := ProjectRoot()\n\terr := os.Chdir(path)\n\tif err != nil {\n\t\tstatuser.Error(\"Failed to change directory to project root\", err, 1)\n\t}\n}", "func solveProjRoot(start string) (string, error) {\n\tcwd, err := filepath.Abs(start)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tfor f := cwd; f != \"\"; cwd, f = path.Split(cwd) {\n\t\tcwd = filepath.Clean(cwd)\n\t\t//log.Println(\"Cwd\", cwd)\n\t\tfor _, dir := range BoilerDir { // Several possibilities\n\t\t\tboilerpath := 
filepath.Join(cwd, dir)\n\t\t\tst, err := os.Stat(boilerpath)\n\t\t\tif err == nil && st.IsDir() { // ignore error\n\t\t\t\treturn cwd, nil\n\t\t\t}\n\t\t}\n\t}\n\n\treturn start, nil\n}", "func GetRuntimeDirectory(subDir string) (path string) {\n\tsubDir = \"/\" + subDir + \"/\"\n\texecutableDirectory, _ := filepath.Abs(filepath.Dir(os.Args[0]))\n\tpath = filepath.Clean(executableDirectory+subDir) + string(filepath.Separator)\n\tif _, err := os.Stat(path); err == nil {\n\t\treturn\n\t}\n\tdevelopDirectory := build.Default.GOPATH + ProjectDirectory\n\tpath = filepath.Clean(developDirectory+subDir) + string(filepath.Separator)\n\tif _, err := os.Stat(path); err == nil {\n\t\treturn\n\t}\n\tpath = \"\"\n\treturn\n}", "func (r *Repository) RootPath() string {\n\treturn r.root\n}", "func GetExecDirectory() string {\n\tex, err := os.Executable()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\texPath := filepath.Dir(ex)\n\treturn exPath\n}", "func GoModRootPath(path string) (string, error) {\n\tif path == \"\" {\n\t\treturn \"\", &PathIsNotSetError{}\n\t}\n\n\tpath = filepath.Clean(path)\n\n\tfor {\n\t\tif fi, err := os.Stat(filepath.Join(path, goModFilename)); err == nil && !fi.IsDir() {\n\t\t\treturn path, nil\n\t\t}\n\n\t\td := filepath.Dir(path)\n\t\tif d == path {\n\t\t\tbreak\n\t\t}\n\n\t\tpath = d\n\t}\n\n\treturn \"\", nil\n}", "func ResolveRootDir(p string) string {\n\tparts := strings.Split(path.Dir(p), \"/\")\n\tvar roots []string\n\tfor _, part := range parts {\n\t\tif HasGlobChar(part) {\n\t\t\tbreak\n\t\t}\n\t\troots = append(roots, part)\n\t}\n\n\tif len(roots) == 0 {\n\t\treturn \"\"\n\t}\n\n\treturn strings.Join(roots, \"/\")\n}", "func Root(directory string) (string, error) {\n\tcmd := exec.Command(\"git\", \"rev-parse\", \"--show-toplevel\")\n\tcmd.Dir = filepath.Dir(directory)\n\tresult, err := cmd.CombinedOutput()\n\ttrimmed := strings.TrimSpace(string(result))\n\tif err != nil {\n\t\treturn \"\", errors.New(trimmed)\n\t}\n\treturn 
strings.TrimSpace(trimmed), nil\n}", "func GetProjectPath() string {\n\tb, err := exec.Command(\"go\", \"env\", \"GOPATH\").CombinedOutput()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tprojectPath := \"\"\n\tfor _, p := range filepath.SplitList(strings.TrimSpace(string(b))) {\n\t\tp = filepath.Join(p, filepath.FromSlash(\"/src/github.com/YuheiNakasaka/sandbox-golang\"))\n\t\tif _, err = os.Stat(p); err == nil {\n\t\t\tprojectPath = p\n\t\t\tbreak\n\t\t}\n\t}\n\treturn projectPath\n}", "func GetExecDirectory() (string, error) {\n\treturn filepath.Abs(filepath.Dir(os.Args[0]))\n}", "func (o BuildSpecRuntimeOutput) WorkDir() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v BuildSpecRuntime) *string { return v.WorkDir }).(pulumi.StringPtrOutput)\n}", "func (c *ConfigFile) ConfigDir() string {\n\trw, err := NewConfigReadWriter(c.version)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn rw.ConfigDir(c)\n}", "func (gc *GlobalOpts) OutputDir() string {\n\tyear, month, day := gc.StartTime.Date()\n\thour, min, sec := gc.StartTime.Clock()\n\tyearMonthDay := fmt.Sprintf(\"%d%d%d\", year, month, day)\n\ttimestamp := fmt.Sprintf(\"%d%d%d\", hour, min, sec)\n\n\treturn filepath.Join(gc.InstallDir, \"output\", yearMonthDay, timestamp)\n}", "func (conf *Config) GetExecRoot() string {\n\treturn conf.ExecRoot\n}", "func (conf *Config) GetExecRoot() string {\n\treturn conf.ExecRoot\n}", "func (s *DjangoEngine) RootDir(root string) *DjangoEngine {\n\tif s.fs != nil && root != \"\" && root != \"/\" && root != \".\" && root != s.rootDir {\n\t\tsub, err := fs.Sub(s.fs, s.rootDir)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\ts.fs = sub // here so the \"middleware\" can work.\n\t}\n\n\ts.rootDir = filepath.ToSlash(root)\n\treturn s\n}", "func FindRootRepoPath() (string, error) {\n\tpwd, err := os.Getwd()\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"Error getting pwd: \", err)\n\t\tos.Exit(1)\n\t}\n\n\tparts := strings.SplitAfter(pwd, string(os.PathSeparator))\n\tfor i, _ 
:= range parts {\n\t\ttestPath := path.Join(parts[:i+1]...)\n\t\tif IsRepo(testPath) {\n\t\t\treturn testPath, nil\n\t\t}\n\t}\n\n\t// Return pwd in case we're cloning into pwd.\n\treturn pwd, fmt.Errorf(\"No .git found in %s or any parent dir.\", pwd)\n}", "func BinDir() string {\n\treturn binDir\n}", "func (builder *Builder) GetBuildDir(build string) string {\n\treturn filepath.Join(builder.BuildDir, build)\n}", "func GameDir() (string, error) {\n\texePath, err := os.Executable()\n\tif err != nil {\n\t\treturn \"\", errors.WithStack(err)\n\t}\n\tgameDir := filepath.Dir(exePath)\n\treturn gameDir, nil\n}", "func (o BuildRunStatusBuildSpecRuntimeOutput) WorkDir() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v BuildRunStatusBuildSpecRuntime) *string { return v.WorkDir }).(pulumi.StringPtrOutput)\n}", "func (c *Compactor) compactRootDir() string {\n\treturn filepath.Join(c.compactorCfg.DataDir, \"compact\")\n}", "func (config *Config) WorkingDir() string {\n\tconfigPath, err := filepath.Abs(config.configPath)\n\tif err != nil {\n\t\tlog.Fatalf(\"config error: unable to find config file (%s):\\n%v\\n\", config.configPath, err)\n\t}\n\n\tconfigPath, err = filepath.EvalSymlinks(configPath)\n\tif err != nil {\n\t\tlog.Fatalf(\"config error: unable to remove symbolic links for filepath (%s):\\n%v\\n\", configPath, err)\n\t}\n\n\treturn filepath.Join(filepath.Dir(configPath), config.Path)\n}", "func (o BuildSpecRuntimePtrOutput) WorkDir() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *BuildSpecRuntime) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.WorkDir\n\t}).(pulumi.StringPtrOutput)\n}", "func projectDir() string {\n\t_, filename, _, _ := runtime.Caller(1)\n\treturn path.Dir(filename)\n}", "func SandboxRuntimeRootPath(id string) string {\n\treturn filepath.Join(VCStorePrefix, RunStoragePath(), id)\n}", "func WorkDir() string { return workDir }", "func SandboxConfigurationRootPath(id string) string {\n\treturn filepath.Join(VCStorePrefix, 
ConfigStoragePath(), id)\n}", "func (e *Engine) WorkDir() string {\n\treturn e.dirs.work\n}", "func ConfigDir() string {\n\treturn configDir\n}", "func getSandboxRootDir(rootDir, id string) string {\n\treturn filepath.Join(rootDir, sandboxesDir, id)\n}", "func (p *Project) Root() string {\n\treturn p.root\n}", "func (e *Engine) SourceDir() string {\n\treturn e.dirs.src\n}", "func (o BuildRunStatusBuildSpecRuntimePtrOutput) WorkDir() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *BuildRunStatusBuildSpecRuntime) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.WorkDir\n\t}).(pulumi.StringPtrOutput)\n}", "func rootDir(dir string) string {\n\tpkgIndex := -1\n\tparts := strings.Split(dir, string(filepath.Separator))\n\tfor i, d := range parts {\n\t\tif d == \"pkg\" {\n\t\t\tpkgIndex = i\n\t\t\tbreak\n\t\t}\n\t}\n\tif pkgIndex == -1 {\n\t\treturn dir\n\t}\n\treturn strings.Join(parts[:pkgIndex], string(filepath.Separator))\n}", "func (r Rust) SourceDirectory() string { return \"src\" }", "func (info AppInfo) ConfigRoot() (string, error) {\n\t// In Mac, there are no separate config and data directories\n\treturn info.DataRoot()\n}", "func getWorkingDir() (string, error) {\n\texPath, err := os.Executable()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn filepath.Dir(exPath), nil\n}", "func (this *Path) WorkDir() string {\n\texecPath := this.ExecPath()\n\treturn path.Dir(strings.Replace(execPath, \"\\\\\", \"/\", -1))\n}", "func GetTestAssetsDir() string {\n\n\twd := testutil.RunOnRootFolder()\n\twd = wd + \"/test_assets\"\n\n\t_, err := os.Stat(wd)\n\tif os.IsNotExist(err) {\n\t\tio.CheckErr(err)\n\t}\n\n\treturn wd\n}", "func (c *TestContext) FindRepoRoot() string {\n\tgoMod := c.findRepoFile(\"go.mod\")\n\treturn filepath.Dir(goMod)\n}", "func GetBasePath() string {\n\tvendorDir := filepath.Join(GetHomeDir(), \".autoai\")\n\tCreateFolderIfNotExist(vendorDir)\n\ttargetDir := filepath.Join(vendorDir, \"aid\")\n\treturn targetDir\n}", "func 
(c AppConfig) DictionaryDir() string {\n\tval, ok := c.ConfigVars[\"DictionaryDir\"]\n\tif ok {\n\t\treturn c.ProjectHome + \"/\" + val\n\t}\n\treturn c.ProjectHome + \"/data\"\n}", "func RootPkg(pkgs []*pkgs.Package) string {\n\troot := \"\"\n\tfor _, pkg := range pkgs {\n\t\tif root == \"\" {\n\t\t\troot = pkg.PkgPath\n\t\t} else {\n\t\t\troot = commonPrefix(root, pkg.PkgPath)\n\t\t}\n\t}\n\treturn root\n}", "func (e *Environment) GetRoot() Namespace {\n\treturn e.Get(RootDomain, func() Namespace {\n\t\treturn e.New(RootDomain)\n\t})\n}", "func (r *Ruby) BaseDir() string {\n\treturn path.Join(BaseDir(), \"ruby\")\n}", "func (b *KRMBlueprintTest) GetBuildDir() string {\n\tif b.buildDir == \"\" {\n\t\tb.t.Fatalf(\"unable to get a valid build directory\")\n\t}\n\n\treturn b.buildDir\n}", "func (p *Process) WorkDir() string {\n\treturn p.Builder.WorkDir\n}", "func (r *RelativePath) RootPath() string {\n\treturn \"/\" + strings.Join(r.stack[:r.limit], \"/\")\n}", "func (o BuildRunStatusBuildSpecSourcePtrOutput) ContextDir() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *BuildRunStatusBuildSpecSource) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.ContextDir\n\t}).(pulumi.StringPtrOutput)\n}", "func (c AppConfig) IndexDir() string {\n\treturn c.ProjectHome + \"/index\"\n}", "func (c DirCollector) Root() *VDir {\n\t// do we have a single slashed directory path /\n\tif c.Has(\"/\") {\n\t\treturn c.Get(\"/\")\n\t}\n\n\t// do we have a single dot directory path .\n\tif c.Has(\".\") {\n\t\treturn c.Get(\".\")\n\t}\n\n\t//else fallback to search for root boolean set\n\tvar vdir *VDir\n\n\tfor _, dir := range c {\n\t\tif dir.root {\n\t\t\tvdir = dir\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn vdir\n}", "func (o BuildRunStatusBuildSpecSourceOutput) ContextDir() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v BuildRunStatusBuildSpecSource) *string { return v.ContextDir }).(pulumi.StringPtrOutput)\n}", "func (app *AppBuilder) WorkingDir() *AppBuilder 
{\n\tnowPath, _ := os.Getwd()\n\tapp.workingDir = path.Join(nowPath, app.projectName)\n\treturn app\n}", "func (o *EditorSettings) GetProjectSettingsDir() gdnative.String {\n\t//log.Println(\"Calling EditorSettings.GetProjectSettingsDir()\")\n\n\t// Build out the method's arguments\n\tptrArguments := make([]gdnative.Pointer, 0, 0)\n\n\t// Get the method bind\n\tmethodBind := gdnative.NewMethodBind(\"EditorSettings\", \"get_project_settings_dir\")\n\n\t// Call the parent method.\n\t// String\n\tretPtr := gdnative.NewEmptyString()\n\tgdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)\n\n\t// If we have a return type, convert it from a pointer into its actual object.\n\tret := gdnative.NewStringFromPointer(retPtr)\n\treturn ret\n}", "func GetRootPath(name string) string {\n\tfor prefix, num := range setting.RootPathPairs {\n\t\tif strings.HasPrefix(name, prefix) {\n\t\t\treturn joinPath(name, num)\n\t\t}\n\t}\n\n\tif strings.HasPrefix(name, \"gopkg.in\") {\n\t\tm := gopkgPathPattern.FindStringSubmatch(strings.TrimPrefix(name, \"gopkg.in\"))\n\t\tif m == nil {\n\t\t\treturn name\n\t\t}\n\t\tuser := m[1]\n\t\trepo := m[2]\n\t\tif len(user) == 0 {\n\t\t\tuser = \"go-\" + repo\n\t\t}\n\t\treturn path.Join(\"gopkg.in\", user, repo+\".\"+m[3])\n\t}\n\treturn name\n}", "func getConfigFolderPath() string {\n\tsep := string(filepath.Separator)\n\twd, _ := os.Getwd()\n\n\twdPath := strings.Split(wd, sep)\n\tindexOfSrc := lastIndexOf(wdPath, \"src\")\n\tindexOfBin := lastIndexOf(wdPath, \"bin\")\n\n\tcfgPath := \"\"\n\tvar pathEl []string\n\tif indexOfBin > -1 && indexOfBin > indexOfSrc {\n\t\tpathEl = wdPath[:indexOfBin] // take up to bin (exclusive)\n\t} else if indexOfSrc > -1 {\n\t\tpathEl = wdPath[:indexOfSrc] // take up to src (exclusive)\n\t}\n\n\tif len(pathEl) > 0 {\n\t\tcfgPath = strings.Join(pathEl, sep) + sep\n\t\tcfgPath += \"config\" + sep\n\t}\n\n\treturn cfgPath\n}", "func (o BuildSpecSourceOutput) ContextDir() pulumi.StringPtrOutput 
{\n\treturn o.ApplyT(func(v BuildSpecSource) *string { return v.ContextDir }).(pulumi.StringPtrOutput)\n}" ]
[ "0.6979527", "0.61517763", "0.6130268", "0.61221904", "0.60577995", "0.601363", "0.60060436", "0.59370095", "0.59113044", "0.58437407", "0.5744886", "0.5708629", "0.5654415", "0.5567339", "0.55459046", "0.55438185", "0.5506007", "0.54859716", "0.5441813", "0.5428665", "0.54114777", "0.5363497", "0.53282374", "0.52893496", "0.5261283", "0.52528065", "0.5247457", "0.5236701", "0.5230631", "0.52301407", "0.5220869", "0.52022225", "0.5193572", "0.5190122", "0.5148385", "0.5141817", "0.51367027", "0.50940627", "0.5092368", "0.50705034", "0.50698817", "0.50668967", "0.5045427", "0.5012539", "0.4998328", "0.49980885", "0.4970547", "0.4956446", "0.49559978", "0.49377823", "0.49289218", "0.49278304", "0.4926596", "0.49079785", "0.48912764", "0.48774734", "0.48774734", "0.48665875", "0.4865658", "0.48589322", "0.48574874", "0.4853114", "0.48459595", "0.48203334", "0.48033655", "0.47919282", "0.47681642", "0.47581714", "0.4757688", "0.47460526", "0.47431841", "0.47399518", "0.47335914", "0.47326234", "0.47284523", "0.47259086", "0.47197244", "0.47119376", "0.47048822", "0.4701461", "0.46984273", "0.4697064", "0.46876472", "0.46855816", "0.46667156", "0.4645184", "0.4641102", "0.463975", "0.4632821", "0.46174318", "0.46001515", "0.45975238", "0.45812526", "0.45743835", "0.4558528", "0.455495", "0.45548886", "0.4550104", "0.45474738", "0.45432597" ]
0.8156449
0
initializes a queryTerm from a given Filter
func (dg *dependencyGraph) makeQueryTerm(t sparql.Triple) *queryTerm { qt := &queryTerm{ t, []*queryTerm{}, []string{}, } if qt.Subject.IsVariable() { dg.variables[qt.Subject.String()] = false qt.variables = append(qt.variables, qt.Subject.String()) } if qt.Predicates[0].Predicate.IsVariable() { dg.variables[qt.Predicates[0].Predicate.String()] = false qt.variables = append(qt.variables, qt.Predicates[0].Predicate.String()) } if qt.Object.IsVariable() { dg.variables[qt.Object.String()] = false qt.variables = append(qt.variables, qt.Object.String()) } return qt }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (f *FilterOp) Term(field string, value interface{}) *FilterOp {\n\tif len(f.TermMap) == 0 {\n\t\tf.TermMap = make(map[string]interface{})\n\t}\n\n\tf.TermMap[field] = value\n\treturn f\n}", "func NewFilterType(t string) (FilterType, error) {\n\t_, ok := fts[t]\n\tif !ok {\n\t\treturn \"\", fmt.Errorf(\"unsupported FilterType: %s\", t)\n\t}\n\treturn FilterType(t), nil\n}", "func NewFilter() *Builder {\n\treturn &Builder{}\n}", "func NewFilter(query string) (*Filter, error) {\n\tq, err := parse.ParseFilter(query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Recursively walk the filter expression, \"compiling\" it into\n\t// a filterFn.\n\t//\n\t// We cache extractor functions since it's common to see the\n\t// same key multiple times.\n\textractors := make(map[string]extractor)\n\tvar walk func(q parse.Filter) (filterFn, error)\n\twalk = func(q parse.Filter) (filterFn, error) {\n\t\tvar err error\n\t\tswitch q := q.(type) {\n\t\tcase *parse.FilterOp:\n\t\t\tsubs := make([]filterFn, len(q.Exprs))\n\t\t\tfor i, sub := range q.Exprs {\n\t\t\t\tsubs[i], err = walk(sub)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn filterOp(q.Op, subs), nil\n\n\t\tcase *parse.FilterMatch:\n\t\t\tif q.Key == \".unit\" {\n\t\t\t\treturn func(res *benchfmt.Result) (mask, bool) {\n\t\t\t\t\t// Find the units this matches.\n\t\t\t\t\tm := newMask(len(res.Values))\n\t\t\t\t\tfor i := range res.Values {\n\t\t\t\t\t\tif q.MatchString(res.Values[i].Unit) || (res.Values[i].OrigUnit != \"\" && q.MatchString(res.Values[i].OrigUnit)) {\n\t\t\t\t\t\t\tm.set(i)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\treturn m, false\n\t\t\t\t}, nil\n\t\t\t}\n\n\t\t\tif q.Key == \".config\" {\n\t\t\t\treturn nil, &parse.SyntaxError{query, q.Off, \".config is only allowed in projections\"}\n\t\t\t}\n\n\t\t\t// Construct the extractor.\n\t\t\text := extractors[q.Key]\n\t\t\tif ext == nil {\n\t\t\t\text, err = newExtractor(q.Key)\n\t\t\t\tif err != nil 
{\n\t\t\t\t\treturn nil, &parse.SyntaxError{query, q.Off, err.Error()}\n\t\t\t\t}\n\t\t\t\textractors[q.Key] = ext\n\t\t\t}\n\n\t\t\t// Make the filter function.\n\t\t\treturn func(res *benchfmt.Result) (mask, bool) {\n\t\t\t\treturn nil, q.Match(ext(res))\n\t\t\t}, nil\n\t\t}\n\t\tpanic(fmt.Sprintf(\"unknown query node type %T\", q))\n\t}\n\tf, err := walk(q)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Filter{f}, nil\n}", "func (s *Searcher) Filter(filter *Filter, values *resource.MetaValues) *Searcher {\n\tnewSearcher := s.clone()\n\tif newSearcher.filters == nil {\n\t\tnewSearcher.filters = map[*Filter]*resource.MetaValues{}\n\t}\n\tnewSearcher.filters[filter] = values\n\treturn newSearcher\n}", "func (q ConstantScoreQuery) Filter(filter Filter) ConstantScoreQuery {\n\tq.query = nil\n\tq.filter = filter\n\treturn q\n}", "func InitFilter(opts []string) (filters.Spec, error) {\n\treturn noopSpec{}, nil\n}", "func NewFilter(config *config.Config) (filter *Filter) {\n\tfilter = new(Filter)\n\tfilter.Config = config\n\treturn\n}", "func (r ApiGetHyperflexServiceAuthTokenListRequest) Filter(filter string) ApiGetHyperflexServiceAuthTokenListRequest {\n\tr.filter = &filter\n\treturn r\n}", "func (r ApiGetHyperflexNodeListRequest) Filter(filter string) ApiGetHyperflexNodeListRequest {\n\tr.filter = &filter\n\treturn r\n}", "func NewFilter(t FilterOperator) *Filter {\n\treturn &Filter{\n\t\toperator: t,\n\t}\n}", "func NewPathFilter(filterPath string, describer Describer) *PathFilter {\n\n\t// handle special case where you get `/namespace/default` by making\n\t// it optional in the regex.\n\tif filterPath == \"/\" {\n\t\tfilterPath += \"?\"\n\t}\n\tre := regexp.MustCompile(fmt.Sprintf(`^(/namespace/(?P<namespace>[^/]+))?%s$`, filterPath))\n\treturn &PathFilter{\n\t\tre: re,\n\t\tfilterPath: filterPath,\n\t\tDescriber: describer,\n\t}\n}", "func NewFilter() Filter {\n\treturn &filter{}\n}", "func (c *Client) NewFilter(filter Filter) (*QuantityResponse, error) 
{\n\trequest := c.newRequest(EthNewFilter)\n\n\trequest.Params = []interface{}{\n\t\tfilter,\n\t}\n\tresponse := &QuantityResponse{}\n\n\treturn response, c.send(request, response)\n}", "func (f *FilterOp) Terms(field string, executionMode TermExecutionMode, values ...interface{}) *FilterOp {\n\t//You can only have one terms in a filter\n\tf.TermsMap = make(map[string]interface{})\n\n\tif executionMode != \"\" {\n\t\tf.TermsMap[\"execution\"] = executionMode\n\t}\n\n\tf.TermsMap[field] = values\n\n\treturn f\n}", "func NewFilter(filter string) (Filter, error) {\n\tconst errorString = \"Invalid filter: %s\"\n\n\ttokens := strings.Split(filter, \"/\")\n\tif len(tokens) < 2 {\n\t\treturn nil, fmt.Errorf(errorString, filter)\n\t}\n\tif tokens[0] != \"\" {\n\t\treturn nil, fmt.Errorf(errorString, filter)\n\t}\n\tswitch tokens[1] {\n\tcase \"all-events\":\n\t\tif len(tokens) == 2 {\n\t\t\treturn &AllEventsFilter{}, nil\n\t\t}\n\tcase \"user\":\n\t\tif len(tokens) == 3 {\n\t\t\treturn &UserFilter{user: tokens[2]}, nil\n\t\t}\n\tcase \"problem\":\n\t\tif len(tokens) == 3 {\n\t\t\treturn &ProblemFilter{problem: tokens[2]}, nil\n\t\t}\n\tcase \"problemset\":\n\t\tproblemset, err := strconv.ParseInt(tokens[2], 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tswitch len(tokens) {\n\t\tcase 3:\n\t\t\treturn &ProblemsetFilter{problemset: problemset}, nil\n\t\tcase 4:\n\t\t\treturn &ProblemsetFilter{problemset: problemset, token: tokens[3]}, nil\n\t\t}\n\tcase \"contest\":\n\t\tswitch len(tokens) {\n\t\tcase 3:\n\t\t\treturn &ContestFilter{contest: tokens[2]}, nil\n\t\tcase 4:\n\t\t\treturn &ContestFilter{contest: tokens[2], token: tokens[3]}, nil\n\t\t}\n\t}\n\treturn nil, fmt.Errorf(errorString, filter)\n}", "func NewFilter(prop, op string, val interface{}) *Filter {\n\treturn &Filter{\n\t\tProp: prop,\n\t\tOperator: op,\n\t\tValue: val,\n\t}\n}", "func New() filters.Spec {\n\treturn filter{}\n}", "func initFilter(node *uast.Node, xpath string) (*C.char, 
C.uintptr_t, func()) {\n\tfindMutex.Lock()\n\tcquery := spool.getCstring(xpath)\n\tptr := nodeToPtr(node)\n\n\treturn cquery, ptr, func() {\n\t\tspool.release()\n\t\tkpool = make(map[*uast.Node][]string)\n\t\tfindMutex.Unlock()\n\t}\n}", "func (r ApiGetHyperflexLunListRequest) Filter(filter string) ApiGetHyperflexLunListRequest {\n\tr.filter = &filter\n\treturn r\n}", "func (r ApiGetHyperflexHwCatalogListRequest) Filter(filter string) ApiGetHyperflexHwCatalogListRequest {\n\tr.filter = &filter\n\treturn r\n}", "func (r *BaseRequest) Filter(value string) {\n\tif r.query == nil {\n\t\tr.query = url.Values{}\n\t}\n\tr.query.Add(\"$filter\", value)\n}", "func NewFilter(field Field, value interface{}, operation Operation) *Filter {\n\treturn &Filter{\n\t\tField: field,\n\t\tValue: value,\n\t\tOperation: operation,\n\t}\n}", "func (a *AliasAddAction) Filter(filter Query) *AliasAddAction {\n\ta.filter = filter\n\treturn a\n}", "func (r ApiGetHyperflexAppCatalogListRequest) Filter(filter string) ApiGetHyperflexAppCatalogListRequest {\n\tr.filter = &filter\n\treturn r\n}", "func ExampleTerm_Filter() {\n\t// Fetch the row from the database\n\tres, err := DB(\"examples\").Table(\"users\").Filter(map[string]interface{}{\n\t\t\"age\": 30,\n\t}).Run(session)\n\tif err != nil {\n\t\tfmt.Print(err)\n\t\treturn\n\t}\n\tdefer res.Close()\n\n\t// Scan query result into the person variable\n\tvar users []interface{}\n\terr = res.All(&users)\n\tif err != nil {\n\t\tfmt.Printf(\"Error scanning database result: %s\", err)\n\t\treturn\n\t}\n\tfmt.Printf(\"%d users\", len(users))\n\n\t// Output: 2 users\n}", "func NewFilter(rule string) BaseFilter {\n\t// set the appropriate comparison function based on which\n\t// operator is given\n\tvar op string\n\tvar isregex, negate bool\n\n\tif strings.Contains(rule, \"!=\") {\n\t\top = \"!=\"\n\t\tnegate = true\n\t\tisregex = false\n\t} else if strings.Contains(rule, \"=\") {\n\t\top = \"=\"\n\t\tnegate = false\n\t\tisregex = false\n\t} else if 
strings.Contains(rule, \"!~\") {\n\t\top = \"!~\"\n\t\tnegate = true\n\t\tisregex = true\n\t} else if strings.Contains(rule, \"~\") {\n\t\top = \"~\"\n\t\tnegate = false\n\t\tisregex = true\n\t} else {\n\t\tfmt.Println(\"[ERROR] not sure how to parse rule: \" + rule)\n\t\tos.Exit(1)\n\t}\n\n\t// split the rule into fields/values and set the appropriate fields\n\t// in the new filter\n\topsides := strings.Split(rule, op)\n\tif len(opsides) != 2 {\n\t\tfmt.Println(\"[ERROR] rule contains too many boolean operators: \" + rule)\n\t\tos.Exit(1)\n\t}\n\n\tfields := strings.Split(opsides[0], \",\")\n\tvalues := strings.Split(opsides[1], \",\")\n\n\t// choose the comparison operator based on whether or not to negate\n\t// the filter\n\tif isregex {\n\t\tf := &RegexFilter{}\n\n\t\t// compile the user-supplied regexes\n\t\tregex_values := make([]*regexp.Regexp, len(values))\n\t\tfor i, v := range values {\n\t\t\tmy_regex, err := regexp.Compile(v)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"[ERROR] unable to compile POSIX regex: \" + v)\n\t\t\t\tos.Exit(1)\n\t\t\t} else {\n\t\t\t\tregex_values[i] = my_regex\n\t\t\t}\n\t\t}\n\n\t\t// set the fields and values of the filter\n\t\tf.fields = fields\n\t\tf.values = regex_values\n\n\t\t// set the compare function based on whether or not negation should be used\n\t\tif negate {\n\t\t\tf.compare_function = func(a string, re *regexp.Regexp) bool {\n\t\t\t\treturn !re.MatchString(a)\n\t\t\t}\n\t\t} else {\n\t\t\tf.compare_function = func(a string, re *regexp.Regexp) bool {\n\t\t\t\treturn re.MatchString(a)\n\t\t\t}\n\t\t}\n\n\t\treturn BaseFilter(f)\n\t} else {\n\t\tf := &Filter{}\n\n\t\t// set the fields and values of the filter\n\t\tf.fields = fields\n\t\tf.values = values\n\n\t\t// set the compare function based on whether or not negation should be used\n\t\tif negate {\n\t\t\tf.compare_function = func(a string, b string) bool {\n\t\t\t\treturn (a != b)\n\t\t\t}\n\t\t} else {\n\t\t\tf.compare_function = func(a string, b string) 
bool {\n\t\t\t\treturn (a == b)\n\t\t\t}\n\t\t}\n\n\t\treturn BaseFilter(f)\n\t}\n}", "func New() *Filter {\n\treturn &Filter{\n\t\tinclude: []Matcher{},\n\t\texclude: []Matcher{},\n\t\tlimit: 0,\n\t}\n}", "func (r ApiGetHyperflexClusterListRequest) Filter(filter string) ApiGetHyperflexClusterListRequest {\n\tr.filter = &filter\n\treturn r\n}", "func (r ApiGetHyperflexLicenseListRequest) Filter(filter string) ApiGetHyperflexLicenseListRequest {\n\tr.filter = &filter\n\treturn r\n}", "func NewFilter() *Filterdata {\r\n\tfilter := new(Filterdata)\r\n\tvar err error\r\n\tfilter.regRData, err = lru.New(P2pCacheTxSize)\r\n\tif err != nil {\r\n\t\tpanic(err)\r\n\t}\r\n\treturn filter\r\n}", "func NewFilter(action Action, rules ...Rule) *List { return &List{rules: rules, action: action} }", "func NewFilter(mod *elf.Module) *Filter {\n\treturn &Filter{\n\t\tmod: mod,\n\t\tconfig: mod.Map(defaultFilterConfig),\n\t\tfilter: mod.Map(defaultFilterName),\n\t\trules: make([]*FilterRule, 0),\n\t}\n}", "func NewFilter() Filter {\n\treturn &msfilter{parameters: make(map[string]interface{})}\n}", "func (r ApiGetHyperflexTargetListRequest) Filter(filter string) ApiGetHyperflexTargetListRequest {\n\tr.filter = &filter\n\treturn r\n}", "func (r ApiGetHyperflexConfigResultEntryListRequest) Filter(filter string) ApiGetHyperflexConfigResultEntryListRequest {\n\tr.filter = &filter\n\treturn r\n}", "func newFilter() *tasks.DateFiltering {\n return &tasks.DateFiltering{}\n}", "func (s *FieldSort) Filter(filter Query) *FieldSort {\n\ts.filter = filter\n\treturn s\n}", "func NewTerm(v Value) *Term {\n\treturn &Term{\n\t\tValue: v,\n\t}\n}", "func (b *QueryBuilder) Init(ctx context.Context) {\n\tb.Ctx = ctx\n}", "func NewFilter(ctx *pulumi.Context,\n\tname string, args *FilterArgs, opts ...pulumi.ResourceOption) (*Filter, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.FilterAction == nil {\n\t\treturn nil, 
errors.New(\"invalid value for required argument 'FilterAction'\")\n\t}\n\tif args.FilterCriteria == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'FilterCriteria'\")\n\t}\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource Filter\n\terr := ctx.RegisterResource(\"aws-native:inspectorv2:Filter\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (r ApiGetHyperflexSoftwareDistributionEntryListRequest) Filter(filter string) ApiGetHyperflexSoftwareDistributionEntryListRequest {\n\tr.filter = &filter\n\treturn r\n}", "func (r ApiGetHyperflexServerModelListRequest) Filter(filter string) ApiGetHyperflexServerModelListRequest {\n\tr.filter = &filter\n\treturn r\n}", "func BindFilter(r *http.Request, filter Filter) (err error) {\n\tdecoder := schema.NewDecoder()\n\tif err = decoder.Decode(filter, r.URL.Query()); err != nil {\n\t\treturn\n\t}\n\treturn\n}", "func (o *StandardOptions) NewFilter(agent string) (*protocol.Filter, error) {\n\treturn filter.NewFilter(\n\t\tfilter.FactFilter(o.FactFilter...),\n\t\tfilter.AgentFilter(o.AgentFilter...),\n\t\tfilter.ClassFilter(o.ClassFilter...),\n\t\tfilter.IdentityFilter(o.IdentityFilter...),\n\t\tfilter.CombinedFilter(o.CombinedFilter...),\n\t\tfilter.CompoundFilter(o.CompoundFilter),\n\t\tfilter.AgentFilter(agent),\n\t)\n}", "func (r ApiGetResourcepoolUniverseListRequest) Filter(filter string) ApiGetResourcepoolUniverseListRequest {\n\tr.filter = &filter\n\treturn r\n}", "func SearchTerm(query url.Values, m martini.Context) {\n\tp := SearchParameter{}\n\tp.Search = query.Get(\"search\")\n\tp.Success = p.Search != \"\"\n\n\tm.Map(p)\n}", "func NewFilter() *Filter {\n\treturn new(Filter)\n}", "func newFilterView(gui *gocui.Gui) (controller *Filter) {\n\tcontroller = new(Filter)\n\n\tcontroller.filterEditListeners = make([]FilterEditListener, 0)\n\n\t// populate main fields\n\tcontroller.name = \"filter\"\n\tcontroller.gui = 
gui\n\tcontroller.labelStr = \"Path Filter: \"\n\tcontroller.hidden = true\n\n\tcontroller.requestedHeight = 1\n\n\treturn controller\n}", "func (fq *FactQuery) Filter() *FactFilter {\n\treturn &FactFilter{fq}\n}", "func NewDataFilter(s string) *DataFilter {\n\tkeywords := strings.Split(s, \" \")\n\treturn &DataFilter{ keywords: keywords }\n}", "func (o _KeyColumnUsageObjs) newFilter(name, op string, params ...interface{}) gmq.Filter {\n\tif strings.ToUpper(op) == \"IN\" {\n\t\treturn gmq.InFilter(name, params)\n\t}\n\treturn gmq.UnitFilter(name, op, params[0])\n}", "func (r ApiGetHyperflexConfigResultListRequest) Filter(filter string) ApiGetHyperflexConfigResultListRequest {\n\tr.filter = &filter\n\treturn r\n}", "func (r ApiGetHyperflexSoftwareDistributionVersionListRequest) Filter(filter string) ApiGetHyperflexSoftwareDistributionVersionListRequest {\n\tr.filter = &filter\n\treturn r\n}", "func (m *QueryMatch)WithZeroTermsQuery(){\n\tm.Options[\"zero_terms_query\"]=\"all\"\n}", "func (r ApiGetIqnpoolUniverseListRequest) Filter(filter string) ApiGetIqnpoolUniverseListRequest {\n\tr.filter = &filter\n\treturn r\n}", "func (r ApiDatacentersLansGetRequest) Filter(key string, value string) ApiDatacentersLansGetRequest {\n\tfilterKey := fmt.Sprintf(FilterQueryParam, key)\n\tr.filters[filterKey] = []string{value}\n\treturn r\n}", "func (r ApiGetHyperflexHxdpVersionListRequest) Filter(filter string) ApiGetHyperflexHxdpVersionListRequest {\n\tr.filter = &filter\n\treturn r\n}", "func SetTerm(t ...*Term) *Term {\n\tset := NewSet(t...)\n\treturn &Term{\n\t\tValue: set,\n\t}\n}", "func (r ApiGetHyperflexVolumeListRequest) Filter(filter string) ApiGetHyperflexVolumeListRequest {\n\tr.filter = &filter\n\treturn r\n}", "func (r ApiGetHyperflexHealthCheckDefinitionListRequest) Filter(filter string) ApiGetHyperflexHealthCheckDefinitionListRequest {\n\tr.filter = &filter\n\treturn r\n}", "func (r ApiGetHyperflexAlarmListRequest) Filter(filter string) 
ApiGetHyperflexAlarmListRequest {\n\tr.filter = &filter\n\treturn r\n}", "func (r ApiDatacentersGetRequest) Filter(key string, value string) ApiDatacentersGetRequest {\n\tfilterKey := fmt.Sprintf(FilterQueryParam, key)\n\tr.filters[filterKey] = []string{value}\n\treturn r\n}", "func (r ApiGetHyperflexHealthListRequest) Filter(filter string) ApiGetHyperflexHealthListRequest {\n\tr.filter = &filter\n\treturn r\n}", "func (r ApiGetHyperflexInitiatorGroupListRequest) Filter(filter string) ApiGetHyperflexInitiatorGroupListRequest {\n\tr.filter = &filter\n\treturn r\n}", "func (r ApiGetHyperflexServerFirmwareVersionEntryListRequest) Filter(filter string) ApiGetHyperflexServerFirmwareVersionEntryListRequest {\n\tr.filter = &filter\n\treturn r\n}", "func (ftq *FactTypeQuery) Filter() *FactTypeFilter {\n\treturn &FactTypeFilter{ftq}\n}", "func (r ApiGetHyperflexDatastoreStatisticListRequest) Filter(filter string) ApiGetHyperflexDatastoreStatisticListRequest {\n\tr.filter = &filter\n\treturn r\n}", "func (sfr *stateFilterRunner) Init(\n\tcriteria criterias.Criteriable,\n\tconnection *golastic.Connection,\n\tinfo ...infos.Informable,\n) (Runnerable, error) {\n\tif err := sfr.BaseInit(criteria, connection, info...); err != nil {\n\t\treturn nil, err\n\t}\n\n\tsfr.criteria = criteria.(*criterias.State)\n\n\treturn sfr, nil\n}", "func (t *OpenconfigQos_Qos_Interfaces_Interface_Input_Classifers_Classifier_Terms) NewTerm(Id string) (*OpenconfigQos_Qos_Interfaces_Interface_Input_Classifers_Classifier_Terms_Term, error){\n\n\t// Initialise the list within the receiver struct if it has not already been\n\t// created.\n\tif t.Term == nil {\n\t\tt.Term = make(map[string]*OpenconfigQos_Qos_Interfaces_Interface_Input_Classifers_Classifier_Terms_Term)\n\t}\n\n\tkey := Id\n\n\t// Ensure that this key has not already been used in the\n\t// list. 
Keyed YANG lists do not allow duplicate keys to\n\t// be created.\n\tif _, ok := t.Term[key]; ok {\n\t\treturn nil, fmt.Errorf(\"duplicate key %v for list Term\", key)\n\t}\n\n\tt.Term[key] = &OpenconfigQos_Qos_Interfaces_Interface_Input_Classifers_Classifier_Terms_Term{\n\t\tId: &Id,\n\t}\n\n\treturn t.Term[key], nil\n}", "func (r ApiGetHyperflexDriveListRequest) Filter(filter string) ApiGetHyperflexDriveListRequest {\n\tr.filter = &filter\n\treturn r\n}", "func (r ApiGetHyperflexVmRestoreOperationListRequest) Filter(filter string) ApiGetHyperflexVmRestoreOperationListRequest {\n\tr.filter = &filter\n\treturn r\n}", "func (r ApiGetHyperflexVmImportOperationListRequest) Filter(filter string) ApiGetHyperflexVmImportOperationListRequest {\n\tr.filter = &filter\n\treturn r\n}", "func (f *Filter) Init(x *mat64.Vector, v mat64.Matrix) error {\n\n\trF, cF := f.F.Dims()\n\n\t//\n\t// X\n\t//\n\n\tif x == nil {\n\t\tf.X = mat64.NewVector(cF, nil)\n\n\t} else {\n\t\trX, _ := x.Dims()\n\t\tif rX != cF {\n\t\t\treturn errors.New(\"row dim of x must be matched to column dim of F\")\n\t\t}\n\t\tf.X = x\n\t}\n\n\t//\n\t// V\n\t//\n\n\tif v == nil {\n\t\tvar m *mat64.Dense\n\t\tm.Mul(f.G, f.Q)\n\t\tf.V.Mul(m, f.G.T())\n\n\t} else {\n\t\trV, cV := v.Dims()\n\t\tif rV != cV {\n\t\t\treturn errors.New(\"V must be square matrix\")\n\t\t}\n\t\tif rV != rF {\n\t\t\treturn errors.New(\"row dim of V must be matched to row dim of F\")\n\t\t}\n\n\t\tf.V = mat64.DenseCopyOf(v)\n\t}\n\n\treturn nil\n}", "func (r ApiGetHyperflexHypervisorHostListRequest) Filter(filter string) ApiGetHyperflexHypervisorHostListRequest {\n\tr.filter = &filter\n\treturn r\n}", "func (q *Query) Filter(fn Filter) error {\n\terr, Answer := fn(q.Answer)\n\tif err == nil {\n\t\tq.Answer = Answer\n\t\treturn nil\n\t}\n\treturn err\n}", "func createFilter(searchAttr string, attr []string, baseFilter string) string {\n\tfilterAttr := \"\"\n\n\tfor _, attr := range attr {\n\t\tfilterAttr += fmt.Sprintf(\"(%s=%s)\", attr, 
searchAttr)\n\t}\n\n\tfilterAllArgs := fmt.Sprintf(\"(|%s)\", filterAttr)\n\n\treturn fmt.Sprintf(\"(&(%s)%s)\", baseFilter, filterAllArgs)\n}", "func NewFilter(filters ...Filter) Filter {\n\tvar filter Filter\n\tfor _, f := range filters {\n\t\tfilter = filter | f\n\t}\n\treturn filter\n}", "func (r ApiGetResourcepoolLeaseListRequest) Filter(filter string) ApiGetResourcepoolLeaseListRequest {\n\tr.filter = &filter\n\treturn r\n}", "func (r ApiGetHyperflexServerFirmwareVersionListRequest) Filter(filter string) ApiGetHyperflexServerFirmwareVersionListRequest {\n\tr.filter = &filter\n\treturn r\n}", "func (r ApiGetHyperflexIscsiNetworkListRequest) Filter(filter string) ApiGetHyperflexIscsiNetworkListRequest {\n\tr.filter = &filter\n\treturn r\n}", "func NewStringFilter() *StringFilter {\n\tr, err := ring.Init(1000000, 0.001)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\treturn &StringFilter{filter: r}\n}", "func (r ApiGetResourcepoolLeaseResourceListRequest) Filter(filter string) ApiGetResourcepoolLeaseResourceListRequest {\n\tr.filter = &filter\n\treturn r\n}", "func NewTerm(prompt string) *Term {\n\tt := &Term{make(map[string]func([]string), 1), make(chan bool), prompt, nil}\n\treturn t\n}", "func Filter(f *protocol.Filter) RequestOption {\n\treturn func(o *RequestOptions) {\n\t\to.Filter = f\n\t}\n}", "func (r ApiGetIqnpoolLeaseListRequest) Filter(filter string) ApiGetIqnpoolLeaseListRequest {\n\tr.filter = &filter\n\treturn r\n}", "func (r ApiGetHyperflexProtectedClusterListRequest) Filter(filter string) ApiGetHyperflexProtectedClusterListRequest {\n\tr.filter = &filter\n\treturn r\n}", "func NewFilter(filePath string) (filter *Filter, err error) {\n\n\tif !PathExists(filePath) {\n\t\treturn nil, fmt.Errorf(\"File does not existst: %s\", filePath)\n\t}\n\n\tf := &Filter{}\n\n\tlines, err := GetFileLines(filePath)\n\tif err != nil {\n\t\treturn f, fmt.Errorf(\"Error while reading file: %s -> %v\", filePath, err)\n\t}\n\tf.parts = lines\n\n\treturn f, nil\n}", "func 
(v TermsResource) New(c buffalo.Context) error {\n\tcurrent_user := c.Value(\"current_user\").(*models.User)\n\tif !current_user.IsAdmin {\n\t\tc.Flash().Add(\"danger\", \"You don't have access to that!\")\n\t\treturn c.Render(404, r.HTML(\"index.html\"))\n\t}\n\n\tc.Set(\"type_options\", models.TermTypeOptions())\n\treturn c.Render(200, r.Auto(c, &models.Term{}))\n}", "func (r ApiGetHyperflexFeatureLimitExternalListRequest) Filter(filter string) ApiGetHyperflexFeatureLimitExternalListRequest {\n\tr.filter = &filter\n\treturn r\n}", "func (res *Resource) Filter(filter *Filter) {\n\tfilter.Resource = res\n\n\tif filter.Label == \"\" {\n\t\tfilter.Label = utils.HumanizeString(filter.Name)\n\t}\n\n\tif filter.Config != nil {\n\t\tfilter.Config.ConfigureQORAdminFilter(filter)\n\t}\n\n\tif filter.Handler == nil {\n\t\t// generate default handler\n\t\tfilter.Handler = func(db *gorm.DB, filterArgument *FilterArgument) *gorm.DB {\n\t\t\tif metaValue := filterArgument.Value.Get(\"Value\"); metaValue != nil {\n\t\t\t\treturn defaultFieldFilter(res, []string{filter.Name}, utils.ToString(metaValue.Value), db, filterArgument.Context)\n\t\t\t}\n\t\t\treturn db\n\t\t}\n\t}\n\n\tif filter.Type != \"\" {\n\t\tres.filters = append(res.filters, filter)\n\t} else {\n\t\tutils.ExitWithMsg(\"Invalid filter definition %v for resource %v\", filter.Name, res.Name)\n\t}\n}", "func (d *Data) NewFilter(fs storage.FilterSpec) (storage.Filter, error) {\n\tfilter := &Filter{Data: d, fs: fs}\n\n\t// Get associated labelblk. If none, we can't use roi filter so just do standard data send.\n\tlblk, err := d.GetSyncedLabelblk()\n\tif err != nil {\n\t\tdvid.Infof(\"Unable to get synced labelblk for labelvol %q. 
Unable to do any ROI-based filtering.\\n\", d.DataName())\n\t\treturn nil, nil\n\t}\n\n\troiIterator, _, found, err := roi.NewIteratorBySpec(fs, lblk)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !found || roiIterator == nil {\n\t\tdvid.Debugf(\"No ROI found so using generic data push for data %q.\\n\", d.DataName())\n\t\treturn nil, nil\n\t}\n\tfilter.it = roiIterator\n\n\treturn filter, nil\n}", "func (r ApiGetBulkSubRequestObjListRequest) Filter(filter string) ApiGetBulkSubRequestObjListRequest {\n\tr.filter = &filter\n\treturn r\n}", "func (f *Find) Filter(filter bsoncore.Document) *Find {\n\tif f == nil {\n\t\tf = new(Find)\n\t}\n\n\tf.filter = filter\n\treturn f\n}", "func NewTerm(id NodeID, rdfType, lbl, iri string) Term {\n\treturn &node{\n\t\tid: id,\n\t\trdfType: rdfType,\n\t\tlbl: lbl,\n\t\tiri: iri,\n\t}\n}", "func NewWorkbookFilter()(*WorkbookFilter) {\n m := &WorkbookFilter{\n Entity: *NewEntity(),\n }\n return m\n}", "func (r ApiGetHyperflexSoftwareDistributionComponentListRequest) Filter(filter string) ApiGetHyperflexSoftwareDistributionComponentListRequest {\n\tr.filter = &filter\n\treturn r\n}", "func (s *Select) SetFilter(fieldName string, op string, value interface{}) *Select {\n\tf := filter{\n\t\tfield: strings.ToLower(fieldName),\n\t\top: op,\n\t\tvalue: value,\n\t}\n\ts.filters = append(s.filters, f)\n\treturn s\n}", "func (r ApiGetEtherPortChannelListRequest) Filter(filter string) ApiGetEtherPortChannelListRequest {\n\tr.filter = &filter\n\treturn r\n}", "func NewFilter(keys []uint32, bitsPerKey int) Filter {\n\treturn Filter(appendFilter(nil, keys, bitsPerKey))\n}" ]
[ "0.6605704", "0.5789612", "0.57304215", "0.5618418", "0.5567405", "0.5561679", "0.5525997", "0.5523312", "0.5509586", "0.5504292", "0.5501908", "0.5493613", "0.5439888", "0.5420406", "0.5420044", "0.5417559", "0.540583", "0.5404166", "0.54037094", "0.5397271", "0.53965527", "0.53915006", "0.5371231", "0.5365192", "0.53540283", "0.534388", "0.5335438", "0.5330881", "0.530483", "0.52845526", "0.5270697", "0.5260144", "0.52522767", "0.5223091", "0.5214831", "0.52000517", "0.5191512", "0.5163089", "0.51600987", "0.5158921", "0.5158178", "0.5153951", "0.51489836", "0.5116395", "0.51139593", "0.5111534", "0.5105547", "0.5104867", "0.510204", "0.50792724", "0.5068538", "0.5064065", "0.50616217", "0.5061315", "0.5056103", "0.5045064", "0.5044486", "0.50410426", "0.5039812", "0.50353444", "0.50272685", "0.50264275", "0.5018457", "0.5013191", "0.5012815", "0.5011266", "0.50102013", "0.5009791", "0.5005644", "0.5001267", "0.4994354", "0.49862847", "0.49820676", "0.49753028", "0.4971485", "0.49706197", "0.49681723", "0.49595276", "0.49588418", "0.49576455", "0.4948748", "0.49454784", "0.49451256", "0.49373475", "0.49356064", "0.4933956", "0.49249396", "0.49160516", "0.49132863", "0.49090457", "0.49080113", "0.49011737", "0.48981896", "0.48864684", "0.4884891", "0.48825672", "0.48815554", "0.4879615", "0.4871395", "0.48688364" ]
0.50716466
50
returns true if two query terms are equal
func (qt *queryTerm) equals(qt2 *queryTerm) bool { return qt.Subject == qt2.Subject && qt.Object == qt2.Object && reflect.DeepEqual(qt.Predicates, qt2.Predicates) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (term *Term) Equal(other *Term) bool {\n\tif term == nil && other != nil {\n\t\treturn false\n\t}\n\tif term != nil && other == nil {\n\t\treturn false\n\t}\n\tif term == other {\n\t\treturn true\n\t}\n\n\t// TODO(tsandall): This early-exit avoids allocations for types that have\n\t// Equal() functions that just use == underneath. We should revisit the\n\t// other types and implement Equal() functions that do not require\n\t// allocations.\n\tswitch v := term.Value.(type) {\n\tcase Null:\n\t\treturn v.Equal(other.Value)\n\tcase Boolean:\n\t\treturn v.Equal(other.Value)\n\tcase Number:\n\t\treturn v.Equal(other.Value)\n\tcase String:\n\t\treturn v.Equal(other.Value)\n\tcase Var:\n\t\treturn v.Equal(other.Value)\n\t}\n\n\treturn term.Value.Compare(other.Value) == 0\n}", "func allEq(doc string) bool {\n\n\tbase := doc[0]\n\tfor i := 1; i < len(doc); i++ {\n\t\tif base != doc[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func allEq(doc string) bool {\n\n\tbase := doc[0]\n\tfor i := 1; i < len(doc); i++ {\n\t\tif base != doc[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func Eql(v1, v2 Vect) bool { return v1.X == v2.X && v1.Y == v2.Y }", "func FQNameEquals(fqNameA, fqNameB []string) bool {\n\tif len(fqNameA) != len(fqNameB) {\n\t\treturn false\n\t}\n\tfor i, v := range fqNameA {\n\t\tif v != fqNameB[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func BoundQueriesEqual(x, y []BoundQuery) bool {\n\tif len(x) != len(y) {\n\t\treturn false\n\t}\n\tfor i := range x {\n\t\tif !BoundQueryEqual(&x[i], &y[i]) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (recv *TypeQuery) Equals(other *TypeQuery) bool {\n\treturn other.ToC() == recv.ToC()\n}", "func QueryResponsesEqual(r1, r2 []QueryResponse) bool {\n\tif len(r1) != len(r2) {\n\t\treturn false\n\t}\n\tfor i, r := range r1 {\n\t\tif !r.QueryResult.Equal(r2[i].QueryResult) {\n\t\t\treturn false\n\t\t}\n\t\tif !reflect.DeepEqual(r.QueryError, r2[i].QueryError) 
{\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func equ(a *GLSQObj, b *GLSQObj) bool {\n\tif a == b {\n\t\t// If they point to the same location they obviously are equal.\n\t\treturn true\n\t}\n\n\tif a.GlsqType != b.GlsqType {\n\t\treturn false // can't be equal if types are not the same\n\t}\n\n\tswitch a.GlsqType {\n\tcase GLSQ_TYPE_INT:\n\t\treturn a.GlsqInt == b.GlsqInt\n\n\tcase GLSQ_TYPE_FLOAT:\n\t\treturn a.GlsqFloat == b.GlsqFloat\n\n\tcase GLSQ_TYPE_BOOL:\n\t\treturn a.GlsqBool == b.GlsqBool\n\t}\n\n\treturn false\n}", "func Equal(t1, t2 Token) bool {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tif t1 == nil && t2 == nil {\n\t\treturn true\n\t}\n\n\t// we already checked for t1 == t2 == nil, so safe to do this\n\tif t1 == nil || t2 == nil {\n\t\treturn false\n\t}\n\n\tm1, err := t1.AsMap(ctx)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tfor iter := t2.Iterate(ctx); iter.Next(ctx); {\n\t\tpair := iter.Pair()\n\n\t\tv1 := m1[pair.Key.(string)]\n\t\tv2 := pair.Value\n\t\tswitch tmp := v1.(type) {\n\t\tcase time.Time:\n\t\t\ttmp2, ok := v2.(time.Time)\n\t\t\tif !ok {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\ttmp = tmp.Round(0).Truncate(time.Second)\n\t\t\ttmp2 = tmp2.Round(0).Truncate(time.Second)\n\t\t\tif !tmp.Equal(tmp2) {\n\t\t\t\treturn false\n\t\t\t}\n\t\tdefault:\n\t\t\tif v1 != v2 {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\tdelete(m1, pair.Key.(string))\n\t}\n\n\treturn len(m1) == 0\n}", "func Quat2Equals(a, b []float64) bool {\n\treturn equals(a[0], b[0]) &&\n\t\tequals(a[1], b[1]) &&\n\t\tequals(a[2], b[2]) &&\n\t\tequals(a[3], b[3]) &&\n\t\tequals(a[4], b[4]) &&\n\t\tequals(a[5], b[5]) &&\n\t\tequals(a[6], b[6]) &&\n\t\tequals(a[7], b[7])\n}", "func (recv *SignalQuery) Equals(other *SignalQuery) bool {\n\treturn other.ToC() == recv.ToC()\n}", "func IsEqual(eq string) bool {\n var equalsIndex int = strings.Index(eq, \"=\")\n var lhs string = eq[0:equalsIndex]\n var rhs string = eq[equalsIndex + 1:]\n var 
side1 float64 = NotateToDouble(Pemdas(lhs))\n var side2 float64 = NotateToDouble(Pemdas(rhs))\n\n return side1 == side2\n}", "func equalsRest(a, b language.Tag) bool {\n\t// TODO: don't include extensions in this comparison. To do this efficiently,\n\t// though, we should handle private tags separately.\n\treturn a.ScriptID == b.ScriptID && a.RegionID == b.RegionID && a.VariantOrPrivateUseTags() == b.VariantOrPrivateUseTags()\n}", "func (q Quat) Equals(other Quat) bool {\n\treturn q.EqualsEps(other, Epsilon)\n}", "func Quat2ExactEquals(a, b []float64) bool {\n\treturn a[0] == b[0] && a[1] == b[1] && a[2] == b[2] && a[3] == b[3] && a[4] == b[4] && a[5] == b[5] && a[6] == b[6] && a[7] == b[7]\n}", "func (q Quad) Equal(other *Quad) bool {\n\tif !q.ctx.Equal(other.ctx) {\n\t\treturn false\n\t}\n\n\tif !q.Triple.Equal(other.Triple) {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func eq(x, y []string) bool {\n\t// NOTE: set equal\n\treturn sset.Equal(x, y)\n}", "func (kt KeyToken) Equal(okt KeyToken) bool {\n\tif kt.Tok.IsKeyword() && kt.Key != \"\" {\n\t\treturn kt.Tok == okt.Tok && kt.Key == okt.Key\n\t}\n\treturn kt.Tok == okt.Tok\n}", "func EqualTokens(a, b *oauth2.Token) bool {\n\tif a == b {\n\t\treturn true\n\t}\n\taTok := \"\"\n\tif a != nil {\n\t\taTok = a.AccessToken\n\t}\n\tbTok := \"\"\n\tif b != nil {\n\t\tbTok = b.AccessToken\n\t}\n\treturn aTok == bTok\n}", "func JEQ(r operand.Op) { ctx.JEQ(r) }", "func (q Query) SequenceEqual(ctx Context, q2 Query) (bool, error) {\n\tnext1 := q.Iterate()\n\tnext2 := q2.Iterate()\n\n\tfor {\n\t\titem1, e := next1(ctx)\n\t\tif e != nil {\n\t\t\tif IsNoRows(e) {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\treturn false, e\n\t\t}\n\n\t\titem2, e := next2(ctx)\n\t\tif e != nil {\n\t\t\tif IsNoRows(e) {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\treturn false, e\n\t\t}\n\n\t\tok3, err := item1.EqualTo(item2, vm.EmptyCompareOption())\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif !ok3 {\n\t\t\treturn false, nil\n\t\t}\n\t}\n\n\t_, err 
:= next2(ctx)\n\tif err == nil {\n\t\treturn false, nil\n\t}\n\tif IsNoRows(err) {\n\t\treturn true, nil\n\t}\n\treturn false, err\n}", "func equal(lhs, rhs semantic.Expression) semantic.Expression {\n\treturn &semantic.BinaryOp{Type: semantic.BoolType, LHS: lhs, Operator: ast.OpEQ, RHS: rhs}\n}", "func RegexpEqual(x *syntax.Regexp, y *syntax.Regexp,) bool", "func eq(args ...interface{}) bool {\n\tif len(args) == 0 {\n\t\treturn false\n\t}\n\tx := args[0]\n\tswitch x := x.(type) {\n\tcase string, int, int64, byte, float32, float64:\n\t\tfor _, y := range args[1:] {\n\t\t\tif x == y {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\tfor _, y := range args[1:] {\n\t\tif reflect.DeepEqual(x, y) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func eq(args ...interface{}) bool {\n\tif len(args) == 0 {\n\t\treturn false\n\t}\n\tx := args[0]\n\tswitch x := x.(type) {\n\tcase string, int, int64, byte, float32, float64:\n\t\tfor _, y := range args[1:] {\n\t\t\tif x == y {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\tfor _, y := range args[1:] {\n\t\tif reflect.DeepEqual(x, y) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (r ShardResults) Equal(other ShardResults) bool {\n\tfor shard, result := range r {\n\t\totherResult, ok := r[shard]\n\t\tif !ok {\n\t\t\treturn false\n\t\t}\n\t\tallSeries := result.AllSeries()\n\t\totherAllSeries := otherResult.AllSeries()\n\t\tif len(allSeries) != len(otherAllSeries) {\n\t\t\treturn false\n\t\t}\n\t\tfor id, series := range allSeries {\n\t\t\totherSeries, ok := otherAllSeries[id]\n\t\t\tif !ok {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tallBlocks := series.Blocks.AllBlocks()\n\t\t\totherAllBlocks := otherSeries.Blocks.AllBlocks()\n\t\t\tif len(allBlocks) != len(otherAllBlocks) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tfor start, block := range allBlocks {\n\t\t\t\totherBlock, ok := otherAllBlocks[start]\n\t\t\t\tif !ok {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t\t// Just performing shallow 
equals so simply compare block addresses\n\t\t\t\tif block != otherBlock {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn true\n}", "func (c context) eq(d context) bool {\n\treturn c.state == d.state &&\n\t\tc.delim == d.delim &&\n\t\tc.urlPart == d.urlPart &&\n\t\tc.jsCtx == d.jsCtx &&\n\t\tc.attr == d.attr &&\n\t\tc.element == d.element &&\n\t\tc.err == d.err\n}", "func Same(t1, t2 *tree.Tree) bool {\n\tch1, ch2 := Walker(t1), Walker(t2)\n\n\tfor {\n\n\t\tv1, ok1 := <-ch1\n\t\tv2, ok2 := <-ch2\n\n\t\tif !ok1 || !ok2 {\n\t\t\treturn ok1 == ok2\n\t\t}\n\t\tif v1 != v2 {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn false\n}", "func OneTermEqualSelector(k, v string) Selector {\n\treturn &hasTerm{field: k, value: v}\n}", "func (t *token) Equal(tt *token) bool {\n\treturn t.code == tt.code && t.Text == tt.Text\n}", "func queriesMostlyEqual(m1 dnsmessage.Message, m2 dnsmessage.Message) bool {\n\t// Make fields we don't care about match, so that equality check is easy.\n\tm1.Header.ID = m2.Header.ID\n\tm1.Additionals = m2.Additionals\n\treturn reflect.DeepEqual(m1, m2)\n}", "func Same(t1, t2 *tree.Tree) bool {\n\tch1 := make(chan int)\n\tch2 := make(chan int)\n\tseq1 := make([]int, 10)\n\tseq2 := make([]int, 10)\n\tgo Walk(t1, ch1)\n\tgo Walk(t2, ch2)\n\tfor i := 0; i < 10; i++ {\n\t\tseq1 = append(seq1, <-ch1)\n\t}\n\tfor j := 0; j < 10; j++ {\n\t\tseq2 = append(seq2, <-ch2)\n\t}\n\trs1 := fmt.Sprintf(\"%q\", seq1)\n\trs2 := fmt.Sprintf(\"%q\", seq2)\n\n\treturn rs1 == rs2\n}", "func eq(args ...interface{}) bool {\n\tif len(args) == 0 {\n\t\treturn false\n\t}\n\tx := args[0]\n\tswitch x := x.(type) {\n\tcase int:\n\t\tfor _, y := range args[1:] {\n\t\t\tswitch y := y.(type) {\n\t\t\tcase int:\n\t\t\t\tif int64(x) == int64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\tcase int64:\n\t\t\t\tif int64(x) == int64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn false\n\n\tcase int64:\n\t\tfor _, y := range args[1:] {\n\t\t\tswitch y := y.(type) 
{\n\t\t\tcase int:\n\t\t\t\tif int64(x) == int64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\tcase int64:\n\t\t\t\tif int64(x) == int64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn false\n\n\tcase float32:\n\t\tfor _, y := range args[1:] {\n\t\t\tswitch y := y.(type) {\n\t\t\tcase float32:\n\t\t\t\tif float64(x) == float64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\tcase float64:\n\t\t\t\tif float64(x) == float64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn false\n\n\tcase float64:\n\t\tfor _, y := range args[1:] {\n\t\t\tswitch y := y.(type) {\n\t\t\tcase float32:\n\t\t\t\tif float64(x) == float64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\tcase float64:\n\t\t\t\tif float64(x) == float64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn false\n\n\tcase string, byte:\n\t\tfor _, y := range args[1:] {\n\t\t\tif x == y {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\n\tfor _, y := range args[1:] {\n\t\tif reflect.DeepEqual(x, y) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (me TComparator) IsEqualTo() bool { return me.String() == \"EqualTo\" }", "func isExactEquivalent(l language.Language) bool {\n\tfor _, o := range notEquivalent {\n\t\tif o == l {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func Same(t1, t2 *tree.Tree) bool {\n\tvalues := make(map[int]bool)\n\tch := make(chan int)\n\tgo func() {\n\t\tWalk(t1, ch)\n\t\tWalk(t2, ch)\n\t\tclose(ch)\n\t}()\n\tfor val := range ch {\n\t\tif _, ok := values[val]; ok {\n\t\t\tdelete(values, val)\n\t\t} else {\n\t\t\tvalues[val] = true\n\t\t}\n\t}\n\treturn len(values) == 0\n}", "func lvalEq(firstArg *LVal, secondArg *LVal) bool {\n\tif firstArg.Type != secondArg.Type {\n\t\treturn false\n\t}\n\n\tswitch firstArg.Type {\n\tcase LVAL_NUM:\n\t\treturn firstArg.Number == secondArg.Number\n\tcase LVAL_ERR:\n\t\treturn firstArg.Err == secondArg.Err\n\tcase LVAL_SYM:\n\t\treturn firstArg.Sym == secondArg.Sym\n\tcase 
LVAL_FUN:\n\t\tif firstArg.Builtin != nil || secondArg.Builtin != nil {\n\t\t\treturn reflect.ValueOf(firstArg.Builtin) == reflect.ValueOf(secondArg.Builtin)\n\t\t}\n\n\t\treturn lvalEq(firstArg.Formals, secondArg.Formals) && lvalEq(firstArg.Body, secondArg.Body)\n\tcase LVAL_STR:\n\t\treturn firstArg.String == secondArg.String\n\tcase LVAL_QEXPR:\n\t\tfallthrough\n\tcase LVAL_SEXPR:\n\t\tif len(firstArg.Cell) != len(secondArg.Cell) {\n\t\t\treturn false\n\t\t}\n\n\t\tfor i := 0; i < len(firstArg.Cell); i++ {\n\t\t\tif !lvalEq(firstArg.Cell[i], secondArg.Cell[i]) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (t Token) Equal(t2 Token) bool {\n\tif t.TokenType == t2.TokenType && bytes.Equal(t.Data, t2.Data) && len(t.Args) == len(t2.Args) {\n\t\tfor i := 0; i < len(t.Args); i++ {\n\t\t\tif t.Args[i].TokenType != t2.Args[i].TokenType || !bytes.Equal(t.Args[i].Data, t2.Args[i].Data) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\treturn false\n}", "func (v MetadataRequest) Equal(o MetadataRequest) bool {\n\treturn string(v.Metadata) == string(o.Metadata)\n}", "func (args *RequestArgs) Same(anthor *RequestArgs) bool {\n\tif anthor == nil {\n\t\treturn false\n\t}\n\tif args.MaxDepth != anthor.MaxDepth {\n\t\treturn false\n\t}\n\tif len(args.AcceptedDomains) != len(anthor.AcceptedDomains) {\n\t\treturn false\n\t}\n\tif anthor.AcceptedDomains != nil {\n\t\tfor i, acceptedDomain := range anthor.AcceptedDomains {\n\t\t\tif args.AcceptedDomains[i] != acceptedDomain {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\treturn true\n}", "func (q1 Quat) ApproxEqual(q2 Quat) bool {\n\treturn FloatEqual(q1.W, q2.W) && q1.V.ApproxEqual(q2.V)\n}", "func isSubset(query, subject asciiset.ASCIISet) bool {\n\t// A ⊆ B iff (A ∪ B) = B\n\tunion := query.Union(subject)\n\treturn union.Equals(subject)\n}", "func eq(x, y []string) bool {\n\t// NOTE: list equal, not set equal\n\treturn strs.Equal(x, y)\n}", "func Same(t1, t2 *tree.Tree) 
bool {\n\tvalues1 := make(chan int)\n\tvalues2 := make(chan int)\n\n\tgo Walk(t1, values1)\n\tgo Walk(t2, values2)\n\n\tfor val := range values1 {\n\t\tif val != <-values2 {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func Equal(a *ROBDD, b *ROBDD) (bool, error) {\n\tif !reflect.DeepEqual(a.Vocabulary, b.Vocabulary) {\n\t\treturn false, fmt.Errorf(\"Mismatched vocabularies in GraphEqual: %v, %v\", a.Vocabulary, b.Vocabulary)\n\t}\n\treturn seq.Equal(a.Node, b.Node)\n}", "func eq(o1, o2 interface{}) bool {\n\n\tf1, ok1 := ToFloat(o1)\n\tf2, ok2 := ToFloat(o2)\n\tif ok1 && ok2 {\n\t\treturn f1 == f2\n\t}\n\n\tb1, ok1 := ToBool(o1)\n\tb2, ok1 := ToBool(o2)\n\tif ok1 && ok2 {\n\t\treturn b1 == b2\n\t}\n\n\treturn o1 == o2\n}", "func Equals(t1, t2 Type) bool {\n\tt1, t2 = t1.Root(), t2.Root()\n\tswitch t1 := t1.(type) {\n\tcase *Variable:\n\t\tr2, ok := t2.(*Variable)\n\t\tif !ok {\n\t\t\treturn occursInType(t1, t2)\n\t\t}\n\t\treturn t1.ID == r2.ID\n\tcase *Operator:\n\t\tt2, ok := t2.(*Operator)\n\t\tif !ok {\n\t\t\treturn false\n\t\t}\n\t\tif t1.Name != t2.Name {\n\t\t\treturn false\n\t\t}\n\t\tif len(t1.Args) != len(t2.Args) {\n\t\t\treturn false\n\t\t}\n\t\tfor i := range t1.Args {\n\t\t\tif !Equals(t1.Args[i], t2.Args[i]) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\tcase *TypeError:\n\t\treturn false\n\t}\n\treturn true\n}", "func samePath(p1, p2 IndexMap, path IndexPath) bool {\n\tp1i, p1ok := p1[path]\n\tp2i, p2ok := p2[path]\n\n\t// It's in one but not the other directly, so it's not\n\t// the same.\n\tif p1ok != p2ok {\n\t\treturn false\n\t}\n\n\t// Avoid a nil pointer below by explicitly checking if one\n\t// is missing.\n\tif p1ok == false {\n\t\treturn p2ok == false\n\t}\n\tif p2ok == false {\n\t\treturn p1ok == false\n\t}\n\n\t// It's in both, so we can safely check the sha\n\treturn p1i.Sha1 == p2i.Sha1\n\n}", "func parseTermsQuery(p *parser) parserStateFn {\n\tp.next() // Consume the ':' token.\n\n\tvar terms []term\n\ttok := 
p.peek()\n\tswitch tok.typ {\n\tcase tokTypeError:\n\t\treturn parseErrorTok\n\tcase tokTypeUnquotedLiteral, tokTypeQuotedLiteral:\n\t\t// E.g. `foo:val1 val2`, `breakfast:*am eggs` or `foo:*`.\n\t\t// If at least one of the terms is `*`, then this is an \"exists query\".\n\t\thaveExistsTerm := false\n\t\tfor {\n\t\t\ttok := p.next()\n\t\t\tif tok.typ == tokTypeUnquotedLiteral {\n\t\t\t\tif tok.val == \"*\" {\n\t\t\t\t\thaveExistsTerm = true\n\t\t\t\t}\n\t\t\t\tterms = append(terms, newTerm(tok.val))\n\t\t\t} else if tok.typ == tokTypeQuotedLiteral {\n\t\t\t\tterms = append(terms, newQuotedTerm(tok.val))\n\t\t\t} else {\n\t\t\t\tp.backup(tok)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif haveExistsTerm {\n\t\t\tp.filter.addStep(&rpnExistsQuery{field: p.field.val})\n\t\t} else {\n\t\t\tp.filter.addStep(&rpnTermsQuery{field: p.field.val, terms: terms})\n\t\t}\n\t\tp.field = nil\n\t\treturn parseAfterQuery\n\tcase tokTypeOpenParen:\n\t\t// E.g. `foo:(a or b ...)` or `foo:(a and b and c)`.\n\t\tp.next() // Consume the open paren.\n\t\tmatchAll := false // True if the second form with `and`: `foo:(a and b ...)`.\n\t\tfor i := 0; true; i++ {\n\t\t\t// Expect literal ...\n\t\t\ttermTok := p.next()\n\t\t\tif termTok.typ == tokTypeUnquotedLiteral {\n\t\t\t\tterms = append(terms, newTerm(termTok.val))\n\t\t\t} else if termTok.typ == tokTypeQuotedLiteral {\n\t\t\t\tterms = append(terms, newQuotedTerm(termTok.val))\n\t\t\t} else {\n\t\t\t\treturn p.errorfAt(termTok.pos, \"expected literal, got %s\", termTok.typ)\n\t\t\t}\n\t\t\t// ... 
then ')' to complete the query, or 'and' or 'or' to repeat.\n\t\t\topTok := p.next()\n\t\t\tswitch opTok.typ {\n\t\t\tcase tokTypeCloseParen:\n\t\t\t\tif matchAll {\n\t\t\t\t\tp.filter.addStep(&rpnMatchAllTermsQuery{field: p.field.val, terms: terms})\n\t\t\t\t} else {\n\t\t\t\t\tp.filter.addStep(&rpnTermsQuery{field: p.field.val, terms: terms})\n\t\t\t\t}\n\t\t\t\tp.field = nil\n\t\t\t\treturn parseAfterQuery\n\t\t\tcase tokTypeOr:\n\t\t\t\tif i == 0 {\n\t\t\t\t\tmatchAll = false\n\t\t\t\t} else if matchAll {\n\t\t\t\t\treturn p.errorfAt(opTok.pos,\n\t\t\t\t\t\t\"cannot mix 'and' and 'or' in parenthesized value group\")\n\t\t\t\t}\n\t\t\tcase tokTypeAnd:\n\t\t\t\tif i == 0 {\n\t\t\t\t\tmatchAll = true\n\t\t\t\t} else if !matchAll {\n\t\t\t\t\treturn p.errorfAt(opTok.pos,\n\t\t\t\t\t\t\"cannot mix 'and' and 'or' in parenthesized value group\")\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\treturn p.errorfAt(opTok.pos, \"expected ')', 'or', or 'and'; got %s\",\n\t\t\t\t\topTok.typ)\n\t\t\t}\n\t\t}\n\t\tpanic(fmt.Sprintf(\"unreachable code hit with KQL %q\", p.kql))\n\tdefault:\n\t\treturn p.errorfAt(tok.pos, \"expected a literal or '('; got %s\", tok.typ)\n\t}\n}", "func BoundQueryEqual(x, y *BoundQuery) bool {\n\treturn x.Sql == y.Sql &&\n\t\tsqltypes.BindVariablesEqual(x.BindVariables, y.BindVariables)\n}", "func (t *Trie) Equal(compareTo *Trie) bool {\n\treturn t.String() == compareTo.String()\n}", "func Same(t1, t2 *tree.Tree) bool {\n\tch1 := make(chan int)\n\tch2 := make(chan int)\n\n\tgo Walk(t1, ch1, true)\n\tgo Walk(t2, ch2, true)\n\n\t// all nodes from tree1 the same as in the tree2\n\tvar difFound bool\n\tfor v1 := range ch1 {\n\t\tv2, closed := <-ch2\n\t\tif !closed || v1 != v2 {\n\t\t\tdifFound = true\n\t\t}\n\t}\n\n\tresult := !difFound\n\n\t// if there more values left in tree2 after tree1 has been walked\n\tfor range ch2 {\n\t\tresult = false\n\t}\n\n\treturn result\n}", "func Eql(x, y reflect.Value) reflect.Value {\n\tmustSameType(x, y)\n\tz := 
reflect.New(reflect.TypeOf(false)).Elem()\n\tswitch x.Type().Kind() {\n\tcase reflect.Int:\n\t\txx := int(x.Int())\n\t\tyy := int(y.Int())\n\t\tzz := xx == yy\n\t\tz.SetBool(zz)\n\t\treturn z\n\tcase reflect.Int8:\n\t\txx := int8(x.Int())\n\t\tyy := int8(y.Int())\n\t\tzz := xx == yy\n\t\tz.SetBool(zz)\n\t\treturn z\n\tcase reflect.Int16:\n\t\txx := int16(x.Int())\n\t\tyy := int16(y.Int())\n\t\tzz := xx == yy\n\t\tz.SetBool(zz)\n\t\treturn z\n\tcase reflect.Int32:\n\t\txx := int32(x.Int())\n\t\tyy := int32(y.Int())\n\t\tzz := xx == yy\n\t\tz.SetBool(zz)\n\t\treturn z\n\tcase reflect.Int64:\n\t\txx := int64(x.Int())\n\t\tyy := int64(y.Int())\n\t\tzz := xx == yy\n\t\tz.SetBool(zz)\n\t\treturn z\n\tcase reflect.Uint:\n\t\txx := uint(x.Uint())\n\t\tyy := uint(y.Uint())\n\t\tzz := xx == yy\n\t\tz.SetBool(zz)\n\t\treturn z\n\tcase reflect.Uint8:\n\t\txx := uint8(x.Uint())\n\t\tyy := uint8(y.Uint())\n\t\tzz := xx == yy\n\t\tz.SetBool(zz)\n\t\treturn z\n\tcase reflect.Uint16:\n\t\txx := uint16(x.Uint())\n\t\tyy := uint16(y.Uint())\n\t\tzz := xx == yy\n\t\tz.SetBool(zz)\n\t\treturn z\n\tcase reflect.Uint32:\n\t\txx := uint32(x.Uint())\n\t\tyy := uint32(y.Uint())\n\t\tzz := xx == yy\n\t\tz.SetBool(zz)\n\t\treturn z\n\tcase reflect.Uint64:\n\t\txx := uint64(x.Uint())\n\t\tyy := uint64(y.Uint())\n\t\tzz := xx == yy\n\t\tz.SetBool(zz)\n\t\treturn z\n\tcase reflect.Uintptr:\n\t\txx := uintptr(x.Uint())\n\t\tyy := uintptr(y.Uint())\n\t\tzz := xx == yy\n\t\tz.SetBool(zz)\n\t\treturn z\n\tcase reflect.Float32:\n\t\txx := float32(x.Float())\n\t\tyy := float32(y.Float())\n\t\tzz := xx == yy\n\t\tz.SetBool(zz)\n\t\treturn z\n\tcase reflect.Float64:\n\t\txx := float64(x.Float())\n\t\tyy := float64(y.Float())\n\t\tzz := xx == yy\n\t\tz.SetBool(zz)\n\t\treturn z\n\tcase reflect.Complex64:\n\t\txx := complex64(x.Complex())\n\t\tyy := complex64(y.Complex())\n\t\tzz := xx == yy\n\t\tz.SetBool(zz)\n\t\treturn z\n\tcase reflect.Complex128:\n\t\txx := complex128(x.Complex())\n\t\tyy := 
complex128(y.Complex())\n\t\tzz := xx == yy\n\t\tz.SetBool(zz)\n\t\treturn z\n\tcase reflect.String:\n\t\txx := string(x.String())\n\t\tyy := string(y.String())\n\t\tzz := xx == yy\n\t\tz.SetBool(zz)\n\t\treturn z\n\tcase reflect.Bool:\n\t\txx := bool(x.Bool())\n\t\tyy := bool(y.Bool())\n\t\tzz := xx == yy\n\t\tz.SetBool(zz)\n\t\treturn z\n\t}\n\tpanic(fmt.Sprintf(\"operator == not defined on %v\", x.Type()))\n}", "func (r *RegexpObject) equal(e *RegexpObject) bool {\n\treturn r.ToString() == r.ToString()\n}", "func equal(a, b float64) bool {\n\treturn math.Abs(a-b) <= equalityThreshold\n}", "func ResultsEqual(r1, r2 []Result) bool {\n\tif len(r1) != len(r2) {\n\t\treturn false\n\t}\n\tfor i, r := range r1 {\n\t\tif !r.Equal(&r2[i]) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (v SearchTransactionsRequest) Equal(o SearchTransactionsRequest) bool {\n\treturn v.AccountIdentifier.Value.Equal(o.AccountIdentifier.Value) &&\n\t\tv.AccountIdentifier.Set == o.AccountIdentifier.Set &&\n\t\tv.Address.Value == o.Address.Value &&\n\t\tv.Address.Set == o.Address.Set &&\n\t\tv.CoinIdentifier.Value.Equal(o.CoinIdentifier.Value) &&\n\t\tv.CoinIdentifier.Set == o.CoinIdentifier.Set &&\n\t\tv.Currency.Value.Equal(o.Currency.Value) &&\n\t\tv.Currency.Set == o.Currency.Set &&\n\t\tv.Limit.Value == o.Limit.Value &&\n\t\tv.Limit.Set == o.Limit.Set &&\n\t\tv.MaxBlock.Value == o.MaxBlock.Value &&\n\t\tv.MaxBlock.Set == o.MaxBlock.Set &&\n\t\tv.Offset.Value == o.Offset.Value &&\n\t\tv.Offset.Set == o.Offset.Set &&\n\t\tv.Operator.Value == o.Operator.Value &&\n\t\tv.Operator.Set == o.Operator.Set &&\n\t\tv.Status.Value == o.Status.Value &&\n\t\tv.Status.Set == o.Status.Set &&\n\t\tv.Success.Value == o.Success.Value &&\n\t\tv.Success.Set == o.Success.Set &&\n\t\tv.TransactionIdentifier.Value.Equal(o.TransactionIdentifier.Value) &&\n\t\tv.TransactionIdentifier.Set == o.TransactionIdentifier.Set &&\n\t\tv.Type.Value == o.Type.Value &&\n\t\tv.Type.Set == o.Type.Set\n}", "func 
Same(s1, s2 Set) bool {\n\treturn *(*uintptr)(unsafe.Pointer(&s1)) == *(*uintptr)(unsafe.Pointer(&s2))\n}", "func equalish(a, b, tolerance float64) bool {\n\tif a == b {\n\t\treturn true\n\t}\n\n\tdiff := math.Abs(a - b)\n\n\tif diff <= tolerance {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func Equal(s1, s2 Set) bool {\n\tif Same(s1, s2) {\n\t\treturn true\n\t}\n\n\tif len(s1) != len(s2) {\n\t\treturn false\n\t}\n\n\tfor e := range s1 {\n\t\tif _, ok := s2[e]; !ok {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func Equal(a, b Node) bool {\n\tif a == nil {\n\t\treturn b == nil\n\t}\n\tswitch a := a.(type) {\n\tcase *Text:\n\t\tb, ok := b.(*Text)\n\t\treturn ok &&\n\t\t\tEqual(a.Leading, b.Leading) &&\n\t\t\tEqual(&a.Discourse, &b.Discourse)\n\n\tcase *Discourse:\n\t\tb, ok := b.(*Discourse)\n\t\tif !ok || len(*a) != len(*b) {\n\t\t\treturn false\n\t\t}\n\t\tfor i := range *a {\n\t\t\tif !Equal((*a)[i], (*b)[i]) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\n\tcase *StatementSentence:\n\t\tb, ok := b.(*StatementSentence)\n\t\treturn ok &&\n\t\t\twordEqual(a.JE, b.JE) &&\n\t\t\twordEqual(a.DA, b.DA) &&\n\t\t\tEqual(a.Statement, b.Statement)\n\n\tcase *CoPSentence:\n\t\tb, ok := b.(*CoPSentence)\n\t\treturn ok && copEqual((*CoP)(a), (*CoP)(b))\n\n\tcase *Prenex:\n\t\tb, ok := b.(*Prenex)\n\t\treturn ok && wordEqual(&a.BI, &b.BI) && Equal(a.Terms, b.Terms)\n\n\tcase *PrenexStatement:\n\t\tb, ok := b.(*PrenexStatement)\n\t\treturn ok && Equal(&a.Prenex, &b.Prenex) && Equal(a.Statement, b.Statement)\n\n\tcase *Predication:\n\t\tb, ok := b.(*Predication)\n\t\treturn ok &&\n\t\t\twordEqual(a.NA, b.NA) &&\n\t\t\tEqual(a.Predicate, b.Predicate) &&\n\t\t\t(a.Terms == nil) == (b.Terms == nil) &&\n\t\t\t(a.Terms == nil || Equal(a.Terms, b.Terms))\n\n\tcase *CoPStatement:\n\t\tb, ok := b.(*CoPStatement)\n\t\treturn ok && copEqual((*CoP)(a), (*CoP)(b))\n\n\tcase *PrefixedPredicate:\n\t\tb, ok := b.(*PrefixedPredicate)\n\t\treturn ok && wordEqual(&a.MU, 
&b.MU) && Equal(a.Predicate, b.Predicate)\n\n\tcase *SerialPredicate:\n\t\tb, ok := b.(*SerialPredicate)\n\t\treturn ok && Equal(a.Left, b.Left) && Equal(a.Right, b.Right)\n\n\tcase *WordPredicate:\n\t\tb, ok := b.(*WordPredicate)\n\t\treturn ok && wordEqual((*Word)(a), (*Word)(b))\n\n\tcase *MIPredicate:\n\t\tb, ok := b.(*MIPredicate)\n\t\treturn ok && wordEqual(&a.MI, &b.MI) && wordEqual(a.GA, b.GA) && Equal(a.Phrase, b.Phrase)\n\n\tcase *POPredicate:\n\t\tb, ok := b.(*POPredicate)\n\t\treturn ok && wordEqual(&a.PO, &b.PO) && wordEqual(a.GA, b.GA) && Equal(a.Argument, b.Argument)\n\n\tcase *MOPredicate:\n\t\tb, ok := b.(*MOPredicate)\n\t\treturn ok && wordEqual(&a.MO, &b.MO) && wordEqual(&a.TEO, &b.TEO) && Equal(&a.Discourse, &b.Discourse)\n\n\tcase *LUPredicate:\n\t\tb, ok := b.(*LUPredicate)\n\t\treturn ok && wordEqual(&a.LU, &b.LU) && Equal(a.Statement, b.Statement)\n\n\tcase *CoPPredicate:\n\t\tb, ok := b.(*CoPPredicate)\n\t\treturn ok && copEqual((*CoP)(a), (*CoP)(b))\n\n\tcase *LinkedTerm:\n\t\tb, ok := b.(*LinkedTerm)\n\t\treturn ok && wordEqual(&a.GO, &b.GO) && Equal(a.Argument, b.Argument)\n\n\tcase Terms:\n\t\tb, ok := b.(Terms)\n\t\tif !ok || len(a) != len(b) {\n\t\t\treturn false\n\t\t}\n\t\tfor i := range a {\n\t\t\tif !Equal(a[i], b[i]) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\n\tcase *TermSet:\n\t\tb, ok := b.(*TermSet)\n\t\treturn ok && copEqual((*CoP)(a), (*CoP)(b))\n\n\tcase *PredicateArgument:\n\t\tb, ok := b.(*PredicateArgument)\n\t\treturn ok &&\n\t\t\twordEqual(a.Focus, b.Focus) &&\n\t\t\twordEqual(a.Quantifier, b.Quantifier) &&\n\t\t\tEqual(a.Predicate, b.Predicate) &&\n\t\t\t(a.Relative == nil) == (b.Relative == nil) &&\n\t\t\t(a.Relative == nil || Equal(a.Relative, b.Relative))\n\n\tcase *CoPArgument:\n\t\tb, ok := b.(*CoPArgument)\n\t\treturn ok && copEqual((*CoP)(a), (*CoP)(b))\n\n\tcase *PredicationRelative:\n\t\tb, ok := b.(*PredicationRelative)\n\t\treturn ok && Equal(&a.Predication, &b.Predication)\n\n\tcase 
*LURelative:\n\t\tb, ok := b.(*LURelative)\n\t\treturn ok && wordEqual(&a.LU, &b.LU) && Equal(a.Statement, b.Statement)\n\n\tcase *CoPRelative:\n\t\tb, ok := b.(*CoPRelative)\n\t\treturn ok && copEqual((*CoP)(a), (*CoP)(b))\n\n\tcase *PredicateAdverb:\n\t\tb, ok := b.(*PredicateAdverb)\n\t\treturn ok && Equal(a.Predicate, b.Predicate)\n\n\tcase *CoPAdverb:\n\t\tb, ok := b.(*CoPAdverb)\n\t\treturn ok && copEqual((*CoP)(a), (*CoP)(b))\n\n\tcase *PredicationPreposition:\n\t\tb, ok := b.(*PredicationPreposition)\n\t\treturn ok && Equal(a.Predicate, b.Predicate) && Equal(a.Argument, b.Argument)\n\n\tcase *CoPPreposition:\n\t\tb, ok := b.(*CoPPreposition)\n\t\treturn ok && copEqual((*CoP)(a), (*CoP)(b))\n\n\tcase *PredicationContent:\n\t\tb, ok := b.(*PredicationContent)\n\t\treturn ok && Equal(&a.Predication, &b.Predication)\n\n\tcase *LUContent:\n\t\tb, ok := b.(*LUContent)\n\t\treturn ok && wordEqual(&a.LU, &b.LU) && Equal(a.Statement, b.Statement)\n\n\tcase *CoPContent:\n\t\tb, ok := b.(*CoPContent)\n\t\treturn ok && copEqual((*CoP)(a), (*CoP)(b))\n\n\tcase *Parenthetical:\n\t\tb, ok := b.(*Parenthetical)\n\t\treturn ok && wordEqual(&a.KI, &b.KI) && wordEqual(&a.KIO, &b.KIO) && Equal(&a.Discourse, &b.Discourse)\n\n\tcase *Incidental:\n\t\tb, ok := b.(*Incidental)\n\t\treturn ok && wordEqual(&a.JU, &b.JU) && Equal(a.Statement, b.Statement)\n\n\tcase *Vocative:\n\t\tb, ok := b.(*Vocative)\n\t\treturn ok && wordEqual(&a.HU, &b.HU) && Equal(a.Argument, b.Argument)\n\n\tcase *Interjection:\n\t\tb, ok := b.(*Interjection)\n\t\treturn ok && wordEqual((*Word)(a), (*Word)(b))\n\n\tcase *Space:\n\t\tb, ok := b.(*Space)\n\t\treturn ok && wordEqual((*Word)(a), (*Word)(b))\n\n\tcase *Word:\n\t\tb, ok := b.(*Word)\n\t\treturn ok && wordEqual(a, b)\n\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unknown node type %T\", a))\n\t}\n}", "func (a seriesIDs) equals(other seriesIDs) bool {\n\tif len(a) != len(other) {\n\t\treturn false\n\t}\n\tfor i, s := range other {\n\t\tif a[i] != s 
{\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func Equals(a, b interface{}) bool {\n\treturn neogointernal.Opcode2(\"EQUAL\", a, b).(bool)\n}", "func isEqual(g1 types.GeometryValue, g2 types.GeometryValue) bool {\n\treturn isWithin(g1, g2) && isWithin(g2, g1)\n}", "func Same(t1, t2 *tree.Tree) bool {\n\tch1 := make(chan int)\n\tgo DoWalk(t1, ch1)\n\tch2 := make(chan int)\n\tgo DoWalk(t2, ch2)\n\n\tfor {\n\t\tv1, ok1 := <-ch1\n\t\tv2, ok2 := <-ch2\n\n\t\tif v1 != v2 || ok1 && !ok2 || !ok1 && ok2 {\n\t\t\treturn false\n\t\t}\n\n\t\tif !ok1 && !ok2 {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn true\n}", "func eq128(a0, a1, b0, b1 uint64) bool {\n\treturn (a0 == b0) && (a1 == b1)\n}", "func Same(t1, t2 *tree.Tree) bool {\n\tw1, w2, quit := make(chan int), make(chan int), make(chan int)\n\t// Defer close of quit channel after return\n\tdefer close(quit)\n\n\t// Use a goroutine to walk both tree 1 & 2\n\tgo Walk(t1, w1, quit)\n\tgo Walk(t2, w2, quit)\n\n\tfor {\n\t\t// Get progressive values & channel status of both channels\n\t\tv1, ok1 := <-w1\n\t\tv2, ok2 := <-w2\n\n\t\t// If any one channel is closed, return equality channel status\n\t\t// If both are closed, that means they were the same because their value\n\t\t// did not defer (see other if)\n\t\t// If they close at same time and values did not defer, they are same\n\t\t// If values did not defer but doesn't close at same time, not same\n\t\t// as more values can be in one another tree\n\t\tif !ok1 || !ok2 {\n\t\t\treturn ok1 == ok2\n\t\t}\n\n\t\t// If values are not equal, trees are not equal, return false\n\t\tif v1 != v2 {\n\t\t\treturn false\n\t\t}\n\t}\n}", "func (ft *FieldType) Equal(other *FieldType) bool {\n\t// We do not need to compare whole `ft.flag == other.flag` when wrapping cast upon an Expression.\n\t// but need compare unsigned_flag of ft.flag.\n\t// When tp is float or double with decimal unspecified, do not check whether flen is equal,\n\t// because flen for them is useless.\n\t// The decimal 
field can be ignored if the type is int or string.\n\ttpEqual := (ft.GetType() == other.GetType()) || (ft.GetType() == mysql.TypeVarchar && other.GetType() == mysql.TypeVarString) || (ft.GetType() == mysql.TypeVarString && other.GetType() == mysql.TypeVarchar)\n\tflenEqual := ft.flen == other.flen || (ft.EvalType() == ETReal && ft.decimal == UnspecifiedLength)\n\tignoreDecimal := ft.EvalType() == ETInt || ft.EvalType() == ETString\n\tpartialEqual := tpEqual &&\n\t\t(ignoreDecimal || ft.decimal == other.decimal) &&\n\t\tft.charset == other.charset &&\n\t\tft.collate == other.collate &&\n\t\tflenEqual &&\n\t\tmysql.HasUnsignedFlag(ft.flag) == mysql.HasUnsignedFlag(other.flag)\n\tif !partialEqual || len(ft.elems) != len(other.elems) {\n\t\treturn false\n\t}\n\tfor i := range ft.elems {\n\t\tif ft.elems[i] != other.elems[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func Same(t1, t2 *tree.Tree) bool {\n\tch1 := make(chan int, 10)\n\tch2 := make(chan int, 10)\n\tgo Walk(t1, ch1)\n\tgo Walk(t2, ch2)\n\n\tv1, v2 := 0, 0\n\tok1, ok2 := true, true\n\n\tfor ok1 && ok2 {\n\t\tv1, ok1 = <-ch1\n\t\tv2, ok2 = <-ch2\n\t\tif ok1 != ok2 || v1 != v2 {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (t Tags) Equal(other Tags) bool {\n\tif len(t.Values()) != len(other.Values()) {\n\t\treturn false\n\t}\n\tfor i := 0; i < len(t.Values()); i++ {\n\t\tequal := t.values[i].Name.Equal(other.values[i].Name) &&\n\t\t\tt.values[i].Value.Equal(other.values[i].Value)\n\t\tif !equal {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func Same(t1, t2 *tree.Tree) bool {\n\tc1 := make(chan int)\n\tgo Walk(t1, c1)\n\tc2 := make(chan int)\n\tgo Walk(t2, c2)\n\n\tfor {\n\t\tv1, ok1 := <-c1\n\t\tv2, ok2 := <-c2\n\n\t\tif v1 != v2 || ok1 && !ok2 || !ok1 && ok2 {\n\t\t\treturn false\n\t\t}\n\n\t\tif !ok1 && !ok2 {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn true\n}", "func (n *Node) isEqualTo(other *Node) bool {\n\treturn n.val.IsEqualTo(other.val)\n}", "func Eqtype(t1, t2 *Type) 
bool", "func Same(t1, t2 *tree.Tree) bool {\n\tch1 := make(chan int)\n\tch2 := make(chan int)\n\n\tgo Walk(t1, ch1)\n\tgo Walk(t2, ch2)\n\n\tfor i := 0; i < 10; i++ {\n\t\tif <-ch1 != <-ch2 {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func (a *Token) Equal(b *Token) bool {\n\tif a == nil || b == nil {\n\t\treturn false\n\t}\n\tif a.tokenType != b.tokenType {\n\t\treturn false\n\t}\n\treturn a.value == b.value\n}", "func (a joinedTable) equal(b joinedTable) bool {\n\treturn a.secondaryTable == b.secondaryTable && a.primaryColumn == b.primaryColumn && a.secondaryColumn == b.secondaryColumn\n}", "func equalIds(wantedId, foundId, foundHref string) bool {\n\n\twantedUuid := extractUuid(wantedId)\n\tfoundUuid := \"\"\n\n\tif wantedUuid == \"\" {\n\t\treturn false\n\t}\n\tif foundId != \"\" {\n\t\t// In some entities, the ID is a simple UUID without prefix\n\t\tfoundUuid = extractUuid(foundId)\n\t} else {\n\t\tfoundUuid = extractUuid(foundHref)\n\t}\n\treturn foundUuid == wantedUuid\n}", "func Same(t1, t2 *tree.Tree) bool {\n\tch1 := make(chan int)\n\tch2 := make(chan int)\n\tgo Walk(t1, ch1)\n\tgo Walk(t2, ch2)\n\n\tt1Map := makeMap(ch1)\n\tt2Slice := makeSlice(ch2)\n\n\tok := true\n\tfor _, v := range t2Slice {\n\t\tok = ok && t1Map[v]\n\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn ok\n}", "func mutants(str1 string, str2 string) bool {\n\tstr1 = strings.ToLower(str1) // convert both inputs to lower\n\tstr2 = strings.ToLower(str2) // case to check for word matches\n\t// convert second input to slice of runes\n\ttest := []rune(str2)\n\t// iterate over runes\n\tfor _, r := range test {\n\t\t// if rune does not exist in first string, return false\n\t\tif strings.IndexRune(str1, r) < 0 {\n\t\t\treturn false\n\t\t}\n\t}\n\t// otherwise return true\n\treturn true\n}", "func Eq(f1, f2 Formula) Formula {\n\treturn and{or{not{f1}, f2}, or{f1, not{f2}}}\n}", "func equal(left, right []string) bool {\n\tif len(left) != len(right) {\n\t\treturn false\n\t}\n\tfor i, 
value := range left {\n\t\tif value != right[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func Equal(s1, s2 Set) bool {\n\tif s1.Len() != s2.Len() {\n\t\treturn false\n\t}\n\tfor k := range s1 {\n\t\tif _, ok := s2[k]; !ok {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func Eq(lhs TermT, rhs TermT) TermT {\n\treturn TermT(C.yices_eq(C.term_t(lhs), (C.term_t(rhs))))\n}", "func Same(t1, t2 *tree.Tree) bool {\n\tone, two := Walker(t1), Walker(t2)\n\tfor i := range one {\n\t\tj := <-two\n\t\tif i != j {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func GraphEqual(a *ROBDD, b *ROBDD) (bool, error) {\n\tif !reflect.DeepEqual(a.Vocabulary, b.Vocabulary) {\n\t\treturn false, fmt.Errorf(\"Mismatched vocabularies in Equal: %v, %v\", a.Vocabulary, b.Vocabulary)\n\t}\n\treturn seq.GraphEqual(a.Node, b.Node)\n}", "func strEq(s1, s2 string) bool {\n if len(s1) != len(s2) {\n return false\n }\n\n for i := range s1 {\n if s1[i] != s2[i] {\n return false\n }\n }\n\n return true\n}", "func (v ConstructionParseRequest) Equal(o ConstructionParseRequest) bool {\n\treturn v.Signed == o.Signed &&\n\t\tv.Transaction == o.Transaction\n}", "func Same(t1, t2 *tree.Tree) bool {\n\tch1, ch2 := make(chan int), make(chan int)\n\n\tgo Walk(t1, ch1)\n\tgo Walk(t2, ch2)\n\n\tfor {\n\t\tv1, ok1 := <-ch1\n\t\tv2, ok2 := <-ch2\n\t\tif !ok1 || !ok2 {\n\t\t\treturn ok1 == ok2\n\t\t}\n\t\tif v1 != v2 {\n\t\t\treturn false\n\t\t}\n\t}\n}", "func Same(t1, t2 *tree.Tree) bool {\n\tch1 := make(chan int)\n\tch2 := make(chan int)\n\n\tgo Walk(t1, ch1)\n\tgo Walk(t2, ch2)\n\n\tvar v1, v2 int\n\tok1 := true\n\tok2 := true\n\tfor ok1 {\n\t\tv1, ok1 = <-ch1\n\t\tv2, ok2 = <-ch2\n\n\t\tif ok1 != ok2 {\n\t\t\treturn false\n\t\t} else if ok1 && (v1 != v2) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func Same(t1, t2 *tree.Tree) bool {\n\tch1 := make(chan int, 10)\n\tch2 := make(chan int, 10)\n\tgo Walk(t1, ch1)\n\tgo Walk(t2, ch2)\n\tfor i:= 0; i < 10; i++ {\n\t\tv1, v2 := <- 
ch1, <- ch2\n\t\tif v1 == v2 {\n\t\t\tcontinue;\n\t\t} else {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func Command_Eq(script *rex.Script, params []*rex.Value) {\n\tif len(params) != 2 {\n\t\trex.ErrorParamCount(\"float:eq\", \"2\")\n\t}\n\n\tresult := params[0].Float64() == params[1].Float64()\n\tif result {\n\t\tscript.RetVal = rex.NewValueBool(true)\n\t\treturn\n\t}\n\tscript.RetVal = rex.NewValueBool(false)\n}", "func Equals(strFirst, strSecond string) bool {\n\treturn strFirst == strSecond\n}", "func Same(t1, t2 *tree.Tree) bool {\n\tch_1 := make(chan int)\n\tch_2 := make(chan int)\n\t\n\tgo Walk(t1, ch_1)\n\tgo Walk(t2, ch_2)\n\t\n\tfor counter := 0; counter < 10; counter++{\n\t\tvalue_1 := <-ch_1\n\t\tvalue_2 := <-ch_2\n\t\tfmt.Println(\"Verify \", value_1, \" \", value_2)\n\t\tif value_1 != value_2 {\n\t\t\treturn false\n\t\t}\n\t}\n\t\n\treturn true\n}", "func Same(t1, t2 *tree.Tree) bool {\n\tc1 := make(chan int)\n\tc2 := make(chan int)\n\tgo Walk(t1, c1)\n\tgo Walk(t2, c2)\n\n\tfor {\n\t\tv1, ok1 := <-c1\n\t\tv2, ok2 := <-c2\n\n\t\tif v1 != v2 || ok1 != ok2 {\n\t\t\treturn false\n\t\t}\n\n\t\tif ok1 {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn true\n}", "func Same(t1, t2 *Tree) bool {\n\tch1 := make(chan int)\n\tch2 := make(chan int)\n\tgo Walk(t1, ch1)\n\tgo Walk(t2, ch2)\n\tfor k := range ch1 {\n\t\tselect {\n\t\tcase g := <-ch2:\n\t\t\tif k != g {\n\t\t\t\treturn false\n\t\t\t}\n\t\tdefault:\n\t\t\tbreak\n\t\t}\n\t}\n\treturn true\n}", "func (t Token) Equal(b Token) bool {\n\treturn bytes.Equal(t, b)\n}", "func (qo *QueryOperator) and(selector string, values []Object) func(Object) bool {\n\tvar queries []Query\n\n\tfor _, v := range values {\n\t\tq := Query{Criteria: v}\n\t\tq.compile()\n\t\tqueries = append(queries, q)\n\t}\n\n\treturn func(obj Object) bool {\n\t\tfor _, v := range queries {\n\t\t\tif !v.Test(obj) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n}", "func Same(t1, t2 *tree.Tree) bool {\n\tt1chan := make(chan 
int)\n\tt2chan := make(chan int)\n\tgo Walk(t1, t1chan)\n\tgo Walk(t2, t2chan)\n\tfor {\n\t\tv1, ok1 := <-t1chan\n\t\tv2, ok2 := <-t2chan\n\n\t\tif ok1 != ok2 {\n\t\t\treturn false\n\t\t} else if v1 != v2 {\n\t\t\treturn false\n\t\t} else if ok1 && ok2 {\n\t\t\treturn true\n\t\t}\n\t}\n}", "func (s UserSet) Equals(other m.UserSet) bool {\n\tres := s.Collection().Call(\"Equals\", other)\n\tresTyped, _ := res.(bool)\n\treturn resTyped\n}" ]
[ "0.6278379", "0.6265848", "0.6265848", "0.58121437", "0.57692087", "0.57653165", "0.56731814", "0.56653947", "0.56645983", "0.5595077", "0.5544077", "0.5541766", "0.55375445", "0.5516763", "0.5507575", "0.54917985", "0.5487973", "0.54676956", "0.5441852", "0.54316646", "0.53894866", "0.5388279", "0.5386614", "0.53768814", "0.5336545", "0.5336545", "0.53253865", "0.5308953", "0.53087354", "0.5297162", "0.52631325", "0.5234223", "0.5217252", "0.5207104", "0.51983625", "0.51973087", "0.51956636", "0.5190696", "0.51868784", "0.51758134", "0.51655626", "0.51621944", "0.5157755", "0.51518804", "0.51403064", "0.51402044", "0.51264167", "0.5105698", "0.5095067", "0.5089158", "0.5088335", "0.50751024", "0.50706905", "0.5067999", "0.5066479", "0.5060906", "0.5054517", "0.50476557", "0.5046296", "0.50459254", "0.50427425", "0.5037687", "0.50349", "0.5034541", "0.50281835", "0.50277734", "0.5016327", "0.4999445", "0.4989131", "0.49864233", "0.49821827", "0.49696958", "0.4969426", "0.49671155", "0.49647593", "0.4953919", "0.4948866", "0.4944923", "0.49433285", "0.49393132", "0.4938477", "0.49339712", "0.49235892", "0.49205658", "0.49058107", "0.4905679", "0.49027455", "0.49006298", "0.48905146", "0.48815674", "0.48807618", "0.48806185", "0.48733884", "0.48666677", "0.48622268", "0.4848336", "0.48470607", "0.4842906", "0.48386276", "0.48315257" ]
0.71522367
0
NewGitHubClient creates and initializes a new GitHubClient
func NewGitHubClient(owner, repo, token string) (GitHub, error) { var client *github.Client if token != "" { ts := oauth2.StaticTokenSource(&oauth2.Token{ AccessToken: token, }) tc := oauth2.NewClient(context.TODO(), ts) client = github.NewClient(tc) } else { client = github.NewClient(nil) } return &Client{ Owner: owner, Repo: repo, Client: client, }, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewGitHubClient(httpClient *http.Client) GitHubClient {\n\tclient := github.NewClient(httpClient)\n\n\treturn GitHubClient{\n\t\tRepositories: client.Repositories,\n\t}\n}", "func newTestGitHubClient() *Client {\n\tgclient := github.NewClient(nil)\n\tclient := Client{\n\t\tclient: gclient,\n\t}\n\treturn &client\n}", "func NewClient(config Config) ClientInterface {\n\tcontext := ctx.Background()\n\tif config.GitHubToken == \"\" {\n\t\treturn Client{\n\t\t\tClient: github.NewClient(nil),\n\t\t\tContext: context,\n\t\t\tConfig: config,\n\t\t}\n\t}\n\toauth2Client := oauth2.NewClient(context, oauth2.StaticTokenSource(\n\t\t&oauth2.Token{AccessToken: config.GitHubToken},\n\t))\n\treturn Client{\n\t\tClient: github.NewClient(oauth2Client),\n\t\tContext: context,\n\t\tConfig: config,\n\t}\n}", "func NewClient(ctx context.Context, cfg *config.Config) *github.Client {\n\t// OAuth\n\ttokenSrc := oauth2.StaticTokenSource(\n\t\t&oauth2.Token{AccessToken: cfg.GitHub.AccessToken},\n\t)\n\ttokenClient := oauth2.NewClient(ctx, tokenSrc)\n\n\t// GH\n\tclient := github.NewClient(tokenClient)\n\treturn client\n}", "func newClient(ctx context.Context, cfg v1.Github) *client {\n\tgithubToken := os.Getenv(cfg.AccessTokenEnvVar)\n\t// Setup the token for github authentication\n\tts := oauth2.StaticTokenSource(\n\t\t&oauth2.Token{AccessToken: githubToken},\n\t)\n\ttc := oauth2.NewClient(context.Background(), ts)\n\n\t// Return a client instance from github\n\tc := github.NewClient(tc)\n\treturn &client{\n\t\tctx: ctx,\n\t\tClient: c,\n\t\towner: cfg.Owner,\n\t\trepo: cfg.Repo,\n\t\tbotName: cfg.BotName,\n\t}\n}", "func newClient(conf Config) (*github.Client, error) {\n\tctx := context.Background()\n\n\tvar ts oauth2.TokenSource\n\tswitch {\n\tcase conf.HasAPIToken():\n\t\tts = oauth2.StaticTokenSource(\n\t\t\t&oauth2.Token{AccessToken: conf.GetAPIToken()},\n\t\t)\n\tdefault:\n\t\treturn nil, errors.New(\"Cannot find GitHub credentials\")\n\t}\n\n\ttc := oauth2.NewClient(ctx, 
ts)\n\treturn github.NewClient(tc), nil\n}", "func New() *github.Client {\n\tctx := context.Background()\n\tts := oauth2.StaticTokenSource(\n\t\t&oauth2.Token{AccessToken: GithubAccessToken},\n\t)\n\n\ttc := oauth2.NewClient(ctx, ts)\n\tclient := github.NewClient(tc)\n\n\treturn client\n}", "func NewClient(ctx context.Context, auth *repository.Auth) (repository.Client, error) {\n\tif auth == nil {\n\t\treturn nil, fmt.Errorf(\"Must provide authentication\")\n\t}\n\tif auth.Type() != repository.TokenAuthType {\n\t\treturn nil, fmt.Errorf(\"Unsupported auth type: %s\", auth.Type())\n\t}\n\n\tsts := oauth2.StaticTokenSource(\n\t\t&oauth2.Token{AccessToken: auth.Token},\n\t)\n\thttpClient := oauth2.NewClient(ctx, sts)\n\trtw := newRoundTripperWrapper(httpClient.Transport)\n\thttpClient.Transport = rtw\n\treturn &client{\n\t\tghClient: githubv4.NewClient(httpClient),\n\t}, nil\n}", "func NewClient(githubToken string) *GithubClient {\n\tctx := context.Background()\n\tts := oauth2.StaticTokenSource(\n\t\t&oauth2.Token{AccessToken: githubToken},\n\t)\n\ttc := oauth2.NewClient(ctx, ts)\n\n\treturn &GithubClient{github.NewClient(tc), githubToken}\n}", "func newClient(token string) *github.Client {\n\tts := oauth2.StaticTokenSource(\n\t\t&oauth2.Token{AccessToken: token},\n\t)\n\ttc := oauth2.NewClient(context.Background(), ts)\n\n\treturn github.NewClient(tc)\n}", "func New(inConf *config.Config) Client {\n\turl := OrgRepoUrl\n\n\tspecifiedUrl := os.Getenv(\"GHCLI_GITHUB_URL\")\n\tif specifiedUrl != \"\" {\n\t\turl = specifiedUrl\n\t}\n\n\treturn Client{\n\t\tconf: inConf,\n\t\tgithubUrl: url,\n\t\tclient: &http.Client{},\n\t}\n}", "func NewClient() *Client {\n\treturn &Client{\n\t\tClient: github.NewClient(nil),\n\t}\n}", "func newGithubClient(token string) *github.Client {\n\tsrc := oauth2.StaticTokenSource(\n\t\t&oauth2.Token{AccessToken: token},\n\t)\n\thttpClient := oauth2.NewClient(context.Background(), src)\n\treturn github.NewClient(httpClient)\n}", "func New(logger 
*logrus.Entry, accessToken string) (Github, error) {\n\tts := oauth2.StaticTokenSource(\n\t\t&oauth2.Token{AccessToken: accessToken},\n\t)\n\n\t// Base http client with authentication\n\thttpClient := oauth2.NewClient(context.Background(), ts)\n\thttpClient.Timeout = Timeout\n\n\tc := github.NewClient(httpClient)\n\n\treturn &baseGithub{\n\t\tlogger: logger,\n\t\thttp: httpClient,\n\t\tc: c,\n\t\tdownloadClient: &http.Client{\n\t\t\tTimeout: Timeout,\n\t\t},\n\t}, nil\n}", "func NewClient(token string, dryRun bool) *Client {\n\treturn NewClientWithEndpoint(\"https://api.github.com\", token, dryRun)\n}", "func NewClient(owner, repo, authToken string) *Client {\n\tctx := context.Background()\n\ttokenSource := oauth2.StaticTokenSource(\n\t\t&oauth2.Token{AccessToken: authToken},\n\t)\n\tclient := &Client{\n\t\tGithubClient: github.NewClient(oauth2.NewClient(ctx, tokenSource)),\n\t\towner: owner,\n\t\trepo: repo,\n\t}\n\treturn client\n}", "func NewGitHub() *GitHub {\n\treturn &GitHub{}\n}", "func NewClient(cfg Config) *Server {\r\n\tctx := context.Background()\r\n\tts := oauth2.StaticTokenSource(\r\n\t\t&oauth2.Token{AccessToken: cfg.OAuth2Token},\r\n\t)\r\n\ttc := oauth2.NewClient(ctx, ts)\r\n\tclient := github.NewClient(tc)\r\n\treturn &Server{\r\n\t\tRepoService: client.Repositories,\r\n\t\tLogger: zap.NewExample().Sugar(),\r\n\t\tConfig: cfg,\r\n\t}\r\n}", "func NewClient(token string) *Client {\n\treturn &Client{\n\t\tclient: &http.Client{},\n\t\ttoken: token,\n\t\tbase: githubBase,\n\t\tdry: false,\n\t}\n}", "func GitHub(ctx *context.Context) *github.Client {\n\tts := oauth2.StaticTokenSource(\n\t\t&oauth2.Token{AccessToken: ctx.Token},\n\t)\n\treturn github.NewClient(oauth2.NewClient(ctx, ts))\n}", "func NewClient(accessToken string) *Client {\n\t// TODO set friendlier defaults for eg, read timeout\n\tvar c = http.DefaultClient\n\n\tif accessToken != \"\" {\n\t\tc = createOAuthClient(accessToken)\n\t}\n\n\t// TODO support persistent http caching via 
\"httpcache\"\n\n\treturn castClient(github.NewClient(c))\n}", "func NewGithubClient(client *http.Client, repoMock GithubGitService) GithubClient {\n\tif repoMock != nil {\n\t\treturn GithubClient{\n\t\t\tRepositories: repoMock,\n\t\t}\n\t}\n\n\tcli := github.NewClient(client)\n\treturn GithubClient{\n\t\tRepositories: cli.Repositories,\n\t}\n}", "func NewGithubClient(token, owner, repo, filePath, name, email string) *GithubClient {\n\tgc := &GithubClient{}\n\tgc.repo.Path = filePath\n\tgc.repo.Owner = owner\n\tgc.repo.Token = token\n\tgc.repo.Repository = repo\n\tgc.repo.Name = name\n\tgc.repo.Email = email\n\n\treturn gc\n}", "func provideGithubClient(config config.Config) *scm.Client {\n\tclient, err := github.New(config.Github.APIServer)\n\tif err != nil {\n\t\tlogrus.WithError(err).\n\t\t\tFatalln(\"main: cannot create the GitHub client\")\n\t}\n\tif config.Github.Debug {\n\t\tclient.DumpResponse = httputil.DumpResponse\n\t}\n\tclient.Client = &http.Client{\n\t\tTransport: &oauth2.Transport{\n\t\t\tSource: oauth2.ContextTokenSource(),\n\t\t\tBase: defaultTransport(config.Github.SkipVerify),\n\t\t},\n\t}\n\treturn client\n}", "func (s *server) newRepoClient(ctx context.Context, storageName string) (gitalypb.RepositoryServiceClient, error) {\n\tgitalyServerInfo, err := storage.ExtractGitalyServer(ctx, storageName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconn, err := s.conns.Dial(ctx, gitalyServerInfo.Address, gitalyServerInfo.Token)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn gitalypb.NewRepositoryServiceClient(conn), nil\n}", "func (o *ClientConfig) NewClient(options ...ClientOption) (Client, error) {\n\n\t// Run provided ClientOption configuration options.\n\tfor _, opt := range options {\n\t\terr := opt(o)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed applying functional option: %w\", err)\n\t\t}\n\t}\n\n\t// Check mandatory option is provided.\n\tif o.githubUserClient == nil {\n\t\treturn nil, fmt.Errorf(\"github client 
not provided\")\n\t}\n\n\ttokenGenerator := secret.GetTokenGenerator(o.tokenPath)\n\n\tgitFactory, err := o.GitClient(o.githubUserClient, tokenGenerator, secret.Censor, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tgitClient := &client{}\n\t// Initialize map to enable writing to it in methods.\n\tgitClient.clonedRepos = make(map[string]string)\n\tgitClient.ClientFactory = gitFactory\n\treturn gitClient, err\n}", "func NewGitHubKeys(opts GitHubKeysOptions) (*GitHubKeys, error) {\n\tvar repo GitHubKeys\n\n\t// using a token requires use of a transport.\n\t// we create one using oauth2.NewClient().\n\tvar oauthTransport http.RoundTripper\n\tif opts.Token != \"\" {\n\t\toauthTransport = oauth2.NewClient(\n\t\t\tcontext.Background(),\n\t\t\toauth2.StaticTokenSource(\n\t\t\t\t&oauth2.Token{AccessToken: opts.Token},\n\t\t\t),\n\t\t).Transport\n\t}\n\n\t// create a new (cached) transport\n\t// based on the client above\n\ttransport := &httpcache.Transport{\n\t\tTransport: oauthTransport,\n\t\tCache: lrucache.New(\n\t\t\topts.MaxCacheSize,\n\t\t\tint64(opts.MaxCacheAge.Seconds()),\n\t\t),\n\t\tMarkCachedResponses: true,\n\t}\n\n\t// finally make an http client with that cache\n\t// and the timeout above.\n\tclient := &http.Client{\n\t\tTransport: transport,\n\t\tTimeout: opts.Timeout,\n\t}\n\n\t// initialize the client\n\trepo.Client = github.NewClient(client)\n\tif repo.Client == nil {\n\t\treturn nil, errClientReturnedNil\n\t}\n\n\treturn &repo, nil\n}", "func NewClient(\n\tctx context.Context,\n\tappID int64,\n\tinstallationID int64,\n\tkeyPEM []byte,\n) (*github.Client, error) {\n\tinstallationToken, err := getInstallationToken(\n\t\tctx,\n\t\tappID,\n\t\tinstallationID,\n\t\tkeyPEM,\n\t)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to negotiate an installation token: %s\", err)\n\t}\n\treturn github.NewClient(\n\t\toauth2.NewClient(\n\t\t\tctx,\n\t\t\toauth2.StaticTokenSource(\n\t\t\t\t&oauth2.Token{\n\t\t\t\t\tTokenType: \"token\", // This type 
indicates an installation token\n\t\t\t\t\tAccessToken: installationToken,\n\t\t\t\t},\n\t\t\t),\n\t\t),\n\t), nil\n}", "func newClient(cfg Config) *Client {\n\treturn &Client{bitclient: bitclient.NewBitClient(cfg.Bitbucket.Host, cfg.Bitbucket.User, cfg.Bitbucket.Password)}\n}", "func NewGithubClient() *GithubClient {\n\tctx := context.Background()\n\tts := oauth2.StaticTokenSource(\n\t\t&oauth2.Token{AccessToken: accessToken()},\n\t)\n\ttc := oauth2.NewClient(ctx, ts)\n\n\tclient := github.NewClient(tc)\n\treturn &GithubClient{\n\t\tctx: ctx,\n\t\tclient: client,\n\t}\n}", "func New(ctx context.Context, provider config.Provider, configClient config.Client, options ...Option) (Client, error) {\n\treturn newRepositoryClient(ctx, provider, configClient, options...)\n}", "func newGitlabClient(server, username, token string) (*gitlab.Client, error) {\n\tvar client *gitlab.Client\n\n\tif len(username) == 0 {\n\t\tclient = gitlab.NewClient(nil, token)\n\t} else {\n\t\tclient = gitlab.NewOAuthClient(nil, token)\n\t}\n\n\tif err := client.SetBaseURL(server + \"/api/v3/\"); err != nil {\n\t\tlog.Error(err.Error())\n\t\treturn nil, err\n\t}\n\n\treturn client, nil\n}", "func NewGitHub(opts GitHubOpts) *GitHub {\n\treturn &GitHub{\n\t\tclient: github.NewClient(nil),\n\t\torg: opts.Org,\n\t\trepo: opts.Repo,\n\t\tbinary: opts.Binary,\n\t}\n}", "func New(githubServer string,\n\tclientID string,\n\tclientSecret string,\n\tcookieSecret string,\n\trequiredOrg string,\n\tloginURL string) Auth {\n\n\tfatalError := false\n\tif clientID == \"\" {\n\t\tlogger.Error(\"Authentication ClientId missing\")\n\t\tfatalError = true\n\t}\n\tif clientSecret == \"\" {\n\t\tlogger.Error(\"Authentication ClientSecret missing\")\n\t\tfatalError = true\n\t}\n\tif len(cookieSecret) != 32 {\n\t\tlogger.WithField(\"cookie_secret\", cookieSecret).\n\t\t\tError(\"Missing or broken cookie secret, must be length 32\")\n\t\tfatalError = true\n\t}\n\tif fatalError {\n\t\tlogger.Fatal(\"Malformed auth input, 
exiting\")\n\t}\n\n\treturn &GithubAuth{\n\t\tRequiredOrg: requiredOrg,\n\t\tLoginURL: loginURL,\n\t\tCookieStore: sessions.NewCookieStore([]byte(cookieSecret)),\n\t\tGithubServer: githubServer,\n\t\tLoginTTL: 7 * 24 * time.Hour, // 1 week\n\t\tOauthConfig: &oauth2.Config{\n\t\t\tClientID: clientID,\n\t\t\tClientSecret: clientSecret,\n\t\t\tScopes: []string{\"read:org\"},\n\n\t\t\tEndpoint: oauth2.Endpoint{\n\t\t\t\tAuthURL: githubServer + \"/login/oauth/authorize\",\n\t\t\t\tTokenURL: githubServer + \"/login/oauth/access_token\",\n\t\t\t},\n\t\t},\n\t}\n}", "func NewGithub(config *models.Config) services.Service {\n\treturn &Github{NewFetcher(config, utils.NewHTTPJsonFetcher(config.HTTPRequestTimeout))}\n}", "func New(cfg *config.Config, githubOAuthToken GitHubOAuthToken) (*GitHubService, error) {\n\tif githubOAuthToken == \"\" {\n\t\treturn nil, errors.New(\"GitHub OAuth Token (passed as -t) is needed\")\n\t}\n\n\tctx := context.Background()\n\tts := oauth2.StaticTokenSource(\n\t\t&oauth2.Token{AccessToken: string(githubOAuthToken)},\n\t)\n\ttc := oauth2.NewClient(ctx, ts)\n\tclient := github.NewClient(tc)\n\treturn &GitHubService{\n\t\tclient: client,\n\t\torgName: cfg.Orgname,\n\t}, nil\n}", "func CreateClient(token, toURL, repoType string, tlsVerify bool) *scm.Client {\n\tvar client *scm.Client\n\tu, _ := url.Parse(toURL)\n\tif repoType == \"gitlab\" {\n\t\tclient = gitlab.NewDefault()\n\t} else if repoType == \"ghe\" {\n\t\tclient, _ = github.New(u.Scheme + \"://\" + u.Host + \"/api/v3\")\n\t} else {\n\t\tclient = github.NewDefault()\n\t}\n\tts := oauth2.StaticTokenSource(\n\t\t&oauth2.Token{AccessToken: token},\n\t)\n\tif tlsVerify {\n\t\tclient.Client = oauth2.NewClient(context.Background(), ts)\n\t} else {\n\t\tclient.Client = &http.Client{\n\t\t\tTransport: &oauth2.Transport{\n\t\t\t\tSource: ts,\n\t\t\t\tBase: &http.Transport{\n\t\t\t\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t}\n\tif repoType == \"gitlab\" 
{\n\t\tu, _ := url.Parse(toURL)\n\t\tclient.BaseURL.Host = u.Host\n\t}\n\treturn client\n}", "func NewGithub(token string) (*Github, error) {\n\tclient := http.DefaultClient\n\treturn &Github{\n\t\ttoken: token,\n\t\tclient: client,\n\t}, nil\n}", "func New(\n\tconfig Config,\n) (*Github, error) {\n\tctx := context.Background()\n\tts := oauth2.StaticTokenSource(\n\t\t&oauth2.Token{AccessToken: config.Token},\n\t)\n\ttc := oauth2.NewClient(ctx, ts)\n\ttc.Transport = config.TransportMiddleware(tc.Transport)\n\n\tvar client *github.Client\n\tif config.BaseURL != \"\" {\n\t\tvar err error\n\t\tclient, err = github.NewEnterpriseClient(config.BaseURL, \"\", tc)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tclient = github.NewClient(tc)\n\t}\n\n\treturn &Github{\n\t\tRepositoryListing: config.RepoListing,\n\t\tMergeTypes: config.MergeTypes,\n\t\ttoken: config.Token,\n\t\tbaseURL: config.BaseURL,\n\t\tFork: config.ForkMode,\n\t\tForkOwner: config.ForkOwner,\n\t\tSSHAuth: config.SSHAuth,\n\t\tghClient: client,\n\t\tReadOnly: config.ReadOnly,\n\t\tcheckPermissions: config.CheckPermissions,\n\t\thttpClient: &http.Client{\n\t\t\tTransport: config.TransportMiddleware(http.DefaultTransport),\n\t\t},\n\t}, nil\n}", "func New(accessToken string) *GithubProvider {\n\tghp := GithubProvider{}\n\tghp.ctx = context.Background()\n\tts := oauth2.StaticTokenSource(\n\t\t&oauth2.Token{AccessToken: accessToken},\n\t)\n\ttc := oauth2.NewClient(ghp.ctx, ts)\n\tghp.client = github.NewClient(tc)\n\treturn &ghp\n}", "func NewGithubClient(githubAccount string, dryrun bool) (*GithubClient, error) {\n\tghc, err := ghutil.NewGithubClient(githubAccount)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tuser, err := ghc.GetGithubUser()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &GithubClient{ghc, *user.Login, dryrun}, nil\n}", "func New(repoURLPath string) (repo.Importer, error) {\n\t// Parse URL\n\t// Examples:\n\t// - https://github.com/[nikoksr]/[proji] -> extracts 
user and repo name; no branch name\n\t// - https://github.com/[nikoksr]/[proji]/tree/[master] -> extracts user, repo and branch name\n\tr := regexp.MustCompile(`github.com/(?P<User>[^/]+)/(?P<Repo>[^/]+)(/tree/(?P<Branch>[^/]+))?`)\n\tspecs := r.FindStringSubmatch(repoURLPath)\n\n\tif specs == nil || len(specs) < 5 {\n\t\treturn nil, fmt.Errorf(\"could not parse url path\")\n\t}\n\n\tuserName := specs[1]\n\trepoName := specs[2]\n\tbranchName := specs[4]\n\n\tif userName == \"\" || repoName == \"\" {\n\t\treturn nil, fmt.Errorf(\"could not extract user and/or repository name. Please check the URL\")\n\t}\n\n\t// Default to master if no branch was defined\n\tif branchName == \"\" {\n\t\tbranchName = \"master\"\n\t}\n\n\tg := &github{apiBaseURI: \"https://api.github.com/repos/\", userName: userName, repoName: repoName, branchName: branchName, repoSHA: \"\"}\n\treturn g, g.setRepoSHA()\n}", "func NewClient(proto, registry, version, username, password string) (*Client, error) {\n\tif registry == \"\" || version == \"\" || proto == \"\" || registry == \"index.docker.io\" {\n\t\tglog.V(4).Infof(\"create a docker hub client, registry:%s\\n\", registry)\n\t\treturn &Client{\n\t\t\tisHub: true,\n\t\t\tproto: \"https\",\n\t\t\tregistry: \"index.docker.io\",\n\t\t\tversion: \"v2\",\n\t\t\tHubClient: &dockerhub.DockerHubClient{},\n\t\t}, nil\n\t}\n\tswitch version {\n\tcase \"v1\":\n\t\tsrcClient, err := registryV1.NewClient(proto, registry)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tglog.V(4).Infof(\"create a docker registry v1 client, registry:%s\\n\", registry)\n\t\treturn &Client{\n\t\t\tisHub: false,\n\t\t\tproto: proto,\n\t\t\tregistry: registry,\n\t\t\tversion: version,\n\t\t\tRegClient: srcClient,\n\t\t}, nil\n\tcase \"v2\":\n\t\tsrcClient, err := registryV2.New(fmt.Sprintf(\"%s://%s/\", proto, registry), username, password)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tglog.V(4).Infof(\"create a docker registry v2 client, registry:%s\\n\", 
registry)\n\t\treturn &Client{\n\t\t\tisHub: false,\n\t\t\tproto: proto,\n\t\t\tregistry: registry,\n\t\t\tversion: version,\n\t\t\tRegClientV2: srcClient,\n\t\t}, nil\n\t}\n\n\treturn nil, errors.New(\"invalid client config\")\n}", "func client(accessToken string) *github.Client {\n\tif c == nil {\n\t\t// log.Println(\"create auth client\")\n\t\tctx := context.Background()\n\t\tts := oauth2.StaticTokenSource(\n\t\t\t&oauth2.Token{AccessToken: accessToken},\n\t\t)\n\t\ttc := oauth2.NewClient(ctx, ts)\n\t\tc = github.NewClient(tc)\n\t}\n\treturn c\n}", "func New(uri, repository string) *Client {\n\treturn &Client{uri, repository}\n}", "func NewGitClient(t mockConstructorTestingTNewGitClient) *GitClient {\n\tmock := &GitClient{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func CreateClient(token string) *Client {\n\tvar c Client\n\tc.personalAccessToken = extractTokenStringFromJSONToken(token)\n\n\ttokenSource := &TokenSource{\n\t\tAccessToken: c.personalAccessToken,\n\t}\n\n\tc.oauthClient = oauth2.NewClient(context.WithValue(context.Background(), oauth2.HTTPClient, &http.Client{}), tokenSource)\n\tc.GithubClient = github.NewClient(c.oauthClient)\n\t//fmt.Println(\"Client:\", c)\n\treturn &c\n}", "func New(ctx context.Context, token string) *GitHub {\n\treturn newWithContext(ctx, token)\n}", "func getGithubClient(context context.Context, githubAccessToken string) github.Client {\n\tts := oauth2.StaticTokenSource(\n\t\t&oauth2.Token{AccessToken: githubAccessToken},\n\t)\n\ttc := oauth2.NewClient(context, ts)\n\n\tclient := github.NewClient(tc)\n\treturn *client\n}", "func getClient() (*github.Client, error) {\n\tintegrationID, err := strconv.Atoi(os.Getenv(\"GITHUB_APP_IDENTIFIER\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tinstallationID, err := strconv.Atoi(os.Getenv(\"GITHUB_INSTALLATION_IDENTIFIER\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tprivateKeyFile := 
os.Getenv(\"GITHUB_PRIVATE_KEY_FILE\")\n\titr, err := ghinstallation.NewKeyFromFile(http.DefaultTransport, integrationID, installationID, privateKeyFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Use installation transport with client.\n\treturn github.NewClient(&http.Client{Transport: itr}), nil\n}", "func NewGoogleGithubClient(httpClient *http.Client, repoMock GoogleGithubRepoService) GoogleGithubClient {\n\tif repoMock != nil {\n\t\treturn GoogleGithubClient{\n\t\t\tRepositories: repoMock,\n\t\t}\n\t}\n\tgithubClient := ggithub.NewClient(httpClient)\n\n\treturn GoogleGithubClient{\n\t\tRepositories: githubClient.Repositories,\n\t}\n}", "func NewGitHubOAuth(log logrus.FieldLogger, umClient *umClient.Client, state, ghClientID, ghClientSecret string) *GitHubOAuth {\n\tconf := &oauth2.Config{\n\t\tClientID: ghClientID,\n\t\tClientSecret: ghClientSecret,\n\t\tEndpoint: ghOAuth.Endpoint,\n\t}\n\n\treturn &GitHubOAuth{\n\t\tstate: state,\n\t\toAuthConf: conf,\n\t\tlog: log,\n\t\tusermanClient: umClient,\n\t}\n}", "func MockGitHubClientForRepositoryCommits(githubCommitSHA string, commitTimestamp time.Time) client.GetGitHubClientFunc {\n\tmockedHTTPClient := MockGithubRepositoryCommit(\n\t\tNewMockedGithubCommit(githubCommitSHA, commitTimestamp),\n\t)\n\tmockedGitHubClient := github.NewClient(mockedHTTPClient)\n\treturn func(string) *github.Client {\n\t\treturn mockedGitHubClient\n\t}\n}", "func init() {\n\turls = &pluginWebURL{\n\t\tbase: \"https://api.github.com\",\n\t\tauthURL: \"/authorizations\",\n\t\tassigneeURL: \"/repos/%s/%s/issues/%d/assignees\",\n\t\tissueURL: \"/repos/%s/%s/issues\",\n\t\tlabelURL: \"/repos/%s/%s/issues/%d/labels\",\n\t\trepo: \"/repos/%s/%s\",\n\t\tuserRepo: \"/user/repos\",\n\t}\n\n\tinfo, _ := githandler.Remote()\n\torg := info.Organisation\n\trepo := info.Repository\n\ttoken := githandler.ConfigGet(\"token\", \"phlow\")\n\n\tGitHub = &GitHubImpl{\n\t\turls,\n\t\trepo,\n\t\torg,\n\t\ttoken,\n\t}\n}", "func newClient(project 
string) (*client, error) {\n\tctx := context.Background()\n\tcl, err := pubsub.NewClient(ctx, project)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &client{\n\t\tclient: cl,\n\t}, nil\n}", "func NewUserClient(ctx context.Context, githubAuthToken string) *github.Client {\n\ttokenClient := oauth2.NewClient(ctx, oauth2.StaticTokenSource(&oauth2.Token{\n\t\tAccessToken: githubAuthToken,\n\t}))\n\n\treturn github.NewClient(tokenClient)\n}", "func NewClient(c client.Client, baseURL, publicKey, privateKey string) *Client {\n\treturn &Client{\n\t\tClient: c,\n\t\thash: hmac.New(sha256.New, []byte(privateKey)),\n\t\tpublicKey: publicKey,\n\t\tbaseURL: baseURL,\n\t}\n}", "func (o *GitHubOptions) GitHubClient(dryRun bool) (github.Client, error) {\n\treturn o.GitHubClientWithLogFields(dryRun, logrus.Fields{})\n}", "func NewGitHubFetcher(token string) *GitHubFetcher {\n\tts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: token})\n\ttc := oauth2.NewClient(oauth2.NoContext, ts)\n\tgf := &GitHubFetcher{\n\t\tc: github.NewClient(tc),\n\t}\n\treturn gf\n}", "func NewClient(username, oauth string) *Client {\n\treturn &Client{\n\t\tircUser: username,\n\t\tircToken: oauth,\n\t\tIrcAddress: ircTwitch,\n\t}\n}", "func NewClient(accessToken string) *Client {\n\treturn &Client{\n\t\ttoken: accessToken,\n\t\tunmarshal: json.Unmarshal,\n\t\tmarshal: json.Marshal,\n\t\tclient: &http.Client{},\n\t\tdebug: utils.GetEnv(envDebug, \"false\") == \"true\",\n\t\tlog: log.New(os.Stderr, \"slackapi\", log.LstdFlags|log.Lshortfile),\n\t}\n}", "func NewClient(authToken string) Client {\n\treturn Client{AuthToken: authToken, BaseURL: defaultBaseURL}\n}", "func (j *DSGitHub) Validate(ctx *Ctx) (err error) {\n\tj.Org = strings.TrimSpace(j.Org)\n\tif j.Org == \"\" {\n\t\terr = fmt.Errorf(\"github org must be set\")\n\t\treturn\n\t}\n\tj.Repo = strings.TrimSpace(j.Repo)\n\tif strings.HasSuffix(j.Repo, \".git\") {\n\t\tlRepo := len(j.Repo)\n\t\tj.Repo = j.Repo[:lRepo-4]\n\t}\n\tif j.Repo 
== \"\" {\n\t\terr = fmt.Errorf(\"github repo must be set\")\n\t\treturn\n\t}\n\tj.Category = strings.TrimSpace(j.Category)\n\tif j.Category == \"\" {\n\t\terr = fmt.Errorf(\"github category must be set\")\n\t\treturn\n\t}\n\tj.URL = \"https://github.com/\" + j.Org + \"/\" + j.Repo\n\tdefer func() {\n\t\tPrintf(\"configured %d GitHub OAuth clients\\n\", len(j.Clients))\n\t}()\n\tj.Tokens = strings.TrimSpace(j.Tokens)\n\t// Get GitHub OAuth from env or from file\n\toAuth := j.Tokens\n\tif strings.Contains(oAuth, \"/\") {\n\t\tbytes, err := ioutil.ReadFile(oAuth)\n\t\tFatalOnError(err)\n\t\toAuth = strings.TrimSpace(string(bytes))\n\t}\n\t// GitHub authentication or use public access\n\tj.Context = context.Background()\n\tif oAuth == \"\" {\n\t\tclient := github.NewClient(nil)\n\t\tj.Clients = append(j.Clients, client)\n\t} else {\n\t\toAuths := strings.Split(oAuth, \",\")\n\t\tfor _, auth := range oAuths {\n\t\t\tj.OAuthKeys = append(j.OAuthKeys, auth)\n\t\t\tts := oauth2.StaticTokenSource(\n\t\t\t\t&oauth2.Token{AccessToken: auth},\n\t\t\t)\n\t\t\ttc := oauth2.NewClient(j.Context, ts)\n\t\t\tclient := github.NewClient(tc)\n\t\t\tj.Clients = append(j.Clients, client)\n\t\t}\n\t}\n\tif CacheGitHubRepo {\n\t\tj.GitHubRepo = make(map[string]map[string]interface{})\n\t}\n\tif CacheGitHubIssues {\n\t\tj.GitHubIssues = make(map[string][]map[string]interface{})\n\t}\n\tif CacheGitHubUser {\n\t\tj.GitHubUser = make(map[string]map[string]interface{})\n\t}\n\tif CacheGitHubIssueComments {\n\t\tj.GitHubIssueComments = make(map[string][]map[string]interface{})\n\t}\n\tif CacheGitHubCommentReactions {\n\t\tj.GitHubCommentReactions = make(map[string][]map[string]interface{})\n\t}\n\tif CacheGitHubIssueReactions {\n\t\tj.GitHubIssueReactions = make(map[string][]map[string]interface{})\n\t}\n\tif CacheGitHubPull {\n\t\tj.GitHubPull = make(map[string]map[string]interface{})\n\t}\n\tif CacheGitHubPulls {\n\t\tj.GitHubPulls = make(map[string][]map[string]interface{})\n\t}\n\tif 
CacheGitHubPullReviews {\n\t\tj.GitHubPullReviews = make(map[string][]map[string]interface{})\n\t}\n\tif CacheGitHubPullReviewComments {\n\t\tj.GitHubPullReviewComments = make(map[string][]map[string]interface{})\n\t}\n\tif CacheGitHubReviewCommentReactions {\n\t\tj.GitHubReviewCommentReactions = make(map[string][]map[string]interface{})\n\t}\n\tif CacheGitHubPullRequestedReviewers {\n\t\tj.GitHubPullRequestedReviewers = make(map[string][]map[string]interface{})\n\t}\n\tif CacheGitHubPullCommits {\n\t\tj.GitHubPullCommits = make(map[string][]map[string]interface{})\n\t}\n\tif CacheGitHubUserOrgs {\n\t\tj.GitHubUserOrgs = make(map[string][]map[string]interface{})\n\t}\n\t// Multithreading\n\tj.ThrN = GetThreadsNum(ctx)\n\tif j.ThrN > 1 {\n\t\tj.GitHubMtx = &sync.RWMutex{}\n\t\tj.GitHubRateMtx = &sync.RWMutex{}\n\t\tif CacheGitHubRepo {\n\t\t\tj.GitHubRepoMtx = &sync.RWMutex{}\n\t\t}\n\t\tif CacheGitHubIssues {\n\t\t\tj.GitHubIssuesMtx = &sync.RWMutex{}\n\t\t}\n\t\tif CacheGitHubUser {\n\t\t\tj.GitHubUserMtx = &sync.RWMutex{}\n\t\t}\n\t\tif CacheGitHubIssueComments {\n\t\t\tj.GitHubIssueCommentsMtx = &sync.RWMutex{}\n\t\t}\n\t\tif CacheGitHubCommentReactions {\n\t\t\tj.GitHubCommentReactionsMtx = &sync.RWMutex{}\n\t\t}\n\t\tif CacheGitHubIssueReactions {\n\t\t\tj.GitHubIssueReactionsMtx = &sync.RWMutex{}\n\t\t}\n\t\tif CacheGitHubPull {\n\t\t\tj.GitHubPullMtx = &sync.RWMutex{}\n\t\t}\n\t\tif CacheGitHubPulls {\n\t\t\tj.GitHubPullsMtx = &sync.RWMutex{}\n\t\t}\n\t\tif CacheGitHubPullReviews {\n\t\t\tj.GitHubPullReviewsMtx = &sync.RWMutex{}\n\t\t}\n\t\tif CacheGitHubPullReviewComments {\n\t\t\tj.GitHubPullReviewCommentsMtx = &sync.RWMutex{}\n\t\t}\n\t\tif CacheGitHubReviewCommentReactions {\n\t\t\tj.GitHubReviewCommentReactionsMtx = &sync.RWMutex{}\n\t\t}\n\t\tif CacheGitHubPullRequestedReviewers {\n\t\t\tj.GitHubPullRequestedReviewersMtx = &sync.RWMutex{}\n\t\t}\n\t\tif CacheGitHubPullCommits {\n\t\t\tj.GitHubPullCommitsMtx = &sync.RWMutex{}\n\t\t}\n\t\tif 
CacheGitHubUserOrgs {\n\t\t\tj.GitHubUserOrgsMtx = &sync.RWMutex{}\n\t\t}\n\t}\n\tj.Hint, _ = j.handleRate(ctx)\n\tj.CacheDir = os.Getenv(\"HOME\") + \"/.perceval/github-users-cache/\"\n\t_ = os.MkdirAll(j.CacheDir, 0777)\n\treturn\n}", "func NewClient(token string, baseURL string) *Client {\n\tif baseURL == \"\" {\n\t\tbaseURL = apiURL\n\t}\n\treturn &Client{\n\t\tbaseURL: baseURL,\n\t\ttoken: token,\n\t}\n}", "func NewClient(api string, baseURI string, org string) *Client {\n\n\tif !strings.HasSuffix(baseURI, \"/\") {\n\t\tbaseURI += \"/\"\n\t}\n\n\treturn &Client{\n\t\tclient: &http.Client{},\n\t\tsentryAPIKey: api,\n\t\tsentryURI: baseURI,\n\t\tsentryOrg: org,\n\t}\n}", "func NewClient(cfg watson.Config) (Client, error) {\n\tci := Client{version: \"/\" + defaultMajorVersion}\n\tif len(cfg.Credentials.ServiceName) == 0 {\n\t\tcfg.Credentials.ServiceName = \"retrieve_and_rank\"\n\t}\n\tif len(cfg.Credentials.Url) == 0 {\n\t\tcfg.Credentials.Url = defaultUrl\n\t}\n\tclient, err := watson.NewClient(cfg.Credentials)\n\tif err != nil {\n\t\treturn Client{}, err\n\t}\n\tci.watsonClient = client\n\treturn ci, nil\n}", "func NewClient() *Client {\n baseURL, _ := url.Parse(defaultBaseURL)\n return &Client{client: http.DefaultClient, BaseURL: baseURL, UserAgent: userAgent}\n}", "func NewClient() GitLaber {\n\treturn new(client)\n}", "func NewClient(authToken string) *Client {\n\treturn &Client{\n\t\tBaseURL: apiURLv1,\n\t\tHTTPClient: newTransport(),\n\t\tauthToken: authToken,\n\t\tidOrg: \"\",\n\t}\n}", "func NewClient(apiTokenID string, apiSecret string, logger *log.Logger) (*Client, error) {\n\tif len(apiTokenID) == 0 {\n\t\treturn nil, fmt.Errorf(\"apiTokenID is not set\")\n\t}\n\n\tif len(apiSecret) == 0 {\n\t\treturn nil, fmt.Errorf(\"apiSecret is not set\")\n\t}\n\n\tpublicUrl, err := url.ParseRequestURI(baseUrl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tprivateUrl, err := url.ParseRequestURI(privateBaseUrl)\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\n\tvar discardLogger = log.New(ioutil.Discard, \"\", log.LstdFlags)\n\tif logger == nil {\n\t\tlogger = discardLogger\n\t}\n\n\tclient := &http.Client{Timeout: time.Duration(10) * time.Second}\n\treturn &Client{URL: publicUrl, PrivateURL: privateUrl, ApiTokenID: apiTokenID, ApiSecret: apiSecret, HTTPClient: client, Logger: logger}, nil\n\n}", "func newClient(token string) *Client {\n\treturn &Client{\n\t\tToken: token,\n\t}\n}", "func NewFakeClient() Client {\n\treturn &client{\n\t\tlogger: logrus.WithField(\"client\", \"github\"),\n\t\tgqlc: &graphQLGitHubAppsAuthClientWrapper{},\n\t\tdelegate: &delegate{\n\t\t\ttime: &standardTime{},\n\t\t\tfake: true,\n\t\t\tdry: true,\n\t\t},\n\t}\n}", "func oauth2Client(ctx context.Context) *github.Client {\n\tvar oauthClient *http.Client\n\tif token := getGithubToken(); token != \"\" {\n\t\tts := oauth2.StaticTokenSource(\n\t\t\t&oauth2.Token{AccessToken: getGithubToken()},\n\t\t)\n\t\toauthClient = oauth2.NewClient(ctx, ts)\n\t}\n\treturn github.NewClient(oauthClient)\n}", "func NewWithOptions(opts ...OptFn) (*Labeler, error) {\n\tl := Labeler{}\n\toptions := Opt{\n\t\ttoken: os.Getenv(\"GITHUB_TOKEN\"),\n\t\towner: os.Getenv(\"GITHUB_ACTOR\"),\n\t\trepo: os.Getenv(\"GITHUB_REPO\"),\n\t\tevent: os.Getenv(\"GITHUB_EVENT_NAME\"),\n\t\tid: -1,\n\t}\n\n\tfor _, opt := range opts {\n\t\topt(&options)\n\t}\n\n\t// validations\n\tif options.owner == \"\" && options.repo == \"\" {\n\t\treturn nil, errors.New(\"both a github and owner are required\")\n\t}\n\n\tif strings.Contains(options.repo, \"/\") {\n\t\treturn nil, errors.New(\"a repo must be just the repo name. 
Separate org/repo style into owner and repo options\")\n\t}\n\n\tif options.owner == \"\" {\n\t\treturn nil, errors.New(\"a github owner (user or org) is required\")\n\t}\n\n\tif options.repo == \"\" {\n\t\treturn nil, errors.New(\"a github repo is required\")\n\t}\n\n\tif options.id < 0 {\n\t\treturn nil, errors.New(\"the integer id of the issue or pull request is required\")\n\t}\n\n\tif options.ctx == nil {\n\t\toptions.ctx = context.Background()\n\t}\n\n\tif options.client == nil {\n\t\t// only validate the token when constructing this default client. Otherwise, assume the caller has property constructed a client\n\t\tif options.token == \"\" {\n\t\t\tisTest := false\n\t\t\t// hack: only apply this required token check if not in tests.\n\t\t\t// this isn't a concern if we construct with an empty token because the client will error at invocation\n\t\t\tfor _, arg := range os.Args {\n\t\t\t\tif strings.HasPrefix(arg, \"-test.v\") {\n\t\t\t\t\tisTest = true\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !isTest {\n\t\t\t\treturn nil, errors.New(\"github token (e.g. 
GITHUB_TOKEN environment variable) is required\")\n\t\t\t}\n\t\t}\n\n\t\toptions.client = github.NewClient(oauth2.NewClient(options.ctx, oauth2.StaticTokenSource(\n\t\t\t&oauth2.Token{AccessToken: options.token},\n\t\t)))\n\t}\n\n\t// assignment\n\tl.context = &options.ctx\n\tl.client = options.client\n\tl.Owner = &options.owner\n\tl.Repo = &options.repo\n\tl.Event = &options.event\n\tl.ID = &options.id\n\tif options.data != \"\" {\n\t\tl.Data = &options.data\n\t}\n\tl.configPath = options.configPath\n\n\treturn &l, nil\n}", "func NewClient(scheme, host, rootPath string) (*Client, error) {\n\tfi, err := NewFakeIdentity()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed identity gen: %v\", err)\n\t}\n\tclient := &Client{\n\t\tScheme: scheme,\n\t\tHost: host,\n\t\tRootPath: rootPath,\n\t\tIdentity: fi,\n\t}\n\n\tif UseJava {\n\t\tclient.Nutter = &JavaNutter{}\n\t} else if UseDotNet {\n\t\tclient.Nutter = &DotNetNutter{}\n\t} else {\n\t\t// default to self for the Nutter\n\t\tclient.Nutter = client\n\t}\n\treturn client, nil\n}", "func MockGithubClient(statusCode int, response string) ClientWrapper {\n\treturn Client(NewTestClient(MockRoundTripper{\n\t\tStatusCode: statusCode,\n\t\tResponse: response,\n\t}))\n}", "func NewClient(httpClient *http.Client, address, token string, logger Logger) *Client {\n\treturn &Client{httpClient, address, token, logger}\n}", "func NewGithub(context context.Context, repositoryServices IRepositoryServices) Github {\n\treturn Github{\n\t\trepositoryServices: repositoryServices,\n\t\tcontext: context,\n\t}\n}", "func NewClientFromOptions(fields logrus.Fields, options ClientOptions) (TokenGenerator, UserGenerator, Client, error) {\n\toptions = options.Default()\n\n\t// Will be nil if github app authentication is used\n\tif options.GetToken == nil {\n\t\toptions.GetToken = func() []byte { return nil }\n\t}\n\tif options.BaseRoundTripper == nil {\n\t\toptions.BaseRoundTripper = http.DefaultTransport\n\t}\n\n\thttpClient := 
&http.Client{\n\t\tTransport: options.BaseRoundTripper,\n\t\tTimeout: options.MaxRequestTime,\n\t}\n\tgraphQLTransport := newAddHeaderTransport(options.BaseRoundTripper)\n\tc := &client{\n\t\tlogger: logrus.WithFields(fields).WithField(\"client\", \"github\"),\n\t\tgqlc: &graphQLGitHubAppsAuthClientWrapper{Client: githubql.NewEnterpriseClient(\n\t\t\toptions.GraphqlEndpoint,\n\t\t\t&http.Client{\n\t\t\t\tTimeout: options.MaxRequestTime,\n\t\t\t\tTransport: &oauth2.Transport{\n\t\t\t\t\tSource: newReloadingTokenSource(options.GetToken),\n\t\t\t\t\tBase: graphQLTransport,\n\t\t\t\t},\n\t\t\t})},\n\t\tdelegate: &delegate{\n\t\t\ttime: &standardTime{},\n\t\t\tclient: httpClient,\n\t\t\tbases: options.Bases,\n\t\t\tthrottle: ghThrottler{Throttler: &throttle.Throttler{}},\n\t\t\tgetToken: options.GetToken,\n\t\t\tcensor: options.Censor,\n\t\t\tdry: options.DryRun,\n\t\t\tusesAppsAuth: options.AppID != \"\",\n\t\t\tmaxRetries: options.MaxRetries,\n\t\t\tmax404Retries: options.Max404Retries,\n\t\t\tinitialDelay: options.InitialDelay,\n\t\t\tmaxSleepTime: options.MaxSleepTime,\n\t\t},\n\t}\n\tc.gqlc = c.gqlc.forUserAgent(c.userAgent())\n\n\t// Wrap clients with the throttler\n\tc.wrapThrottler()\n\n\tvar tokenGenerator func(_ string) (string, error)\n\tvar userGenerator func() (string, error)\n\tif options.AppID != \"\" {\n\t\tappsTransport, err := newAppsRoundTripper(options.AppID, options.AppPrivateKey, options.BaseRoundTripper, c, options.Bases)\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, fmt.Errorf(\"failed to construct apps auth roundtripper: %w\", err)\n\t\t}\n\t\thttpClient.Transport = appsTransport\n\t\tgraphQLTransport.upstream = appsTransport\n\n\t\t// Use github apps auth for git actions\n\t\t// https://docs.github.com/en/free-pro-team@latest/developers/apps/authenticating-with-github-apps#http-based-git-access-by-an-installation=\n\t\ttokenGenerator = func(org string) (string, error) {\n\t\t\tres, _, err := 
appsTransport.installationTokenFor(org)\n\t\t\treturn res, err\n\t\t}\n\t\tuserGenerator = func() (string, error) {\n\t\t\treturn \"x-access-token\", nil\n\t\t}\n\t} else {\n\t\t// Use Personal Access token auth for git actions\n\t\ttokenGenerator = func(_ string) (string, error) {\n\t\t\treturn string(options.GetToken()), nil\n\t\t}\n\t\tuserGenerator = func() (string, error) {\n\t\t\tuser, err := c.BotUser()\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t\treturn user.Login, nil\n\t\t}\n\t}\n\n\treturn tokenGenerator, userGenerator, c, nil\n}", "func New(owner string, repo string, event string, id int, data *string) (*Labeler, error) {\n\ttoken, found := os.LookupEnv(\"GITHUB_TOKEN\")\n\tif !found {\n\t\treturn nil, errors.New(\"GITHUB_TOKEN environment variable is missing\")\n\t}\n\tctx := context.Background()\n\tts := oauth2.StaticTokenSource(\n\t\t&oauth2.Token{AccessToken: token},\n\t)\n\ttc := oauth2.NewClient(ctx, ts)\n\tclient := github.NewClient(tc)\n\tl := &Labeler{\n\t\tcontext: &ctx,\n\t\tclient: client,\n\t\tOwner: &owner,\n\t\tRepo: &repo,\n\t\tEvent: &event,\n\t\tID: &id,\n\t\tData: data,\n\t}\n\treturn l, nil\n}", "func NewClient(accessKey, accessSecret string) *Client {\n\tbaseURL, _ := url.Parse(defaultBaseURL)\n\treturn &Client{\n\t\tclient: defaultHttpClient,\n\t\tbaseURL: baseURL,\n\t\taccessKey: accessKey,\n\t\taccessSecret: accessSecret,\n\t}\n}", "func New(credhubURL string, hc HTTPClient) (*Client, error) {\n\tc := &Client{\n\t\turl: credhubURL,\n\t\thc: hc,\n\t}\n\n\tc.Log = log.New(os.Stderr, log.Prefix(), log.Flags())\n\n\terr := c.setVersion()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn c, nil\n}", "func NewRepoVersionGetter(ctx context.Context, credentials Credentials) RepoVersionGetter {\n\tif credentials.Username != \"\" && credentials.Password != \"\" {\n\t\tauth := github.BasicAuthTransport{\n\t\t\tUsername: credentials.Username,\n\t\t\tPassword: credentials.Password,\n\t\t}\n\t\treturn 
&githubClient{\n\t\t\tclient: github.NewClient(auth.Client()),\n\t\t}\n\t}\n\n\tif credentials.Token != \"\" {\n\t\tauth := oauth2.StaticTokenSource(\n\t\t\t&oauth2.Token{AccessToken: credentials.Token},\n\t\t)\n\t\treturn &githubClient{\n\t\t\tgithub.NewClient(oauth2.NewClient(ctx, auth)),\n\t\t}\n\t}\n\n\treturn &githubClient{\n\t\tgithub.NewClient(nil),\n\t}\n}", "func Client(client *http.Client) ClientWrapper {\n\treturn ClientWrapper{Client: github.NewClient(client)}\n}", "func New(token string) Client {\n\treturn Client{\n\t\ttoken: token,\n\t\thttpClient: http.DefaultClient,\n\t\tbaseURL: baseURL,\n\t}\n}", "func newClient(uri string, hc *http.Client, opts jsonclient.Options, log *entitylist.LogInfo) (*LogClient, error) {\n\tlogClient, err := jsonclient.New(uri, hc, opts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &LogClient{*logClient, *log}, err\n}", "func NewClient(apiKey string, options ...OptionFunc) *Client {\n\tbaseURL, _ := url.Parse(defaultBaseURL)\n\tc := &Client{\n\t\tclient: &http.Client{\n\t\t\tTimeout: time.Second * 10,\n\t\t},\n\t\tbaseURL: baseURL,\n\t\tapiKey: apiKey,\n\t\tuserAgent: \"github.com/barthr/newsapi\",\n\t}\n\n\tfor _, opt := range options {\n\t\topt(c)\n\t}\n\treturn c\n}", "func NewClient(apiToken string) *Client {\n\tbaseURL, _ := url.Parse(defaultBaseURL)\n\tclient := &Client{\n\t\ttoken: apiToken,\n\t\tclient: http.DefaultClient,\n\t\tbaseURL: baseURL,\n\t\tuserAgent: defaultUserAgent,\n\t}\n\tclient.Me = newMeService(client)\n\tclient.Projects = newProjectService(client)\n\tclient.Stories = newStoryService(client)\n\tclient.Memberships = newMembershipService(client)\n\tclient.Iterations = newIterationService(client)\n\tclient.Activity = newActivitiesService(client)\n\tclient.Epic = newEpicService(client)\n\treturn client\n}", "func NewLoggingClient(c Client) Client {\n\treturn &loggingClient{c, \"githubapi\"}\n}", "func NewGithub(w io.Writer) Printer {\n\treturn &github{w: w}\n}", "func 
provideGitlabClient(config config.Config) *scm.Client {\n\tlogrus.WithField(\"server\", config.GitLab.Server).\n\t\tWithField(\"client\", config.GitLab.ClientID).\n\t\tWithField(\"skip_verify\", config.GitLab.SkipVerify).\n\t\tDebugln(\"main: creating the GitLab client\")\n\n\tclient, err := gitlab.New(config.GitLab.Server)\n\tif err != nil {\n\t\tlogrus.WithError(err).\n\t\t\tFatalln(\"main: cannot create the GitLab client\")\n\t}\n\tif config.GitLab.Debug {\n\t\tclient.DumpResponse = httputil.DumpResponse\n\t}\n\tclient.Client = &http.Client{\n\t\tTransport: &oauth2.Transport{\n\t\t\tSource: oauth2.ContextTokenSource(),\n\t\t\tBase: defaultTransport(config.GitLab.SkipVerify),\n\t\t},\n\t}\n\treturn client\n}", "func NewGithubOauthClient() *github.Client {\n\treturn NewGithubOauthClientWithAccessToken(getSecretAccessToken())\n}", "func NewClient(httpClient *http.Client, baseURL string) (*Client, error) {\n\tif httpClient == nil {\n\t\thttpClient = http.DefaultClient\n\t}\n\n\tparsedBaseURL, err := url.Parse(baseURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc := &Client{\n\t\tclient: httpClient,\n\t\tbaseURL: parsedBaseURL,\n\t}\n\tc.Authentication = &AuthenticationService{client: c}\n\tc.User = &UserService{client: c}\n\tc.Project = &ProjectService{client: c}\n\tc.Report = &ReportService{client: c}\n\treturn c, nil\n}", "func MockGithubClient(responses []MockResponse) ClientWrapper {\n\treturn Client(NewTestClient(&MockRoundTripper{\n\t\tResponses: responses,\n\t}))\n}", "func InitGithub(config *GithubConfig) {\n\tts := oauth2.StaticTokenSource(\n\t\t&oauth2.Token{AccessToken: config.Token},\n\t)\n\ttc := oauth2.NewClient(oauth2.NoContext, ts)\n\tservice = &githubService{\n\t\tconfig: config,\n\t\tclient: github.NewClient(tc),\n\t}\n}", "func New(organization string, c *config.Config) *API {\n\tctx := context.Background()\n\tts := oauth2.StaticTokenSource(\n\t\t&oauth2.Token{AccessToken: c.GithubToken},\n\t)\n\ttc := oauth2.NewClient(ctx, ts)\n\n\tclient 
:= gh.NewClient(tc)\n\n\treturn &API{\n\t\torganization,\n\t\ttc,\n\t\tclient,\n\t\tcache{validDuration: time.Duration(6 * time.Hour)},\n\t}\n}", "func newClient(httpClient *http.Client) (c *Client) {\n\tc = &Client{httpClient: httpClient}\n\tc.service.client = c\n\tc.Auth = (*AuthService)(&c.service)\n\tc.Providers = (*ProvidersService)(&c.service)\n\tc.Projects = (*ProjectsService)(&c.service)\n\tc.Releases = (*ReleasesService)(&c.service)\n\tc.SlackChannels = (*SlackChannelsService)(&c.service)\n\tc.TelegramChats = (*TelegramChatsService)(&c.service)\n\tc.DiscordChannels = (*DiscordChannelsService)(&c.service)\n\tc.HangoutsChatWebhooks = (*HangoutsChatWebhooksService)(&c.service)\n\tc.MicrosoftTeamsWebhooks = (*MicrosoftTeamsWebhooksService)(&c.service)\n\tc.MattermostWebhooks = (*MattermostWebhooksService)(&c.service)\n\tc.RocketchatWebhooks = (*RocketchatWebhooksService)(&c.service)\n\tc.MatrixRooms = (*MatrixRoomsService)(&c.service)\n\tc.Webhooks = (*WebhooksService)(&c.service)\n\tc.Tags = (*TagsService)(&c.service)\n\treturn c\n}", "func NewClient(ctx context.Context, host string) (*Client, error) {\n\tclient, err := newGerritClient(ctx, host)\n\tif err != nil {\n\t\treturn nil, errors.Annotate(err, \"error making Gerrit client for host %s\", host).Err()\n\t}\n\n\treturn &Client{\n\t\tgerritClient: client,\n\t\thost: host,\n\t}, nil\n}", "func NewClient(token string) *Client {\n\tc := Client{\n\t\ttoken: token,\n\t\thclient: retryablehttp.NewClient(),\n\t}\n\n\t// set up http client\n\tc.hclient.Logger = nil\n\tc.hclient.ErrorHandler = c.errorHandler\n\tc.hclient.RetryMax = retryLimit\n\tc.hclient.RetryWaitMin = maxRateLimit / 3\n\tc.hclient.RetryWaitMax = maxRateLimit\n\n\t// add services\n\tc.Account = &AccountService{&c}\n\tc.Server = &ServerService{&c}\n\tc.Transaction = &TransactionService{&c}\n\tc.CreateOptions = &CreateOptionsService{&c}\n\tc.SSHKey = &SSHKeyService{&c}\n\n\treturn &c\n}", "func (c *client) newClient() *gitea.Client {\n\treturn 
c.newClientToken(\"\")\n}" ]
[ "0.7759529", "0.77101016", "0.73958576", "0.72508085", "0.72462213", "0.71401393", "0.711106", "0.7109243", "0.6980343", "0.69798774", "0.6979324", "0.6939871", "0.6929803", "0.6923655", "0.6916688", "0.68918586", "0.6881709", "0.6701082", "0.6700863", "0.6653842", "0.6622923", "0.65382576", "0.6536031", "0.6492383", "0.64466465", "0.6435465", "0.642879", "0.6362788", "0.62803936", "0.6233658", "0.6214802", "0.6160028", "0.6147913", "0.6142091", "0.6139792", "0.61318487", "0.6124546", "0.611547", "0.6064665", "0.6061127", "0.6052448", "0.60466963", "0.6017928", "0.5997281", "0.59916776", "0.59604746", "0.59548193", "0.5920605", "0.59003645", "0.58888155", "0.5883885", "0.5826112", "0.58249485", "0.5788991", "0.57832175", "0.5773043", "0.5761322", "0.5749324", "0.5743505", "0.57387865", "0.5734257", "0.57097733", "0.5706872", "0.56752354", "0.5656234", "0.56260014", "0.56136805", "0.5612924", "0.5607775", "0.56030846", "0.5601124", "0.5596874", "0.55954766", "0.5590758", "0.5585409", "0.5574381", "0.556428", "0.5553113", "0.55470836", "0.55358446", "0.5531057", "0.5530722", "0.5526374", "0.5520397", "0.550652", "0.5502678", "0.5501065", "0.55009353", "0.55004483", "0.54927015", "0.5483076", "0.54752964", "0.54737335", "0.5473693", "0.5467043", "0.54582304", "0.5453478", "0.54511666", "0.5442951", "0.5437038" ]
0.7156345
5
GetRepository fetches a repository
func (c *Client) GetRepository(ctx context.Context) (*github.Repository, error) { repo, res, err := c.Repositories.Get(context.TODO(), c.Owner, c.Repo) if err != nil { if res.StatusCode == http.StatusNotFound { return nil, nil } panic(err) } return repo, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func GetRepository(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *RepositoryState, opts ...pulumi.ResourceOption) (*Repository, error) {\n\tvar resource Repository\n\terr := ctx.ReadResource(\"aws-native:ecr:Repository\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func GetRepository(web *router.WebRequest) *model.Container {\n\tprojectName := web.GetQueryParam(\"project\")\n\tname := web.GetQueryParam(\"name\")\n\tif projectName == \"\" || name == \"\" {\n\t\treturn model.ErrorResponse(model.MessageItem{\n\t\t\tCode: \"invalid-request\",\n\t\t\tMessage: \"project name and repository name are required\",\n\t\t}, 500)\n\t}\n\titem, err := factory.GetGitClient().GetRepositoryDetail(projectName, name)\n\tif err != nil {\n\t\treturn model.ErrorResponse(model.MessageItem{\n\t\t\tCode: \"list-repo-error\",\n\t\t\tMessage: err.Error(),\n\t\t}, 500)\n\t}\n\treturn model.Response(item)\n}", "func GetRepository(cmd *cobra.Command, args []string) {\n\treq := &helmmanager.ListRepositoryReq{}\n\n\tif !flagAll {\n\t\treq.Size = common.GetUint32P(uint32(flagNum))\n\t}\n\tif len(args) > 0 {\n\t\treq.Name = common.GetStringP(args[0])\n\t\treq.Size = common.GetUint32P(1)\n\t}\n\treq.ProjectID = &flagProject\n\n\tc := newClientWithConfiguration()\n\tr, err := c.Repository().List(cmd.Context(), req)\n\tif err != nil {\n\t\tfmt.Printf(\"get repository failed, %s\\n\", err.Error())\n\t\tos.Exit(1)\n\t}\n\n\tif flagOutput == outputTypeJson {\n\t\tprinter.PrintRepositoryInJson(r)\n\t\treturn\n\t}\n\n\tprinter.PrintRepositoryInTable(flagOutput == outputTypeWide, r)\n}", "func (c *V3Client) GetRepository(ctx context.Context, owner, name string) (*Repository, error) {\n\tif GetRepositoryMock != nil {\n\t\treturn GetRepositoryMock(ctx, owner, name)\n\t}\n\n\tkey := ownerNameCacheKey(owner, name)\n\treturn c.cachedGetRepository(ctx, key, func(ctx context.Context) (repo *Repository, keys []string, err error) 
{\n\t\tkeys = append(keys, key)\n\t\trepo, err = c.getRepositoryFromAPI(ctx, owner, name)\n\t\tif repo != nil {\n\t\t\tkeys = append(keys, nodeIDCacheKey(repo.ID)) // also cache under GraphQL node ID\n\t\t}\n\t\treturn repo, keys, err\n\t}, false)\n}", "func (client ArtifactsClient) GetRepository(ctx context.Context, request GetRepositoryRequest) (response GetRepositoryResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.NoRetryPolicy()\n\tif client.RetryPolicy() != nil {\n\t\tpolicy = *client.RetryPolicy()\n\t}\n\tif request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\tociResponse, err = common.Retry(ctx, request, client.getRepository, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tif httpResponse := ociResponse.HTTPResponse(); httpResponse != nil {\n\t\t\t\topcRequestId := httpResponse.Header.Get(\"opc-request-id\")\n\t\t\t\tresponse = GetRepositoryResponse{RawResponse: httpResponse, OpcRequestId: &opcRequestId}\n\t\t\t} else {\n\t\t\t\tresponse = GetRepositoryResponse{}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif convertedResponse, ok := ociResponse.(GetRepositoryResponse); ok {\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into GetRepositoryResponse\")\n\t}\n\treturn\n}", "func (i *imageBackend) GetRepository(ctx context.Context, ref reference.Named, authConfig *registrytypes.AuthConfig) (dist.Repository, error) {\n\treturn distribution.GetRepository(ctx, ref, &distribution.ImagePullConfig{\n\t\tConfig: distribution.Config{\n\t\t\tAuthConfig: authConfig,\n\t\t\tRegistryService: i.registryService,\n\t\t},\n\t})\n}", "func (s *RepositoryService) Get(rs app.RequestScope, name string) (*models.Repository, error) {\n\treturn s.dao.Get(rs.DB(), name)\n}", "func (client ArtifactsClient) getRepository(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) 
{\n\n\thttpRequest, err := request.HTTPRequest(http.MethodGet, \"/repositories/{repositoryId}\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response GetRepositoryResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\tapiReferenceLink := \"https://docs.oracle.com/iaas/api/#/en/registry/20160918/Repository/GetRepository\"\n\t\terr = common.PostProcessServiceError(err, \"Artifacts\", \"GetRepository\", apiReferenceLink)\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponseWithPolymorphicBody(httpResponse, &response, &repository{})\n\treturn response, err\n}", "func getRepository(config *gctsDeployOptions, httpClient piperhttp.Sender) (*getRepositoryResponseBody, error) {\n\tvar response getRepositoryResponseBody\n\trequestURL := config.Host +\n\t\t\"/sap/bc/cts_abapvcs/repository/\" + config.Repository +\n\t\t\"?sap-client=\" + config.Client\n\n\trequestURL, urlErr := addQueryToURL(requestURL, config.QueryParameters)\n\n\tif urlErr != nil {\n\n\t\treturn nil, urlErr\n\t}\n\n\tresp, httpErr := httpClient.SendRequest(\"GET\", requestURL, nil, nil, nil)\n\tdefer func() {\n\t\tif resp != nil && resp.Body != nil {\n\t\t\tresp.Body.Close()\n\t\t}\n\t}()\n\tif httpErr != nil {\n\t\t_, errorDumpParseErr := parseErrorDumpFromResponseBody(resp)\n\t\tif errorDumpParseErr != nil {\n\t\t\treturn nil, errorDumpParseErr\n\t\t}\n\t\tlog.Entry().Infof(\"Error while repository Check : %v\", httpErr)\n\t\treturn &response, httpErr\n\t} else if resp == nil {\n\t\treturn &response, errors.New(\"did not retrieve a HTTP response\")\n\t}\n\n\tparsingErr := piperhttp.ParseHTTPResponseBodyJSON(resp, &response)\n\tif parsingErr != nil {\n\t\treturn &response, parsingErr\n\t}\n\treturn &response, nil\n}", "func getRepository(repositoryID string) (*cmismodel.Repository, error) {\n\tctxt, cancel 
:= context.WithTimeout(context.Background(), 10*time.Second)\n\tdefer cancel()\n\trepo, err := cmisClient.GetRepository(ctxt, &empty.Empty{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn cmisserver.ConvertRepositoryProtoToCmis(repo), nil\n}", "func GetRepository(dbase db.IDB, logger log.ILogger, entity string) (repo IRepository, err error) {\n\tr := &repository{\n\t\tdb: dbase,\n\t\tlogger: logger,\n\t}\n\n\tswitch entity {\n\tcase \"user\":\n\t\trepo, err = NewUserRepository(r)\n\tcase \"product\":\n\t\trepo, err = NewProductRepository(r)\n\tcase \"component\":\n\t\trepo, err = NewComponentRepository(r)\n\tcase \"price\":\n\t\trepo, err = NewPriceRepository(r)\n\tcase \"ruleapplicability\":\n\t\trepo, err = NewRuleApplicabilityRepository(r)\n\tdefault:\n\t\terr = errors.Errorf(\"Repository for entity %q not found\", entity)\n\t}\n\treturn repo, err\n}", "func (c *client) GetRepo(org, name string) (*library.Repo, error) {\n\tlogrus.Tracef(\"getting repo %s/%s from the database\", org, name)\n\n\t// variable to store query results\n\tr := new(database.Repo)\n\n\t// send query to the database and store result in variable\n\tresult := c.Postgres.\n\t\tTable(constants.TableRepo).\n\t\tRaw(dml.SelectRepo, org, name).\n\t\tScan(r)\n\n\t// check if the query returned a record not found error or no rows were returned\n\tif errors.Is(result.Error, gorm.ErrRecordNotFound) || result.RowsAffected == 0 {\n\t\treturn nil, gorm.ErrRecordNotFound\n\t}\n\n\t// decrypt the fields for the repo\n\t//\n\t// https://pkg.go.dev/github.com/go-vela/types/database#Repo.Decrypt\n\terr := r.Decrypt(c.config.EncryptionKey)\n\tif err != nil {\n\t\t// ensures that the change is backwards compatible\n\t\t// by logging the error instead of returning it\n\t\t// which allows us to fetch unencrypted repos\n\t\tlogrus.Errorf(\"unable to decrypt repo %s/%s: %v\", org, name, err)\n\n\t\t// return the unencrypted repo\n\t\treturn r.ToLibrary(), result.Error\n\t}\n\n\t// return the decrypted 
repo\n\treturn r.ToLibrary(), result.Error\n}", "func GetRepository(name string) *Repository {\n\tfor i := range repos {\n\t\tif repos[i].Name == name {\n\t\t\treturn &repos[i]\n\t\t}\n\t}\n\tlog.Fatalln(\"Can't not find repository.\")\n\treturn nil\n}", "func Get() Repository {\n\tif repoInstance == nil {\n\t\trepoInstance = &repo{}\n\t}\n\n\treturn repoInstance\n}", "func (c AppConfig) GetRepository() (repositories.Repository, error) {\n\tif c.Storage == \"neo4j\" {\n\t\tdb, err := whiterabbit.Open(c.Neo4j)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t// TODO: test the connection\n\t\t//defer func() {\n\t\t//\tdb.Close()\n\t\t//}()\n\n\t\treturn neo4j.New(*db), nil\n\t}\n\treturn nil, fmt.Errorf(\"storage %s not implemeted\", c.Storage)\n}", "func GetRepository(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *RepositoryState, opts ...pulumi.ResourceOption) (*Repository, error) {\n\tvar resource Repository\n\terr := ctx.ReadResource(\"google-native:artifactregistry/v1beta2:Repository\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (s *GitlabSCM) GetRepository(cts context.Context, opt *RepositoryOptions) (*Repository, error) {\n\t// TODO no implementation provided yet\n\treturn nil, ErrNotSupported{\n\t\tSCM: \"gitlab\",\n\t\tMethod: \"GetRepository\",\n\t}\n}", "func (c *client) GetRepo(owner, name string) (FullRepo, error) {\n\tdurationLogger := c.log(\"GetRepo\", owner, name)\n\tdefer durationLogger()\n\n\tvar repo FullRepo\n\t_, err := c.request(&request{\n\t\tmethod: http.MethodGet,\n\t\tpath: fmt.Sprintf(\"/repos/%s/%s\", owner, name),\n\t\torg: owner,\n\t\texitCodes: []int{200},\n\t}, &repo)\n\treturn repo, err\n}", "func (rs *RepositoriesService) Get(id uint) (*Repository, *http.Response, error) {\n\tu, err := urlWithOptions(fmt.Sprintf(\"/repos/%d\", id), nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treq, err := rs.client.NewRequest(\"GET\", u, 
nil, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar repoResp getRepositoryResponse\n\tresp, err := rs.client.Do(req, &repoResp)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn &repoResp.Repository, resp, err\n}", "func (s *Store) GetRepository(id int64) (*gh.Repository, error) {\n\tvar repo gh.Repository\n\tresult := s.db.First(&repo, id)\n\tif result.Error != nil {\n\t\treturn nil, result.Error\n\t}\n\treturn &repo, nil\n}", "func (c *RepoAPI) Get(name string, opts ...*api.GetRepoOpts) (*api.ResultRepository, error) {\n\n\tif len(opts) == 0 {\n\t\topts = []*api.GetRepoOpts{{}}\n\t}\n\n\tparams := util.Map{\"name\": name, \"height\": opts[0].Height, \"noProposals\": opts[0].NoProposals}\n\tresp, statusCode, err := c.c.call(\"repo_get\", params)\n\tif err != nil {\n\t\treturn nil, makeReqErrFromCallErr(statusCode, err)\n\t}\n\n\tvar r = api.ResultRepository{Repository: state.BareRepository()}\n\tif err := util.DecodeMap(resp, r.Repository); err != nil {\n\t\treturn nil, errors.ReqErr(500, ErrCodeDecodeFailed, \"\", err.Error())\n\t}\n\n\treturn &r, nil\n}", "func FetchRepository(c *gin.Context) {\n\tvar (\n\t\trepo Repository\n\t)\n\n\tid := c.Param(\"id\")\n\tsqlStatement := `SELECT\n id, name, namespace, full_name, user, description, is_automated,\n last_updated, pull_count, star_count, tags_checked, score, official\n FROM image WHERE id=$1 LIMIT 2;`\n\trow := db.GetDB().QueryRow(sqlStatement, id)\n\n\terr := row.Scan(\n\t\t&repo.Id,\n\t\t&repo.Name,\n\t\t&repo.Namespace,\n\t\t&repo.Full_name,\n\t\t&repo.User,\n\t\t&repo.Description,\n\t\t&repo.Is_automated,\n\t\t&repo.Last_updated,\n\t\t&repo.Pull_count,\n\t\t&repo.Star_count,\n\t\t&repo.Tags_checked,\n\t\t&repo.Score,\n\t\t&repo.Official,\n\t)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tc.JSON(http.StatusOK, repo)\n\n}", "func (eb *ExtendedBackend) GetRepository(name string) (backends.Repository, error) {\n\tif repo, ok := eb.extended[name]; ok {\n\t\treturn repo, 
nil\n\t}\n\treturn nil, backends.ErrBackendError(\"repository not defined\")\n}", "func GetRepo(ctx context.Context, userRepo string) (Repository, error) {\n\t// setup request\n\treq, err := http.NewRequest(\"GET\", GITHUB_API_URL+\"repos/\"+userRepo, nil)\n\treq.Header.Set(\"Accept\", viper.Get(\"http.githubheaderaccept\").(string))\n\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"token %v\", viper.Get(\"http.githubheaderauthorization\")))\n\tif err != nil {\n\t\treturn Repository{}, err\n\t}\n\trepo := Repository{}\n\terr = service.HttpDo(ctx, req, func(resp *http.Response, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer resp.Body.Close()\n\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = json.Unmarshal(body, &repo)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n\treturn repo, err\n}", "func (gr GitRepo) GetRepository() *model.Repository {\n\treturn &gr.Repository\n}", "func requestedRepository(repoName string) (repository.Repository, error) {\n\t/*\t_, repoName, err := parseGitCommand(sshcmd)\n\t\tif err != nil {\n\t\t\treturn repository.Repository{}, err\n\t\t}*/\n\tvar repo repository.Repository\n\tconn, err := db.Conn()\n\tif err != nil {\n\t\treturn repository.Repository{}, err\n\t}\n\tdefer conn.Close()\n\tif err := conn.Repository().Find(bson.M{\"_id\": repoName}).One(&repo); err != nil {\n\t\treturn repository.Repository{}, errors.New(\"Repository not found\")\n\t}\n\treturn repo, nil\n}", "func GetRepo() *yamlrepo.YAMLRepo {\n\tif repo == nil {\n\t\trepo, _ = yamlrepo.New(conf.DataSource.Conn)\n\t}\n\treturn repo\n}", "func (c *Client) GetRepository() (*github.Repository, error) {\n\tc.Mutex.Lock()\n\tdefer c.Mutex.Unlock()\n\n\trepo, _, err := c.Repositories.Get(context.Background(), c.owner, c.repo)\n\tif err != nil {\n\t\tlogrus.Errorf(\"failed to get repository c.repo %s: %v\", c.repo, err)\n\t\treturn nil, err\n\t}\n\tlogrus.Debugf(\"succeed in 
getting repository %s\", c.repo)\n\treturn repo, nil\n}", "func (controller *Controller) GetRepository() *repository.Repository {\n\treturn controller.repository\n}", "func (c *CheckSuite) GetRepository() *Repository {\n\tif c == nil {\n\t\treturn nil\n\t}\n\treturn c.Repository\n}", "func GetDpsRepository() (obj DpsRepository, err error) {\n\tif repoObj == nil {\n\t\t//ERROR\n\t}\n\tobj = *repoObj\n\treturn\n}", "func (reg *registry) GetRepository(img Repository) (_ []flux.Image, err error) {\n\trem, err := reg.newRemote(img)\n\tif err != nil {\n\t\treturn\n\t}\n\n\ttags, err := rem.Tags(img)\n\tif err != nil {\n\t\trem.Cancel()\n\t\treturn nil, err\n\t}\n\n\t// the hostlessImageName is canonicalised, in the sense that it\n\t// includes \"library\" as the org, if unqualified -- e.g.,\n\t// `library/nats`. We need that to fetch the tags etc. However, we\n\t// want the results to use the *actual* name of the images to be\n\t// as supplied, e.g., `nats`.\n\treturn reg.tagsToRepository(rem, img, tags)\n}", "func (c *CodeResult) GetRepository() *Repository {\n\tif c == nil {\n\t\treturn nil\n\t}\n\treturn c.Repository\n}", "func (c *UserRepositoriesClient) Get(ctx context.Context, ref gitprovider.UserRepositoryRef) (gitprovider.UserRepository, error) {\n\t// Make sure the UserRepositoryRef is valid\n\tif err := validateUserRepositoryRef(ref, c.host); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Make sure the UserRef is valid\n\tif err := validateUserRef(ref.UserRef, c.host); err != nil {\n\t\treturn nil, err\n\t}\n\n\tslug := ref.GetSlug()\n\tif slug == \"\" {\n\t\t// try with name\n\t\tslug = ref.GetRepository()\n\t}\n\n\tapiObj, err := c.client.Repositories.Get(ctx, addTilde(ref.UserLogin), slug)\n\tif err != nil {\n\t\tif errors.Is(err, ErrNotFound) {\n\t\t\treturn nil, gitprovider.ErrNotFound\n\t\t}\n\t\treturn nil, fmt.Errorf(\"failed to get repository %s/%s: %w\", addTilde(ref.UserLogin), slug, err)\n\t}\n\n\t// Validate the API objects\n\tif err := 
validateRepositoryAPI(apiObj); err != nil {\n\t\treturn nil, err\n\t}\n\n\tref.SetSlug(apiObj.Slug)\n\n\treturn newUserRepository(c.clientContext, apiObj, ref), nil\n}", "func (client *Client) GetChartRepository(request *GetChartRepositoryRequest) (response *GetChartRepositoryResponse, err error) {\n\tresponse = CreateGetChartRepositoryResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}", "func (c *Client) GetRepo(namespace, repoName string) (*api.Repo, error) {\n\tout := &api.Repo{}\n\trawURL := fmt.Sprintf(pathRepo, c.base.String(), namespace, repoName)\n\terr := c.get(rawURL, true, out)\n\treturn out, errio.Error(err)\n}", "func (r *Repository) GitLabGetRepository(ctx context.Context, repositoryID string) (*repoModels.RepositoryDBModel, error) {\n\tf := logrus.Fields{\n\t\t\"functionName\": \"v2.repositories.repositories.GitLabGetRepository\",\n\t\tutils.XREQUESTID: ctx.Value(utils.XREQUESTID),\n\t\t\"repositoryID\": repositoryID,\n\t}\n\n\tresult, err := r.dynamoDBClient.GetItem(&dynamodb.GetItemInput{\n\t\tTableName: aws.String(r.repositoryTableName),\n\t\tKey: map[string]*dynamodb.AttributeValue{\n\t\t\t\"repository_id\": {\n\t\t\t\tS: aws.String(repositoryID),\n\t\t\t},\n\t\t},\n\t})\n\n\tif err != nil {\n\t\tlog.WithFields(f).WithError(err).Warn(\"problem querying using repository ID\")\n\t\treturn nil, err\n\t}\n\tif len(result.Item) == 0 {\n\t\tmsg := fmt.Sprintf(\"repository with ID: %s does not exist\", repositoryID)\n\t\tlog.WithFields(f).Warn(msg)\n\t\treturn nil, &utils.GitHubRepositoryNotFound{\n\t\t\tMessage: msg,\n\t\t}\n\t}\n\n\t// Decode the results into a model\n\tvar out repoModels.RepositoryDBModel\n\terr = dynamodbattribute.UnmarshalMap(result.Item, &out)\n\tif err != nil {\n\t\tlog.WithFields(f).WithError(err).Warn(\"problem unmarshalling database repository response\")\n\t\treturn nil, err\n\t}\n\n\treturn &out, nil\n}", "func (a *RepoAPI) getRepo(params interface{}) (resp *rpc.Response) {\n\tobj := 
objx.New(cast.ToStringMap(params))\n\tname := obj.Get(\"name\").Str()\n\topts := modulestypes.GetOptions{}\n\topts.Height = cast.ToUint64(obj.Get(\"height\").Inter())\n\topts.Select = cast.ToStringSlice(obj.Get(\"select\").InterSlice())\n\treturn rpc.Success(a.mods.Repo.Get(name, opts))\n}", "func (s *StarredRepository) GetRepository() *Repository {\n\tif s == nil {\n\t\treturn nil\n\t}\n\treturn s.Repository\n}", "func GetRepo(ref, url string) (*Repo, error) {\n\trepo := new(Repo)\n\trepo.Ref = ref\n\n\trem := git.NewRemote(memory.NewStorage(), &config.RemoteConfig{\n\t\tName: \"origin\",\n\t\tURLs: []string{url},\n\t})\n\n\trefs, err := rem.List(&git.ListOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, ref := range refs {\n\t\tif ref.Name().IsTag() {\n\t\t\trepo.Tags = append(repo.Tags, ref.Name().Short())\n\t\t} else if ref.Name().IsBranch() {\n\t\t\trepo.Branches = append(repo.Branches, ref.Name().Short())\n\t\t} else if ref.Name() == \"HEAD\" { // Default branch.\n\t\t\trepo.DefaultBranch = ref.Target().Short()\n\t\t}\n\t}\n\n\treturn repo, nil\n}", "func (s *Submodule) Repository() (*Repository, error) {\n\tstorer, err := s.w.r.Storer.Module(s.c.Name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t_, err = storer.Reference(plumbing.HEAD)\n\tif err != nil && err != plumbing.ErrReferenceNotFound {\n\t\treturn nil, err\n\t}\n\n\tworktree := s.w.fs.Dir(s.c.Path)\n\tif err == nil {\n\t\treturn Open(storer, worktree)\n\t}\n\n\tr, err := Init(storer, worktree)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t_, err = r.CreateRemote(&config.RemoteConfig{\n\t\tName: DefaultRemoteName,\n\t\tURL: s.c.URL,\n\t})\n\n\treturn r, err\n}", "func (c *CheckSuitePreferenceResults) GetRepository() *Repository {\n\tif c == nil {\n\t\treturn nil\n\t}\n\treturn c.Repository\n}", "func GetRepository(redisClient *redis.Client) domain.TokenRepository {\n\treturn _tokenRedisRepository.NewTokenRepository(redisClient)\n}", "func DiscoverRepository() (*Repository, 
error) {\n\tcmd := exec.Command(\"git\", \"rev-parse\", \"--git-dir\")\n\tdata, err := cmd.Output()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpath := strings.Trim(string(data), \"\\n \")\n\treturn &Repository{Path: path}, nil\n}", "func getRepo(r *http.Request) (string, string, error) {\n\trepoPath, _ := mux.Vars(r)[\"repo\"]\n\trepoUrl, err := repoNameToUrl(repoPath)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\treturn repoUrlToName(repoUrl), repoUrl, nil\n}", "func (g *GollumEvent) GetRepo() *Repository {\n\tif g == nil {\n\t\treturn nil\n\t}\n\treturn g.Repo\n}", "func (rs *RepositoriesService) GetFromSlug(slug string) (*Repository, *http.Response, error) {\n\tu, err := urlWithOptions(fmt.Sprintf(\"/repos/%s\", slug), nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treq, err := rs.client.NewRequest(\"GET\", u, nil, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar repoResp getRepositoryResponse\n\tresp, err := rs.client.Do(req, &repoResp)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn &repoResp.Repository, resp, err\n}", "func (c *repoCacheManager) fetchRepository() (ImageRepository, error) {\n\tvar result ImageRepository\n\trepoKey := NewRepositoryKey(c.repoID.CanonicalName())\n\tbytes, _, err := c.cacheClient.GetKey(repoKey)\n\tif err != nil {\n\t\treturn ImageRepository{}, err\n\t}\n\tif err = json.Unmarshal(bytes, &result); err != nil {\n\t\treturn ImageRepository{}, err\n\t}\n\treturn result, nil\n}", "func GetConfigRepo(aliasRepoPath string) error {\n\tlog.Printf(\"Cloning products repo to %s\", aliasRepoPath)\n\t_, err := git.PlainClone(aliasRepoPath, false, &git.CloneOptions{\n\t\tURL: helper.AliasRepo,\n\t\tProgress: os.Stdout,\n\t})\n\n\tif errors.Is(err, git.ErrRepositoryAlreadyExists) {\n\t\treturn pullConfigRepo(aliasRepoPath)\n\t}\n\treturn err\n}", "func (c *CheckSuiteEvent) GetRepo() *Repository {\n\tif c == nil {\n\t\treturn nil\n\t}\n\treturn c.Repo\n}", "func GetGitLabRepository(ctx 
context.Context, args protocol.RepoLookupArgs) (repo *protocol.RepoInfo, authoritative bool, err error) {\n\tif GetGitLabRepositoryMock != nil {\n\t\treturn GetGitLabRepositoryMock(args)\n\t}\n\n\tghrepoToRepoInfo := func(proj *gitlab.Project, conn *gitlabConnection) *protocol.RepoInfo {\n\t\treturn &protocol.RepoInfo{\n\t\t\tName: gitlabProjectToRepoPath(conn, proj),\n\t\t\tExternalRepo: gitlab.ExternalRepoSpec(proj, *conn.baseURL),\n\t\t\tDescription: proj.Description,\n\t\t\tFork: proj.ForkedFromProject != nil,\n\t\t\tArchived: proj.Archived,\n\t\t\tVCS: protocol.VCSInfo{\n\t\t\t\tURL: conn.authenticatedRemoteURL(proj),\n\t\t\t},\n\t\t\tLinks: &protocol.RepoLinks{\n\t\t\t\tRoot: proj.WebURL,\n\t\t\t\tTree: proj.WebURL + \"/tree/{rev}/{path}\",\n\t\t\t\tBlob: proj.WebURL + \"/blob/{rev}/{path}\",\n\t\t\t\tCommit: proj.WebURL + \"/commit/{commit}\",\n\t\t\t},\n\t\t}\n\t}\n\n\tconn, err := getGitLabConnection(args)\n\tif err != nil {\n\t\treturn nil, true, err // refers to a GitLab repo but the host is not configured\n\t}\n\tif conn == nil {\n\t\treturn nil, false, nil // refers to a non-GitLab repo\n\t}\n\n\tif args.ExternalRepo != nil && args.ExternalRepo.ServiceType == gitlab.ServiceType {\n\t\t// Look up by external repository spec.\n\t\tid, err := strconv.Atoi(args.ExternalRepo.ID)\n\t\tif err != nil {\n\t\t\treturn nil, true, err\n\t\t}\n\t\tproj, err := conn.client.GetProject(ctx, gitlab.GetProjectOp{ID: id})\n\t\tif proj != nil {\n\t\t\trepo = ghrepoToRepoInfo(proj, conn)\n\t\t}\n\t\treturn repo, true, err\n\t}\n\n\tif args.Repo != \"\" {\n\t\t// Look up by repository name.\n\t\tpathWithNamespace := strings.TrimPrefix(strings.ToLower(string(args.Repo)), conn.baseURL.Hostname()+\"/\")\n\t\tproj, err := conn.client.GetProject(ctx, gitlab.GetProjectOp{PathWithNamespace: pathWithNamespace})\n\t\tif proj != nil {\n\t\t\trepo = ghrepoToRepoInfo(proj, conn)\n\t\t}\n\t\treturn repo, true, err\n\t}\n\n\treturn nil, true, fmt.Errorf(\"unable to look up GitLab 
repository (%+v)\", args)\n}", "func (n *Notification) GetRepository() *Repository {\n\tif n == nil {\n\t\treturn nil\n\t}\n\treturn n.Repository\n}", "func (r *Repository) Get(ctx context.Context, id digest.Digest) (io.ReadCloser, error) {\n\t_, path := r.Path(id)\n\trc, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, errors.E(\"get\", r.Root, id, err)\n\t}\n\treturn rc, nil\n}", "func (c *CommitResult) GetRepository() *Repository {\n\tif c == nil {\n\t\treturn nil\n\t}\n\treturn c.Repository\n}", "func HelmReposGet(c *gin.Context) {\n\tlog := logger.WithFields(logrus.Fields{\"tag\": \"HelmReposGet\"})\n\n\tlog.Info(\"Get helm repository\")\n\n\tclusterName, ok := GetCommonClusterNameFromRequest(c)\n\tif ok != true {\n\t\treturn\n\t}\n\n\tresponse, err := helm.ReposGet(clusterName)\n\tif err != nil {\n\t\tlog.Error(\"Error during get helm repo list.\", err.Error())\n\t\tc.JSON(http.StatusInternalServerError, htype.ErrorResponse{\n\t\t\tCode: http.StatusInternalServerError,\n\t\t\tMessage: \"Error listing helm repos\",\n\t\t\tError: err.Error(),\n\t\t})\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, response)\n\treturn\n}", "func (e *Event) GetRepo() *Repository {\n\tif e == nil {\n\t\treturn nil\n\t}\n\treturn e.Repo\n}", "func GetUserRepository() user.UserRepository {\n\treturn &userRepository{UserStorage}\n}", "func getRepo(repos []config.Repository, repoName string) (config.Repository, bool) {\n\tfor _, repo := range repos {\n\t\tif repo.Name == repoName {\n\t\t\treturn repo, true\n\t\t}\n\t}\n\treturn config.Repository{}, false\n}", "func (w *WatchEvent) GetRepo() *Repository {\n\tif w == nil {\n\t\treturn nil\n\t}\n\treturn w.Repo\n}", "func GetLoadedNotaryRepository(trust.ImageRefAndAuth, []string) (client.Repository, error) {\n\treturn LoadedNotaryRepository{}, nil\n}", "func (repo *repoOptions) GetRepo() (*helmrepo.IndexFile, []byte, error) {\n\tu, err := parseURL(repo.url)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tresp, err := 
repo.index(u)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\tindex, err := repo.toIndexFile(resp)\n\treturn index, resp, err\n}", "func (g *getter) getRemoteRepository(remote RemoteRepository, branch string) error {\n\tremoteURL := remote.URL()\n\tlocal, err := LocalRepositoryFromURL(remoteURL)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar (\n\t\tfpath = local.FullPath\n\t\tnewPath = false\n\t)\n\n\t_, err = os.Stat(fpath)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\tnewPath = true\n\t\t\terr = nil\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tswitch {\n\tcase newPath:\n\t\tif remoteURL.Scheme == \"codecommit\" {\n\t\t\tlogger.Log(\"clone\", fmt.Sprintf(\"%s -> %s\", remoteURL.Opaque, fpath))\n\t\t} else {\n\t\t\tlogger.Log(\"clone\", fmt.Sprintf(\"%s -> %s\", remoteURL, fpath))\n\t\t}\n\t\tvar (\n\t\t\tlocalRepoRoot = fpath\n\t\t\trepoURL = remoteURL\n\t\t)\n\t\tvcs, ok := vcsRegistry[g.vcs]\n\t\tif !ok {\n\t\t\tvcs, repoURL, err = remote.VCS()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tif l := detectLocalRepoRoot(remoteURL.Path, repoURL.Path); l != \"\" {\n\t\t\tlocalRepoRoot = filepath.Join(local.RootPath, remoteURL.Hostname(), l)\n\t\t}\n\n\t\tif remoteURL.Scheme == \"codecommit\" {\n\t\t\trepoURL, _ = url.Parse(remoteURL.Opaque)\n\t\t}\n\t\tif getRepoLock(localRepoRoot) {\n\t\t\treturn vcs.Clone(&vcsGetOption{\n\t\t\t\turl: repoURL,\n\t\t\t\tdir: localRepoRoot,\n\t\t\t\tshallow: g.shallow,\n\t\t\t\tsilent: g.silent,\n\t\t\t\tbranch: branch,\n\t\t\t\trecursive: g.recursive,\n\t\t\t\tbare: g.bare,\n\t\t\t})\n\t\t}\n\t\treturn nil\n\tcase g.update:\n\t\tlogger.Log(\"update\", fpath)\n\t\tvcs, localRepoRoot := local.VCS()\n\t\tif vcs == nil {\n\t\t\treturn fmt.Errorf(\"failed to detect VCS for %q\", fpath)\n\t\t}\n\t\tif getRepoLock(localRepoRoot) {\n\t\t\treturn vcs.Update(&vcsGetOption{\n\t\t\t\tdir: localRepoRoot,\n\t\t\t\tsilent: g.silent,\n\t\t\t\trecursive: g.recursive,\n\t\t\t})\n\t\t}\n\t\treturn 
nil\n\t}\n\tlogger.Log(\"exists\", fpath)\n\treturn nil\n}", "func (r *RepositoryEvent) GetRepo() *Repository {\n\tif r == nil {\n\t\treturn nil\n\t}\n\treturn r.Repo\n}", "func (s *AutograderService) GetRepositories(ctx context.Context, in *pb.URLRequest) (*pb.Repositories, error) {\n\tusr, err := s.getCurrentUser(ctx)\n\tif err != nil {\n\t\ts.logger.Errorf(\"GetRepositories failed: authentication error: %w\", err)\n\t\treturn nil, ErrInvalidUserInfo\n\t}\n\tvar urls = make(map[string]string)\n\tfor _, repoType := range in.GetRepoTypes() {\n\t\trepo, _ := s.getRepositoryURL(usr, in.GetCourseID(), repoType)\n\t\t// we do not care if some repo was not found, this will append an empty url string in that case\n\t\t// frontend will take care of the rest\n\t\turls[repoType.String()] = repo\n\t}\n\treturn &pb.Repositories{URLs: urls}, nil\n}", "func (g *GitLab) Repo(ctx context.Context, user *model.User, remoteID model.ForgeRemoteID, owner, name string) (*model.Repo, error) {\n\tclient, err := newClient(g.url, user.Token, g.SkipVerify)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif remoteID.IsValid() {\n\t\tintID, err := strconv.ParseInt(string(remoteID), 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t_repo, _, err := client.Projects.GetProject(int(intID), nil, gitlab.WithContext(ctx))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn g.convertGitLabRepo(_repo)\n\t}\n\n\t_repo, err := g.getProject(ctx, client, owner, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn g.convertGitLabRepo(_repo)\n}", "func getRemoteRepository(remote RemoteRepository, doUpdate bool, isShallow bool) {\n\tremoteURL := remote.URL()\n\tlocal := LocalRepositoryFromURL(remoteURL)\n\n\tpath := local.FullPath\n\tnewPath := false\n\n\t_, err := os.Stat(path)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\tnewPath = true\n\t\t\terr = nil\n\t\t}\n\t\tutils.PanicIf(err)\n\t}\n\n\tif newPath {\n\t\tutils.Log(\"clone\", fmt.Sprintf(\"%s -> %s\", 
remoteURL, path))\n\n\t\tvcs := remote.VCS()\n\t\tif vcs == nil {\n\t\t\tutils.Log(\"error\", fmt.Sprintf(\"Could not find version control system: %s\", remoteURL))\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tvcs.Clone(remoteURL, path, isShallow)\n\t} else {\n\t\tif doUpdate {\n\t\t\tutils.Log(\"update\", path)\n\t\t\tlocal.VCS().Update(path)\n\t\t} else {\n\t\t\tutils.Log(\"exists\", path)\n\t\t}\n\t}\n}", "func Repository() *repository {\n\tif r == nil {\n\t\tr = &repository{\n\t\t\titems: make(map[string]string),\n\t\t}\n\t}\n\treturn r\n}", "func (p *PublicEvent) GetRepo() *Repository {\n\tif p == nil {\n\t\treturn nil\n\t}\n\treturn p.Repo\n}", "func (client ArtifactsClient) getContainerRepository(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodGet, \"/container/repositories/{repositoryId}\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response GetContainerRepositoryResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\tapiReferenceLink := \"https://docs.oracle.com/iaas/api/#/en/registry/20160918/ContainerRepository/GetContainerRepository\"\n\t\terr = common.PostProcessServiceError(err, \"Artifacts\", \"GetContainerRepository\", apiReferenceLink)\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func FetchRepo(username string, after *string) (*UserRepositoryResponse, error) {\n\tvariables, _ := json.Marshal(map[string]interface{}{\n\t\t\"username\": username,\n\t\t\"after\": after,\n\t})\n\tdata, err := FetchGhGql(UserQuery, string(variables))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tb, _ := json.Marshal(data)\n\tvar resp 
UserRepositoryResponse\n\terr = json.Unmarshal(b, &resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resp, nil\n}", "func (i *Issue) GetRepository() *Repository {\n\tif i == nil {\n\t\treturn nil\n\t}\n\treturn i.Repository\n}", "func (r *repo) Get(args *RepoArgs) ([]RepoData, error) {\n\tif err := args.validate(); err != nil {\n\t\treturn nil, err\n\t}\n\n\trepos := make([]RepoData, 0)\n\treqArgs := r.formRequestArgs(args.User, args.Org)\n\n\tif err := r.g.doFullPagination(reqArgs, extractRepos(&repos)); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn repos, nil\n}", "func (w *WebHookPayload) GetRepo() *Repository {\n\tif w == nil {\n\t\treturn nil\n\t}\n\treturn w.Repo\n}", "func (r *Resolver) Repository() generated.RepositoryResolver { return &repositoryResolver{r} }", "func (c *CreateEvent) GetRepo() *Repository {\n\tif c == nil {\n\t\treturn nil\n\t}\n\treturn c.Repo\n}", "func (o *DeployKey) GetRepository() Repository {\n\tif o == nil || o.Repository == nil {\n\t\tvar ret Repository\n\t\treturn ret\n\t}\n\treturn *o.Repository\n}", "func (s *Store) GetRepositories(username string) ([]*gh.Repository, error) {\n\tvar repo []*gh.Repository\n\tresult := s.db.Where(\"owner = ?\", username).Find(&repo)\n\tif result.Error != nil {\n\t\treturn nil, result.Error\n\t}\n\treturn repo, nil\n}", "func (s GithubService) GetGithub(githubURL string) (*models.Github, error) {\n\tswitch {\n\tcase strings.HasPrefix(githubURL, prefixSecureGithubURL) || strings.HasPrefix(githubURL, prefixGithubURL):\n\t\t// Correct url\n\tdefault:\n\t\treturn nil, errors.New(\"This is not github url.\")\n\t}\n\n\t// Get author & repo name\n\tauthor, name := func() (string, string) {\n\t\tpaths := strings.Split(githubURL, \"/\")\n\t\tif (len(paths) - 2) < 0 {\n\t\t\treturn \"\", \"\"\n\t\t}\n\t\treturn paths[len(paths)-2], paths[len(paths)-1]\n\t}()\n\n\t// Hack doc\n\tdoc, err := goquery.NewDocument(githubURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Get repo 
description\n\tvar description string\n\tdoc.Find(\".repository-meta-content\").Each(func(i int, s *goquery.Selection) {\n\t\tdescription = strings.TrimSpace(s.Text())\n\t})\n\n\t// Get repo star\n\tvar star int\n\tdoc.Find(\".social-count.js-social-count\").Each(func(i int, s *goquery.Selection) {\n\t\tstar, err = strconv.Atoi(strings.TrimSpace(s.Text()))\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Get repo languages (for item tag)\n\tvar langs []string\n\tdoc.Find(\".repository-lang-stats-graph.js-toggle-lang-stats\").Each(func(i int, s *goquery.Selection) {\n\t\tgithubLangs := strings.Split(s.Text(), \"\\n\")\n\t\tfor _, githubLang := range githubLangs {\n\t\t\ttrimedLang := strings.TrimSpace(githubLang)\n\t\t\tif len(trimedLang) <= 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlangs = append(langs, trimedLang)\n\t\t}\n\t})\n\n\treturn models.NewGithub(\n\t\tauthor,\n\t\tname,\n\t\tdescription,\n\t\tlangs,\n\t\tstar,\n\t), nil\n}", "func GetRepoAndClient(cmd *cobra.Command, cfg *config.AppConfig, repoDir string) (rr.LocalRepo, types2.Client) {\n\n\tvar err error\n\tvar targetRepo rr.LocalRepo\n\n\tif repoDir == \"\" {\n\t\ttargetRepo, err = repo.GetAtWorkingDir(cfg.Node.GitBinPath)\n\t} else {\n\t\ttargetRepo, err = repo.GetWithGitModule(cfg.Node.GitBinPath, repoDir)\n\t}\n\n\trpcClient, err := GetRPCClient(cmd, targetRepo)\n\tif err != nil {\n\t\tlog.Fatal(err.Error())\n\t}\n\n\treturn targetRepo, rpcClient\n}", "func (repo *TestRepo) Repository(t *testing.T) *git.Repository {\n\tt.Helper()\n\n\tr, err := git.NewRepository(repo.Path)\n\trequire.NoError(t, err)\n\treturn r\n}", "func GetSampleRepository(db *gorm.DB) SampleRepository {\n\treturn &sampleRepository{\n\t\tDB: db,\n\t}\n}", "func get(client *http.Client, page int, creds *credentials) ([]repo, error) {\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tq := req.URL.Query()\n\tq.Add(\"page\", strconv.Itoa(page))\n\tq.Add(\"per_page\", 
strconv.Itoa(perPage))\n\treq.URL.RawQuery = q.Encode()\n\tif creds != nil {\n\t\treq.SetBasicAuth(creds.username, creds.password)\n\t}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"cannot get %q: %s\", req.URL, resp.Status)\n\t}\n\tvar repos []repo\n\td := json.NewDecoder(resp.Body)\n\tif err = d.Decode(&repos); err != nil {\n\t\treturn nil, err\n\t}\n\treturn repos, nil\n}", "func (r *Registry) Repository(ctx context.Context, name string) (registry.Repository, error) {\n\tref := registry.Reference{\n\t\tRegistry: r.Reference.Registry,\n\t\tRepository: name,\n\t}\n\treturn newRepositoryWithOptions(ref, &r.RepositoryOptions)\n}", "func (t TestRepo) Repo() *git.Repository {\n\treturn t.repo\n}", "func OpenRepository(opts GlobalOptions) (*repository.Repository, error) {\n\tif opts.Repo == \"\" {\n\t\treturn nil, errors.Fatal(\"Please specify repository location (-r)\")\n\t}\n\n\tbe, err := open(opts.Repo, opts, opts.extended)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbe = backend.NewRetryBackend(be, 10, func(msg string, err error, d time.Duration) {\n\t\tWarnf(\"%v returned error, retrying after %v: %v\\n\", msg, d, err)\n\t})\n\n\ts := repository.New(be)\n\n\tpasswordTriesLeft := 1\n\tif stdinIsTerminal() && opts.password == \"\" {\n\t\tpasswordTriesLeft = 3\n\t}\n\n\tfor ; passwordTriesLeft > 0; passwordTriesLeft-- {\n\t\topts.password, err = ReadPassword(opts, \"enter password for repository: \")\n\t\tif err != nil && passwordTriesLeft > 1 {\n\t\t\topts.password = \"\"\n\t\t\tfmt.Printf(\"%s. Try again\\n\", err)\n\t\t}\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\terr = s.SearchKey(opts.ctx, opts.password, maxKeys, opts.KeyHint)\n\t\tif err != nil && passwordTriesLeft > 1 {\n\t\t\topts.password = \"\"\n\t\t\tfmt.Printf(\"%s. 
Try again\\n\", err)\n\t\t}\n\t}\n\tif err != nil {\n\t\tif errors.IsFatal(err) {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, errors.Fatalf(\"%s\", err)\n\t}\n\n\tif stdoutIsTerminal() && !opts.JSON {\n\t\tid := s.Config().ID\n\t\tif len(id) > 8 {\n\t\t\tid = id[:8]\n\t\t}\n\t\tif !opts.JSON {\n\t\t\tVerbosef(\"repository %v opened successfully, password is correct\\n\", id)\n\t\t}\n\t}\n\n\tif opts.NoCache {\n\t\treturn s, nil\n\t}\n\n\tc, err := cache.New(s.Config().ID, opts.CacheDir)\n\tif err != nil {\n\t\tWarnf(\"unable to open cache: %v\\n\", err)\n\t\treturn s, nil\n\t}\n\n\tif c.Created && !opts.JSON {\n\t\tVerbosef(\"created new cache in %v\\n\", c.Base)\n\t}\n\n\t// start using the cache\n\ts.UseCache(c)\n\n\toldCacheDirs, err := cache.Old(c.Base)\n\tif err != nil {\n\t\tWarnf(\"unable to find old cache directories: %v\", err)\n\t}\n\n\t// nothing more to do if no old cache dirs could be found\n\tif len(oldCacheDirs) == 0 {\n\t\treturn s, nil\n\t}\n\n\t// cleanup old cache dirs if instructed to do so\n\tif opts.CleanupCache {\n\t\tPrintf(\"removing %d old cache dirs from %v\\n\", len(oldCacheDirs), c.Base)\n\n\t\tfor _, item := range oldCacheDirs {\n\t\t\tdir := filepath.Join(c.Base, item.Name())\n\t\t\terr = fs.RemoveAll(dir)\n\t\t\tif err != nil {\n\t\t\t\tWarnf(\"unable to remove %v: %v\\n\", dir, err)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tif stdoutIsTerminal() {\n\t\t\tVerbosef(\"found %d old cache directories in %v, run `restic cache --cleanup` to remove them\\n\",\n\t\t\t\tlen(oldCacheDirs), c.Base)\n\t\t}\n\t}\n\n\treturn s, nil\n}", "func (c *CheckRunEvent) GetRepo() *Repository {\n\tif c == nil {\n\t\treturn nil\n\t}\n\treturn c.Repo\n}", "func (g *Github) GetRepositories(ctx context.Context) ([]scm.Repository, error) {\n\tallRepos, err := g.getRepositories(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trepos := make([]scm.Repository, 0, len(allRepos))\n\tfor _, r := range allRepos {\n\t\tlog := log.WithField(\"repo\", 
r.GetFullName())\n\t\tpermissions := r.GetPermissions()\n\n\t\t// Check if it's even meaningful to run on this repository or if it will just error\n\t\t// when trying to do other actions\n\t\tswitch {\n\t\tcase r.GetArchived():\n\t\t\tlog.Debug(\"Skipping repository since it's archived\")\n\t\t\tcontinue\n\t\tcase r.GetDisabled():\n\t\t\tlog.Debug(\"Skipping repository since it's disabled\")\n\t\t\tcontinue\n\t\tcase len(g.Topics) != 0 && !scm.RepoContainsTopic(r.Topics, g.Topics):\n\t\t\tlog.Debug(\"Skipping repository since it does not match repository topics\")\n\t\t\tcontinue\n\t\tcase g.SkipForks && r.GetFork():\n\t\t\tlog.Debug(\"Skipping repository since it's a fork\")\n\t\t\tcontinue\n\t\t}\n\n\t\tif g.checkPermissions {\n\t\t\tswitch {\n\t\t\tcase !permissions[\"pull\"]:\n\t\t\t\tlog.Debug(\"Skipping repository since the token does not have pull permissions\")\n\t\t\t\tcontinue\n\t\t\tcase !g.Fork && !g.ReadOnly && !permissions[\"push\"]:\n\t\t\t\tlog.Debug(\"Skipping repository since the token does not have push permissions and the run will not fork\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tnewRepo, err := g.convertRepo(r)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\trepos = append(repos, newRepo)\n\t}\n\n\treturn repos, nil\n}", "func TestRepositoryFind(t *testing.T) {\n\tdefer gock.Off()\n\n\tgock.New(\"https://gitlab.com\").\n\t\tGet(\"/api/v4/projects/diaspora/diaspora\").\n\t\tReply(200).\n\t\tType(\"application/json\").\n\t\tSetHeaders(mockHeaders).\n\t\tFile(\"testdata/repo.json\")\n\n\tclient := NewDefault()\n\tgot, res, err := client.Repositories.Find(context.Background(), \"diaspora/diaspora\")\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\twant := new(scm.Repository)\n\traw, _ := ioutil.ReadFile(\"testdata/repo.json.golden\")\n\tjson.Unmarshal(raw, want)\n\n\tif diff := cmp.Diff(got, want); diff != \"\" {\n\t\tt.Errorf(\"Unexpected Results\")\n\t\tt.Log(diff)\n\t}\n\n\tt.Run(\"Request\", 
testRequest(res))\n\tt.Run(\"Rate\", testRate(res))\n}", "func (r *Repository) R() *git.Repository {\n\treturn r.repo\n}", "func (imageName *ImageName) GetRepo(option FormatOption) string {\n\tresult := imageName.Repo\n\n\tif imageName.Namespace != \"\" {\n\t\tresult = fmt.Sprintf(\"%s/%s\", imageName.Namespace, result)\n\t}\n\n\tif option.Has(ExplicitNamespace) {\n\t\tresult = fmt.Sprintf(\"%s/%s\", \"library\", result)\n\t}\n\n\treturn result\n}", "func Get(params GetParams) (*models.RepositoryConfig, error) {\n\tif err := params.Validate(); err != nil {\n\t\treturn nil, err\n\t}\n\n\trepo, err := params.V1API.PlatformConfigurationSnapshots.GetSnapshotRepository(\n\t\tplatform_configuration_snapshots.NewGetSnapshotRepositoryParams().\n\t\t\tWithRepositoryName(params.Name),\n\t\tparams.AuthWriter,\n\t)\n\tif err != nil {\n\t\treturn nil, api.UnwrapError(err)\n\t}\n\n\treturn repo.Payload, nil\n}", "func (l *LabelEvent) GetRepo() *Repository {\n\tif l == nil {\n\t\treturn nil\n\t}\n\treturn l.Repo\n}", "func gitFetch(repo string, filePath string) (*github.RepositoryContent, error) {\n\tusername, token := os.Getenv(\"GITHUB_USER\"), os.Getenv(\"GITHUB_TOKEN\")\n\ttp := github.BasicAuthTransport{\n\t\tUsername: username,\n\t\tPassword: token,\n\t}\n\tgitClient := github.NewClient(tp.Client())\n\tctx := context.Background()\n\tuser, _, err := gitClient.Users.Get(ctx, \"\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tlog.Info(\"\\n%v\\n\", github.Stringify(user.Login))\n\tlog.Info(\"\\n%v\\n\", github.Stringify(user.OrganizationsURL))\n\ts := github.Stringify(user.Login)\n\tgitUser := s[1 : len(s)-1]\n\trcg := &github.RepositoryContentGetOptions{Ref: \"master\"}\n\tfc, _, _, err := gitClient.Repositories.GetContents(ctx, gitUser, repo, filePath, rcg)\n\treturn fc, err\n}", "func (s *StatusEvent) GetRepo() *Repository {\n\tif s == nil {\n\t\treturn nil\n\t}\n\treturn s.Repo\n}", "func getRun(request GetRequest) (err error) {\n\treturn 
repository.InstallRepository(request.RepoUrl)\n}", "func (d *DeploymentEvent) GetRepo() *Repository {\n\tif d == nil {\n\t\treturn nil\n\t}\n\treturn d.Repo\n}", "func (p *ProjectCardEvent) GetRepo() *Repository {\n\tif p == nil {\n\t\treturn nil\n\t}\n\treturn p.Repo\n}", "func GetOperationImplementationRepository(ctx context.Context, deploymentID, nodeTemplateImpl, nodeTypeImpl, operationName string) (string, error) {\n\treturn getOperationImplementation(ctx, deploymentID, nodeTemplateImpl, nodeTypeImpl, operationName, \"repository\")\n}", "func (c *config) Repo(u *model.User, owner, name string) (*model.Repo, error) {\n\trepo, err := c.newClient(u).FindRepo(owner, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn convertRepo(repo), nil\n}", "func (f *ForkEvent) GetRepo() *Repository {\n\tif f == nil {\n\t\treturn nil\n\t}\n\treturn f.Repo\n}" ]
[ "0.76943356", "0.74888605", "0.7419276", "0.73818403", "0.73109317", "0.73045814", "0.72826105", "0.72484", "0.7221816", "0.72175574", "0.72043943", "0.71991307", "0.71787184", "0.7144311", "0.7124235", "0.7099846", "0.70911855", "0.6998159", "0.695291", "0.69476277", "0.69378054", "0.6907062", "0.686312", "0.6861174", "0.68541753", "0.684847", "0.68457127", "0.6711561", "0.6704657", "0.66837126", "0.66342324", "0.66314125", "0.66125786", "0.65895605", "0.65520155", "0.65514904", "0.6546696", "0.6532696", "0.65303195", "0.6524628", "0.6476556", "0.6468", "0.6465292", "0.6395865", "0.6362306", "0.63127524", "0.62964565", "0.6284828", "0.624555", "0.62347734", "0.6223179", "0.6217334", "0.61977875", "0.6193148", "0.616598", "0.6142319", "0.6129391", "0.61273134", "0.6124068", "0.61184675", "0.61124384", "0.61050874", "0.6103282", "0.60765797", "0.6059381", "0.6047845", "0.6016128", "0.60118073", "0.601026", "0.6003655", "0.5994688", "0.59912944", "0.59865814", "0.59818757", "0.59714925", "0.5970902", "0.5929178", "0.5921805", "0.591647", "0.59138167", "0.5900997", "0.5897646", "0.5897191", "0.5895815", "0.58923596", "0.5885605", "0.5884863", "0.5879913", "0.58768713", "0.5865165", "0.5857528", "0.5853967", "0.5848452", "0.58452874", "0.5838124", "0.5828355", "0.58249843", "0.58066016", "0.58055556", "0.5802896" ]
0.669049
29
CreateRelease creates a new release object in the GitHub API
func (c *Client) CreateRelease(ctx context.Context, req *github.RepositoryRelease) (*github.RepositoryRelease, error) { release, res, err := c.Repositories.CreateRelease(context.TODO(), c.Owner, c.Repo, req) if err != nil { return nil, errors.Wrap(err, "failed to create a release") } if res.StatusCode != http.StatusCreated { return nil, errors.Errorf("create release: invalid status: %s", res.Status) } return release, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func CreateRelease(res http.ResponseWriter, req *http.Request) {\n\tres.Header().Set(\"Content-Type\", \"application/json\")\n\tc := Release{\"relid\", \"http://ispw:8080/ispw/ispw/releases/relid\"}\n\toutgoingJSON, err := json.Marshal(c)\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\thttp.Error(res, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tres.WriteHeader(http.StatusCreated)\n\tfmt.Fprint(res, string(outgoingJSON))\n}", "func (c *gitlabClient) CreateRelease(ctx *context.Context, body string) (releaseID string, err error) {\n\ttitle, err := tmpl.New(ctx).Apply(ctx.Config.Release.NameTemplate)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tprojectID := ctx.Config.Release.GitLab.Owner + \"/\" + ctx.Config.Release.GitLab.Name\n\tlog.WithFields(log.Fields{\n\t\t\"owner\": ctx.Config.Release.GitLab.Owner,\n\t\t\"name\": ctx.Config.Release.GitLab.Name,\n\t}).Debug(\"projectID\")\n\n\tname := title\n\ttagName := ctx.Git.CurrentTag\n\trelease, resp, err := c.client.Releases.GetRelease(projectID, tagName)\n\tif err != nil && resp.StatusCode != 403 {\n\t\treturn \"\", err\n\t}\n\n\tif resp.StatusCode == 403 {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"err\": err.Error(),\n\t\t}).Debug(\"get release\")\n\n\t\tdescription := body\n\t\tref := ctx.Git.Commit\n\t\tgitURL := ctx.Git.URL\n\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"name\": name,\n\t\t\t\"description\": description,\n\t\t\t\"ref\": ref,\n\t\t\t\"url\": gitURL,\n\t\t}).Debug(\"creating release\")\n\t\trelease, _, err = c.client.Releases.CreateRelease(projectID, &gitlab.CreateReleaseOptions{\n\t\t\tName: &name,\n\t\t\tDescription: &description,\n\t\t\tRef: &ref,\n\t\t\tTagName: &tagName,\n\t\t})\n\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"err\": err.Error(),\n\t\t\t}).Debug(\"error create release\")\n\t\t\treturn \"\", err\n\t\t}\n\t\tlog.WithField(\"name\", release.Name).Info(\"release created\")\n\t} else {\n\t\tdesc := body\n\t\tif release != nil && 
release.DescriptionHTML != \"\" {\n\t\t\tdesc = release.DescriptionHTML\n\t\t}\n\n\t\trelease, _, err = c.client.Releases.UpdateRelease(projectID, tagName, &gitlab.UpdateReleaseOptions{\n\t\t\tName: &name,\n\t\t\tDescription: &desc,\n\t\t})\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"err\": err.Error(),\n\t\t\t}).Debug(\"error update release\")\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tlog.WithField(\"name\", release.Name).Info(\"release updated\")\n\t}\n\n\treturn tagName, err // gitlab references a tag in a repo by its name\n}", "func CreateRelease(ctx *context.APIContext) {\n\t// swagger:operation POST /repos/{owner}/{repo}/releases repository repoCreateRelease\n\t// ---\n\t// summary: Create a release\n\t// consumes:\n\t// - application/json\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// - name: body\n\t// in: body\n\t// schema:\n\t// \"$ref\": \"#/definitions/CreateReleaseOption\"\n\t// responses:\n\t// \"201\":\n\t// \"$ref\": \"#/responses/Release\"\n\t// \"404\":\n\t// \"$ref\": \"#/responses/notFound\"\n\t// \"409\":\n\t// \"$ref\": \"#/responses/error\"\n\tform := web.GetForm(ctx).(*api.CreateReleaseOption)\n\trel, err := repo_model.GetRelease(ctx.Repo.Repository.ID, form.TagName)\n\tif err != nil {\n\t\tif !repo_model.IsErrReleaseNotExist(err) {\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetRelease\", err)\n\t\t\treturn\n\t\t}\n\t\t// If target is not provided use default branch\n\t\tif len(form.Target) == 0 {\n\t\t\tform.Target = ctx.Repo.Repository.DefaultBranch\n\t\t}\n\t\trel = &repo_model.Release{\n\t\t\tRepoID: ctx.Repo.Repository.ID,\n\t\t\tPublisherID: ctx.Doer.ID,\n\t\t\tPublisher: ctx.Doer,\n\t\t\tTagName: form.TagName,\n\t\t\tTarget: form.Target,\n\t\t\tTitle: 
form.Title,\n\t\t\tNote: form.Note,\n\t\t\tIsDraft: form.IsDraft,\n\t\t\tIsPrerelease: form.IsPrerelease,\n\t\t\tIsTag: false,\n\t\t\tRepo: ctx.Repo.Repository,\n\t\t}\n\t\tif err := release_service.CreateRelease(ctx.Repo.GitRepo, rel, nil, \"\"); err != nil {\n\t\t\tif repo_model.IsErrReleaseAlreadyExist(err) {\n\t\t\t\tctx.Error(http.StatusConflict, \"ReleaseAlreadyExist\", err)\n\t\t\t} else {\n\t\t\t\tctx.Error(http.StatusInternalServerError, \"CreateRelease\", err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tif !rel.IsTag {\n\t\t\tctx.Error(http.StatusConflict, \"GetRelease\", \"Release is has no Tag\")\n\t\t\treturn\n\t\t}\n\n\t\trel.Title = form.Title\n\t\trel.Note = form.Note\n\t\trel.IsDraft = form.IsDraft\n\t\trel.IsPrerelease = form.IsPrerelease\n\t\trel.PublisherID = ctx.Doer.ID\n\t\trel.IsTag = false\n\t\trel.Repo = ctx.Repo.Repository\n\t\trel.Publisher = ctx.Doer\n\t\trel.Target = form.Target\n\n\t\tif err = release_service.UpdateRelease(ctx.Doer, ctx.Repo.GitRepo, rel, nil, nil, nil); err != nil {\n\t\t\tctx.Error(http.StatusInternalServerError, \"UpdateRelease\", err)\n\t\t\treturn\n\t\t}\n\t}\n\tctx.JSON(http.StatusCreated, convert.ToAPIRelease(ctx, ctx.Repo.Repository, rel))\n}", "func (g *GHR) CreateRelease(ctx context.Context, req *github.RepositoryRelease, recreate bool) (*github.RepositoryRelease, error) {\n\n\t// When draft release creation is requested,\n\t// create it without any check (it can).\n\tif *req.Draft {\n\t\tfmt.Fprintln(g.outStream, \"==> Create a draft release\")\n\t\treturn g.GitHub.CreateRelease(ctx, req)\n\t}\n\n\t// Always create release as draft first. 
After uploading assets, turn off\n\t// draft unless the `-draft` flag is explicitly specified.\n\t// It is to prevent users from seeing empty release.\n\treq.Draft = github.Bool(true)\n\n\t// Check release exists.\n\t// If release is not found, then create a new release.\n\trelease, err := g.GitHub.GetRelease(ctx, *req.TagName)\n\tif err != nil {\n\t\tif !errors.Is(err, ErrReleaseNotFound) {\n\t\t\treturn nil, fmt.Errorf(\"failed to get release: %w\", err)\n\t\t}\n\t\tDebugf(\"Release (with tag %s) not found: create a new one\",\n\t\t\t*req.TagName)\n\n\t\tif recreate {\n\t\t\tfmt.Fprintf(g.outStream,\n\t\t\t\t\"WARNING: '-recreate' is specified but release (%s) not found\",\n\t\t\t\t*req.TagName)\n\t\t}\n\n\t\tfmt.Fprintln(g.outStream, \"==> Create a new release\")\n\t\treturn g.GitHub.CreateRelease(ctx, req)\n\t}\n\n\t// recreate is not true. Then use that existing release.\n\tif !recreate {\n\t\tDebugf(\"Release (with tag %s) exists: use existing one\",\n\t\t\t*req.TagName)\n\n\t\tfmt.Fprintf(g.outStream, \"WARNING: found release (%s). 
Use existing one.\\n\",\n\t\t\t*req.TagName)\n\t\treturn release, nil\n\t}\n\n\t// When recreate is requested, delete existing release and create a\n\t// new release.\n\tfmt.Fprintln(g.outStream, \"==> Recreate a release\")\n\tif err := g.DeleteRelease(ctx, *release.ID, *req.TagName); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn g.GitHub.CreateRelease(ctx, req)\n}", "func (r *RLS) CreateRelease(ctx context.Context, req *github.RepositoryRelease, recreate bool) (*github.RepositoryRelease, error) {\n\n\t// When draft release creation is requested,\n\t// create it without any check (it can).\n\tif *req.Draft {\n\t\tfmt.Fprintln(r.outStream, \"==> Create a draft release\")\n\t\treturn r.GitHub.CreateRelease(ctx, req)\n\t}\n\n\t// Check release exists.\n\t// If release is not found, then create a new release.\n\trelease, err := r.GitHub.GetRelease(ctx, *req.TagName)\n\tif err != nil {\n\t\tif err != RelaseNotFound {\n\t\t\treturn nil, errors.Wrap(err, \"failed to get release\")\n\t\t}\n\t\tif recreate {\n\t\t\tfmt.Fprintf(r.outStream,\n\t\t\t\t\"WARNING: '-recreate' is specified but release (%s) not found\",\n\t\t\t\t*req.TagName)\n\t\t}\n\n\t\tfmt.Fprintln(r.outStream, \"==> Create a new release\")\n\t\treturn r.GitHub.CreateRelease(ctx, req)\n\t}\n\n\t// recreate is not true. Then use that existing release.\n\tif !recreate {\n\t\tfmt.Fprintf(r.outStream, \"WARNING: found release (%s). 
Use existing one.\\n\",\n\t\t\t*req.TagName)\n\t\treturn release, nil\n\t}\n\n\t// When recreate is requested, delete existing release and create a\n\t// new release.\n\tfmt.Fprintln(r.outStream, \"==> Recreate a release\")\n\tif err := r.DeleteRelease(ctx, *release.ID, *req.TagName); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn r.GitHub.CreateRelease(ctx, req)\n}", "func (c *GitHub) CreateRelease(ctx context.Context, r git.Release) (*git.Release, error) {\n\tc.Logger.Debugf(\"Creating a release %+v\", r)\n\trelease, _, err := c.Client.CreateRelease(ctx, r.ID.Repository.Owner, r.ID.Repository.Name, &github.RepositoryRelease{\n\t\tName: github.String(r.Name),\n\t\tTagName: github.String(r.TagName.Name()),\n\t\tTargetCommitish: github.String(r.TargetCommitish),\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"GitHub API error: %w\", err)\n\t}\n\treturn &git.Release{\n\t\tID: git.ReleaseID{\n\t\t\tRepository: r.ID.Repository,\n\t\t\tInternalID: release.GetID(),\n\t\t},\n\t\tTagName: git.TagName(release.GetTagName()),\n\t\tTargetCommitish: release.GetTargetCommitish(),\n\t\tName: release.GetName(),\n\t}, nil\n}", "func (a *Client) Create(params *CreateParams, authInfo runtime.ClientAuthInfoWriter) (*CreateOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewCreateParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"Create\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/{organization}/{project}/_apis/release/releases\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &CreateReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*CreateOK), nil\n\n}", "func (r *Releaser) Create(name string) (*Release, error) 
{\n\trelease, _, err := r.client.Repositories.CreateRelease(context.Background(), r.owner, r.repository, &gogithub.RepositoryRelease{Name: gogithub.String(name), TagName: gogithub.String(name)})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlog.Printf(\"created release ID: %v, tag: %v\", *release.ID, *release.TagName)\n\treturn &Release{ID: *release.ID, TagName: *release.TagName, Releaser: r}, nil\n}", "func (operator *AccessOperator) CreateRelease(cxt context.Context, option *ReleaseOption) (string, error) {\n\tif option == nil {\n\t\treturn \"\", fmt.Errorf(\"Lost create Commit info\")\n\t}\n\n\tgrpcOptions := []grpc.CallOption{\n\t\tgrpc.WaitForReady(true),\n\t}\n\n\t// query business first.\n\tbusiness, app, err := getBusinessAndApp(operator, operator.Business, option.AppName)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\trequest := &accessserver.CreateReleaseReq{\n\t\tSeq: pkgcommon.Sequence(),\n\t\tBid: business.Bid,\n\t\tName: option.Name,\n\t\tCommitid: option.CommitID,\n\t\tCreator: operator.User,\n\t}\n\n\t// check strategy for this release.\n\tif len(option.StrategyName) != 0 {\n\t\tstrategy, styerr := operator.innerGetStrategyByID(cxt, business.Bid, app.Appid, option.StrategyName)\n\t\tif styerr != nil {\n\t\t\treturn \"\", styerr\n\t\t}\n\n\t\tif strategy == nil {\n\t\t\tlogger.V(3).Infof(\"CreateRelease: No relative Strategy %s with Release.\", option.StrategyName)\n\t\t\treturn \"\", fmt.Errorf(\"No relative Strategy %s\", option.StrategyName)\n\t\t}\n\t\trequest.Strategyid = strategy.Strategyid\n\t}\n\n\tresponse, err := operator.Client.CreateRelease(cxt, request, grpcOptions...)\n\tif err != nil {\n\t\tlogger.V(3).Infof(\n\t\t\t\"CreateRelease: post new Release %s for App[%s]/Cfgset[%s]/Commit %s failed, %s\",\n\t\t\toption.Name, option.AppName, option.CfgSetName, option.CommitID, err.Error(),\n\t\t)\n\t\treturn \"\", err\n\t}\n\tif response.ErrCode != common.ErrCode_E_OK {\n\t\tlogger.V(3).Infof(\n\t\t\t\"CreateStrategy: post new Release %s 
for App[%s]/Cfgset[%s]/Commit %s successfully, but reponse failed: %s\",\n\t\t\toption.Name, option.AppName, option.CfgSetName, option.CommitID, response.ErrMsg,\n\t\t)\n\t\treturn \"\", fmt.Errorf(\"%s\", response.ErrMsg)\n\t}\n\tif len(response.Releaseid) == 0 {\n\t\tlogger.V(3).Infof(\"CreateStrategy: BSCP system error, No ReleaseID response\")\n\t\treturn \"\", fmt.Errorf(\"Lost ReleaseID from configuraiotn platform\")\n\t}\n\treturn response.Releaseid, nil\n}", "func (api *APIClient) CreateBranchRelease(blockID int, branch string) (int, error) {\n\tvalues := url.Values{\"branch_name\": {branch}}\n\treq, err := http.NewRequest(\"POST\", fmt.Sprintf(\"%s/api/v1/blocks/%d/releases?%s\", api.baseURL, blockID, values.Encode()), nil)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\treq.Header.Set(\"Source\", \"gLearn_cli\")\n\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"Bearer %s\", api.Credentials.token))\n\n\tres, err := api.client.Do(req)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer res.Body.Close()\n\n\tif res.StatusCode != http.StatusOK {\n\t\treturn 0, fmt.Errorf(\"Error: response status: %d\", res.StatusCode)\n\t}\n\n\tvar r ReleaseResponse\n\n\terr = json.NewDecoder(res.Body).Decode(&r)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn r.ReleaseID, nil\n}", "func NewRelease(ctx *pulumi.Context,\n\tname string, args *ReleaseArgs, opts ...pulumi.ResourceOption) (*Release, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.Project == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Project'\")\n\t}\n\tvar resource Release\n\terr := ctx.RegisterResource(\"google-native:firebaserules/v1:Release\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func NewRelease(tag string) Release {\n\treturn Release{\n\t\tTag: tag,\n\t}\n}", "func 
NewRelease(ctx *pulumi.Context,\n\tname string, args *ReleaseArgs, opts ...pulumi.ResourceOption) (*Release, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.ChannelId == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'ChannelId'\")\n\t}\n\tif args.SiteId == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'SiteId'\")\n\t}\n\treplaceOnChanges := pulumi.ReplaceOnChanges([]string{\n\t\t\"channelId\",\n\t\t\"project\",\n\t\t\"siteId\",\n\t})\n\topts = append(opts, replaceOnChanges)\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource Release\n\terr := ctx.RegisterResource(\"google-native:firebasehosting/v1beta1:Release\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func NewRelease(name, namespace, chart, chartVersion string, chartSpec ChartSpec, currentReleaseVersion int32, values Values, usedSubstitute Substitute, overrides Overrides) Release {\n\treturn Release{Name: name, Namespace: namespace, Chart: chart, ChartVersion: chartVersion, ChartSpec: chartSpec, CurrentReleaseVersion: currentReleaseVersion, Values: values, usedSubstitute: usedSubstitute, overrides: overrides}\n}", "func NewRelease(executer executer.Executer, config Config) Release {\n\treturn &release{executer: executer, config: config}\n}", "func createTrackRelease(config Configs, versionCodes googleapi.Int64s) (*androidpublisher.TrackRelease, error) {\n\tnewRelease := &androidpublisher.TrackRelease{\n\t\tVersionCodes: versionCodes,\n\t\tStatus: config.Status,\n\t\tInAppUpdatePriority: int64(config.UpdatePriority),\n\t}\n\tlog.Infof(\"Release version codes are: %v\", newRelease.VersionCodes)\n\n\tif newRelease.Status == \"\" {\n\t\tnewRelease.Status = releaseStatusFromConfig(config.UserFraction)\n\t}\n\n\tif shouldApplyUserFraction(newRelease.Status) {\n\t\tnewRelease.UserFraction = 
config.UserFraction\n\t}\n\n\tif config.ReleaseName != \"\" {\n\t\tnewRelease.Name = config.ReleaseName\n\t}\n\n\tif err := updateListing(config.WhatsnewsDir, newRelease); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to update listing, reason: %v\", err)\n\t}\n\n\treturn newRelease, nil\n}", "func (c *GitHub) CreateReleaseAsset(ctx context.Context, a git.ReleaseAsset) error {\n\tc.Logger.Debugf(\"Creating a release asset %+v\", a)\n\tf, err := os.Open(a.RealPath)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not open the file: %w\", err)\n\t}\n\tdefer f.Close()\n\t_, _, err = c.Client.UploadReleaseAsset(ctx, a.Release.Repository.Owner, a.Release.Repository.Name, a.Release.InternalID, &github.UploadOptions{\n\t\tName: a.Name,\n\t}, f)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"GitHub API error: %w\", err)\n\t}\n\treturn nil\n}", "func (s *ReleaseService) AddTextRelease(r *Release, authToken string) (*Release, error) {\n\tvar (\n\t\tmethod = http.MethodPost\n\t\tpath = fmt.Sprintf(\"/releases\")\n\t)\n\treq := s.client.newRequest(path, method)\n\taddJWTToRequest(req, authToken)\n\tr.Type = Text\n\terr := addBodyToRequestAsJSON(req, r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn s.addRelease(req)\n}", "func (r *ReconcileCanary) CreatePrimaryRelease(instance *kharonv1alpha1.Canary) (reconcile.Result, error) {\n\tlog.Info(\"ACTION {CREATE_PRIMARY_RELEASE}\")\n\t// Create a Service for TargetRef\n\ttargetService, err := r.CreateServiceForTargetRef(instance)\n\tif err != nil && !errors.IsAlreadyExists(err) {\n\t\treturn r.ManageError(instance, err)\n\t}\n\n\t// Create a Route that points to the targetService with no alternate service\n\tprimaryService := &DestinationServiceDef{\n\t\tName: targetService.Name,\n\t\tWeight: 100,\n\t}\n\tcanaryService := &DestinationServiceDef{}\n\tif route, err := r.CreateRouteForCanary(instance, primaryService, canaryService); err != nil {\n\t\tif errors.IsAlreadyExists(err) {\n\t\t\tif _, err := 
r.UpdateRouteDestinationsForCanary(route, primaryService, canaryService); err != nil {\n\t\t\t\treturn r.ManageError(instance, err)\n\t\t\t}\n\t\t}\n\t}\n\n\t// Update Status with new Release!\n\tinstance.Status.IsCanaryRunning = false\n\tinstance.Status.CanaryWeight = 0\n\tinstance.Status.Iterations = 0\n\tinstance.Status.ReleaseHistory = append(instance.Status.ReleaseHistory, kharonv1alpha1.Release{\n\t\tID: instance.Spec.TargetRef.Name,\n\t\tName: instance.Spec.TargetRef.Name,\n\t\tRef: instance.Spec.TargetRef,\n\t})\n\n\t// Send notification event\n\tr.recorder.Eventf(instance, \"Normal\", string(kharonv1alpha1.CreatePrimaryRelease), \"Primary release deployed from %s\", instance.Spec.TargetRef.Name)\n\n\treturn r.ManageSuccess(instance, time.Duration(instance.Spec.CanaryAnalysis.Interval)*time.Second, kharonv1alpha1.CreatePrimaryRelease)\n}", "func newTestRelease() *Release {\n\tv, _ := version.NewVersion(\"1.0.0\")\n\tt, _ := time.Parse(time.RFC1123Z, \"Fri, 13 May 2016 12:00:00 +0200\")\n\n\treturn &Release{\n\t\tversion: v,\n\t\tbuild: \"1000\",\n\t\ttitle: \"Test\",\n\t\tdescription: \"Test\",\n\t\tpublishedDateTime: NewPublishedDateTime(&t),\n\t\treleaseNotesLink: \"https://example.com/changelogs/1.0.0.html\",\n\t\tminimumSystemVersion: \"10.9\",\n\t\tdownloads: []Download{\n\t\t\t*NewDownload(\"https://example.com/1.0.0/one.dmg\", \"application/octet-stream\", 100000),\n\t\t\t*NewDownload(\"https://example.com/1.0.0/two.dmg\", \"application/octet-stream\", 100000),\n\t\t},\n\t\tisPreRelease: false,\n\t}\n}", "func (s *Services) Release(ctx context.Context, request *proto.ReleaseRequest) (*proto.ReleaseResponse, error) {\n\tvar result models.Release\n\tquery := s.DB\n\n\tif request.Id != 0 {\n\t\tquery = query.Where(\"id = ?\", request.Id)\n\t}\n\n\tif err := query.First(&result).Error; err != nil {\n\n\t\t// If nothing was found\n\t\tif gorm.IsRecordNotFoundError(err) {\n\t\t\treturn &proto.ReleaseResponse{Release: nil}, 
nil\n\t\t}\n\n\t\tlog.Println(err)\n\t\treturn nil, err\n\t}\n\n\treturn &proto.ReleaseResponse{Release: result.ToProto()}, nil\n}", "func runReleaseCmd(cmd *cobra.Command, args []string) {\n\tconfigFile, _ := cmd.Flags().GetString(\"config\")\n\tconfig := &config.Config{}\n\terr := config.Load(configFile)\n\tif err != nil {\n\t\tfmt.Printf(\"could not load config file: %v\\n\", err)\n\t\tos.Exit(1)\n\t\treturn\n\t}\n\n\tspinner, err := initSpinner(fmt.Sprintf(\"Releasing v%s of %s\", args[0], config.Repository))\n\tif err != nil {\n\t\tfmt.Println(\"could not init spinner\")\n\t\tos.Exit(1)\n\t\treturn\n\t}\n\tspinner.Start()\n\n\tnewRelease, err := github.NewRelease(config, args, spinner)\n\tif err != nil {\n\t\tspinner.StopFailMessage(fmt.Sprintf(\"%v\", err))\n\t\tspinner.StopFail()\n\t\tos.Exit(1)\n\t\treturn\n\t}\n\n\tcl, err := changelog.HandleChangelog(newRelease.ProjectName, newRelease.Version, newRelease.Date, spinner)\n\tif err != nil {\n\t\tspinner.StopFailMessage(fmt.Sprintf(\"%v\", err))\n\t\tspinner.StopFail()\n\t\tos.Exit(1)\n\t\treturn\n\t}\n\n\tnewRelease.Changelog = cl\n\n\tvar binaryPath string\n\tskipBinary, _ := cmd.Flags().GetBool(\"skipBinary\")\n\tif !skipBinary {\n\t\t// set project build path so we have a predictable location\n\t\tbinaryPath = fmt.Sprintf(binaryPathFmt, newRelease.ProjectName, newRelease.Version)\n\t\trunBuildCmd(cmd, []string{newRelease.Version, binaryPath})\n\t}\n\n\ttokenFile, _ := cmd.Flags().GetString(\"tokenFile\")\n\terr = newRelease.CreateGithubRelease(tokenFile, binaryPath, spinner)\n\tif err != nil {\n\t\tspinner.StopFailMessage(fmt.Sprintf(\"%v\", err))\n\t\tspinner.StopFail()\n\t\tos.Exit(1)\n\t\treturn\n\t}\n\n\tspinner.Suffix(\" Finished release\")\n\tspinner.Stop()\n}", "func (client *KeyVaultClient) releaseCreateRequest(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyReleaseParameters, options *KeyVaultClientReleaseOptions) (*policy.Request, error) {\n\thost := 
\"{vaultBaseUrl}\"\n\thost = strings.ReplaceAll(host, \"{vaultBaseUrl}\", vaultBaseURL)\n\turlPath := \"/keys/{key-name}/{key-version}/release\"\n\tif keyName == \"\" {\n\t\treturn nil, errors.New(\"parameter keyName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{key-name}\", url.PathEscape(keyName))\n\tif keyVersion == \"\" {\n\t\treturn nil, errors.New(\"parameter keyVersion cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{key-version}\", url.PathEscape(keyVersion))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"7.3\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func NewRelease(cache string) *Release {\n\treturn &Release{CacheDir: cache}\n}", "func Release(version, commit, date string) {\n\tif version == \"\" {\n\t\tversion = \"dev\"\n\t} else if version[0] == 'v' {\n\t\tversion = version[1:]\n\t}\n\tif commit == \"\" {\n\t\tcommit = \"-\"\n\t}\n\tif date == \"\" {\n\t\tdate = \"-\"\n\t}\n\tVersion, Commit, Date = version, commit, date\n}", "func (c Client) Release(id string, params *stripe.SubscriptionScheduleReleaseParams) (*stripe.SubscriptionSchedule, error) {\n\tpath := stripe.FormatURLPath(\"/v1/subscription_schedules/%s/release\", id)\n\tsched := &stripe.SubscriptionSchedule{}\n\terr := c.B.Call(http.MethodPost, path, c.Key, params, sched)\n\n\treturn sched, err\n}", "func resourceHelmfileReleaseCreate(d *schema.ResourceData, _ interface{}) (finalErr error) {\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\tfinalErr = fmt.Errorf(\"unhandled error: %v\\n%s\", err, debug.Stack())\n\t\t}\n\t}()\n\n\trs, err := NewReleaseSetWithSingleRelease(d)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := CreateReleaseSet(newContext(d), rs, d); 
err != nil {\n\t\treturn err\n\t}\n\n\td.MarkNewResource()\n\n\t//create random uuid for the id\n\tid := xid.New().String()\n\td.SetId(id)\n\n\treturn nil\n}", "func (h *handler) Release(ctx context.Context, evt *github.ReleaseEvent) error {\n\tif evt.GetAction() != \"released\" {\n\t\tlogrus.WithField(\"action\", evt.GetAction()).Info(\"ignoring release event\")\n\t\treturn nil\n\t}\n\tnotifyRepos := h.cfg.ReleaseDispatchRepos()\n\tlogrus.WithField(\"repos\", len(notifyRepos)).Info(\"notifying repositories of release\")\n\tif len(notifyRepos) == 0 {\n\t\treturn nil\n\t}\n\n\tgh := repo.NewGitHubClient(h.cfg.GitHubToken)\n\tfeedbackIssue, err := releaseFeedbackIssue(ctx, gh, evt, notifyRepos)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlogrus.WithField(\"issue_number\", feedbackIssue.Number).Debug(\"created feedback issue\")\n\n\tdispatchOpts, err := h.releaseDispatchOptions(evt, feedbackIssue)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, notifyRepo := range notifyRepos {\n\t\tnotifyRepoParts := strings.SplitN(notifyRepo, \"/\", 2)\n\t\towner := notifyRepoParts[0]\n\t\tname := notifyRepoParts[1]\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"owner\": owner,\n\t\t\t\"name\": name,\n\t\t}).Debug(\"dispatching release to repository\")\n\t\tif _, _, err := gh.Repositories.Dispatch(ctx, owner, name, dispatchOpts); err != nil {\n\t\t\tlogrus.WithError(err).Warn(\"error dispatching update\")\n\t\t}\n\t}\n\n\treturn nil\n}", "func (c *FakeReleaseHistories) Create(ctx context.Context, releaseHistory *v1alpha1.ReleaseHistory, opts v1.CreateOptions) (result *v1alpha1.ReleaseHistory, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewCreateAction(releasehistoriesResource, c.ns, releaseHistory), &v1alpha1.ReleaseHistory{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v1alpha1.ReleaseHistory), err\n}", "func (s *Server) HandleRelease(w http.ResponseWriter, r *http.Request) {\n\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil 
{\n\t\thttp.Error(w, \"Bad Request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tvar release *shared.ReleaseEvent\n\terr = json.Unmarshal(body, &release)\n\tif err != nil {\n\t\thttp.Error(w, \"Bad Request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// Tag the request with an ID for tracing in the logs.\n\trelease.RequestID = nuid.Next()\n\tfmt.Println(release)\n\n\t// Publish event to the NATS server\n\tnc := s.NATS()\n\t\n\trelease.RequestID = nuid.Next()\n\trelease_event := shared.ReleaseEvent{release.ID, release.Time, release.NextState, release.PostMedication, release.Notes, release.RequestID}\n\trel_event, err := json.Marshal(release_event)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn\n\t}\n\n\tlog.Printf(\"requestID:%s - Publishing inspection event with patientID %d\\n\", release.RequestID, release.ID)\n\t// Publishing the message to NATS Server\n\tnc.Publish(\"patient.release\", rel_event)\n\n\tjson.NewEncoder(w).Encode(\"Release event published\")\n}", "func NewGitLabRelease(source string, config GitLabConfig) (*GitLabRelease, error) {\n\ts := strings.SplitN(source, \"/\", 2)\n\tif len(s) != 2 {\n\t\treturn nil, fmt.Errorf(\"failed to parse source: %s\", source)\n\t}\n\n\t// If config.api is not set, create a default GitLabClient\n\tvar api GitLabAPI\n\tif config.api == nil {\n\t\tvar err error\n\t\tapi, err = NewGitLabClient(config)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tapi = config.api\n\t}\n\n\treturn &GitLabRelease{\n\t\tapi: api,\n\t\towner: s[0],\n\t\tproject: s[1],\n\t}, nil\n}", "func NewMockRelease(ctrl *gomock.Controller) *MockRelease {\n\tmock := &MockRelease{ctrl: ctrl}\n\tmock.recorder = &MockReleaseMockRecorder{mock}\n\treturn mock\n}", "func GetReleaseInformation(res http.ResponseWriter, req *http.Request) {\n\tres.Header().Set(\"Content-Type\", \"application/json\")\n\tc := ReleaseInformation{\"relid\", \"app\", \"stream\", \"something\", \"xebia\", \"1234\"}\n\toutgoingJSON, err := 
json.Marshal(c)\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\thttp.Error(res, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tres.WriteHeader(http.StatusCreated)\n\tfmt.Fprint(res, string(outgoingJSON))\n}", "func (s *deployerService) Deploy(ctx context.Context, db *gorm.DB, opts DeploymentsCreateOpts) (*Release, error) {\n\tvar msg jsonmessage.JSONMessage\n\n\tr, err := s.deploy(ctx, db, opts)\n\tif err != nil {\n\t\tmsg = newJSONMessageError(err)\n\t} else {\n\t\tmsg = jsonmessage.JSONMessage{Status: fmt.Sprintf(\"Status: Created new release v%d for %s\", r.Version, r.App.Name)}\n\t}\n\n\tif err := json.NewEncoder(opts.Output).Encode(&msg); err != nil {\n\t\treturn r, err\n\t}\n\n\treturn r, err\n}", "func release(name string, year uint32, lead string) {\n\tevents.EmitEvent(MovieRelease, name, year, lead)\n}", "func (z *zfsctl) Release(ctx context.Context, name string, r bool, tag string) *execute {\n\targs := []string{\"release\"}\n\tif r {\n\t\targs = append(args, \"-r\")\n\t}\n\targs = append(args, tag, name)\n\treturn &execute{ctx: ctx, name: z.cmd, args: args}\n}", "func (a *Agent) RollbackRelease(\n\tctx context.Context,\n\tname string,\n\tversion int,\n) error {\n\tctx, span := telemetry.NewSpan(ctx, \"helm-rollback-release\")\n\tdefer span.End()\n\n\ttelemetry.WithAttributes(span,\n\t\ttelemetry.AttributeKV{Key: \"name\", Value: name},\n\t\ttelemetry.AttributeKV{Key: \"version\", Value: version},\n\t)\n\n\tcmd := action.NewRollback(a.ActionConfig)\n\tcmd.Version = version\n\treturn cmd.Run(name)\n}", "func (c *gitlabClient) Upload(\n\tctx *context.Context,\n\treleaseID string,\n\tname string,\n\tfile *os.File,\n) error {\n\tprojectID := ctx.Config.Release.GitLab.Owner + \"/\" + ctx.Config.Release.GitLab.Name\n\n\tlog.WithField(\"file\", file.Name()).Debug(\"uploading file\")\n\tprojectFile, _, err := c.client.Projects.UploadFile(\n\t\tprojectID,\n\t\tfile.Name(),\n\t\tnil,\n\t)\n\n\tif err != nil {\n\t\treturn 
err\n\t}\n\n\tlog.WithFields(log.Fields{\n\t\t\"file\": file.Name(),\n\t\t\"url\": projectFile.URL,\n\t}).Debug(\"uploaded file\")\n\n\tgitlabBaseURL := ctx.Config.GitLabURLs.Download\n\t// projectFile from upload: /uploads/<sha>/filename.txt\n\trelativeUploadURL := projectFile.URL\n\tlinkURL := gitlabBaseURL + \"/\" + projectID + relativeUploadURL\n\treleaseLink, _, err := c.client.ReleaseLinks.CreateReleaseLink(\n\t\tprojectID,\n\t\treleaseID,\n\t\t&gitlab.CreateReleaseLinkOptions{\n\t\t\tName: &name,\n\t\t\tURL: &linkURL,\n\t\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.WithFields(log.Fields{\n\t\t\"id\": releaseLink.ID,\n\t\t\"url\": releaseLink.URL,\n\t}).Debug(\"created release link\")\n\n\treturn err\n}", "func CreateReleaseDirectConnectionRequest() (request *ReleaseDirectConnectionRequest) {\n\trequest = &ReleaseDirectConnectionRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"R-kvstore\", \"2015-01-01\", \"ReleaseDirectConnection\", \"redisa\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (as *apiServer) DownloadRelease(w http.ResponseWriter, r *http.Request) {\n\treqLogger := as.logger.New(\"method\", r.Method, \"url\", r.RequestURI)\n\treqLogger.Info(\"fetching release URL\")\n\n\tvars := mux.Vars(r)\n\tctx, cancel := context.WithTimeout(r.Context(), requestTimeout)\n\tdefer cancel()\n\n\turl, err := as.githubClient.FetchReleaseURL(ctx, vars[\"owner\"], vars[\"repo\"], vars[\"tag\"], vars[\"assetName\"])\n\tif ctx.Err() != nil {\n\t\treqLogger.Error(\"error retrieving release URL\", \"err\", err, \"ctx error\", ctx.Err())\n\t\twriteHTTPError(w, reqLogger, http.StatusBadGateway, \"Bad Gateway\")\n\t\treturn\n\t}\n\tif err != nil {\n\t\tswitch t := err.(type) {\n\t\tcase GitHubError:\n\t\t\tif t.Type == TypeNotFound {\n\t\t\t\treqLogger.Info(\"data not found\", \"err\", t.WrappedError, \"vars\", vars)\n\t\t\t\twriteHTTPError(w, reqLogger, http.StatusNotFound, 
t.WrappedError.Error())\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\treqLogger.Error(\"unhandled github error\", \"err\", t.WrappedError, \"vars\", vars)\n\t\t\t\twriteHTTPError(w, reqLogger, http.StatusInternalServerError, \"Internal Server Error\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\treqLogger.Error(\"error retrieving release URL\", \"err\", err, \"vars\", vars)\n\t\twriteHTTPError(w, reqLogger, http.StatusInternalServerError, err.Error())\n\t\treturn\n\t}\n\n\treqLogger.Info(\"found release URL\", \"url\", url)\n\n\tw.Header().Set(\"Location\", url)\n\tw.WriteHeader(http.StatusMovedPermanently)\n}", "func GetRelease(ctx *context.APIContext) {\n\t// swagger:operation GET /repos/{owner}/{repo}/releases/{id} repository repoGetRelease\n\t// ---\n\t// summary: Get a release\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// - name: id\n\t// in: path\n\t// description: id of the release to get\n\t// type: integer\n\t// format: int64\n\t// required: true\n\t// responses:\n\t// \"200\":\n\t// \"$ref\": \"#/responses/Release\"\n\t// \"404\":\n\t// \"$ref\": \"#/responses/notFound\"\n\n\tid := ctx.ParamsInt64(\":id\")\n\trelease, err := repo_model.GetReleaseByID(ctx, id)\n\tif err != nil && !repo_model.IsErrReleaseNotExist(err) {\n\t\tctx.Error(http.StatusInternalServerError, \"GetReleaseByID\", err)\n\t\treturn\n\t}\n\tif err != nil && repo_model.IsErrReleaseNotExist(err) ||\n\t\trelease.IsTag || release.RepoID != ctx.Repo.Repository.ID {\n\t\tctx.NotFound()\n\t\treturn\n\t}\n\n\tif err := release.LoadAttributes(ctx); err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"LoadAttributes\", err)\n\t\treturn\n\t}\n\tctx.JSON(http.StatusOK, convert.ToAPIRelease(ctx, ctx.Repo.Repository, release))\n}", "func (repo BoshDirectorRepository) 
DeleteRelease(name string, version string) (apiResponse net.ApiResponse) {\n\tpath := fmt.Sprintf(\"/releases/%s?force=true&version=%s\", name, version)\n\tapiResponse = repo.gateway.DeleteResource(repo.config.TargetURL+path, repo.config.Username, repo.config.Password)\n\tif apiResponse.IsNotSuccessful() {\n\t\treturn\n\t}\n\tif !apiResponse.IsRedirection() {\n\t\treturn\n\t}\n\n\tvar taskStatus models.TaskStatus\n\ttaskURL, err := url.Parse(apiResponse.RedirectLocation)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tapiResponse = repo.gateway.GetResource(repo.config.TargetURL+taskURL.Path, repo.config.Username, repo.config.Password, &taskStatus)\n\tif apiResponse.IsNotSuccessful() {\n\t\treturn\n\t}\n\n\t/* Progression should be: queued, progressing, done */\n\t/* TODO task might fail; end states: done, error, cancelled */\n\tfor taskStatus.State != \"done\" {\n\t\ttime.Sleep(1)\n\t\ttaskStatus, apiResponse = repo.GetTaskStatus(taskStatus.ID)\n\t\tif apiResponse.IsNotSuccessful() {\n\t\t\treturn\n\t\t}\n\t}\n\n\treturn\n}", "func (r *ReleaseModuleServiceServer) InstallRelease(ctx context.Context, in *rudderAPI.InstallReleaseRequest) (*rudderAPI.InstallReleaseResponse, error) {\n\tgrpclog.Print(\"install\")\n\tb := bytes.NewBufferString(in.Release.Manifest)\n\terr := kubeClient.Create(in.Release.Namespace, b, 500, false)\n\tif err != nil {\n\t\tgrpclog.Printf(\"error when creating release: %v\", err)\n\t}\n\treturn &rudderAPI.InstallReleaseResponse{}, err\n}", "func NewReleaseLink(ctx *pulumi.Context,\n\tname string, args *ReleaseLinkArgs, opts ...pulumi.ResourceOption) (*ReleaseLink, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.Project == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Project'\")\n\t}\n\tif args.TagName == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'TagName'\")\n\t}\n\tif args.Url == nil {\n\t\treturn nil, errors.New(\"invalid 
value for required argument 'Url'\")\n\t}\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource ReleaseLink\n\terr := ctx.RegisterResource(\"gitlab:index/releaseLink:ReleaseLink\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func New() Release {\n\tnow := time.Now()\n\n\treturn Release{\n\t\tYear: now.Year(),\n\t\tMonth: int(now.Month()),\n\t}\n}", "func (s *SubmissionHandler) createSubmission(subInfo map[string]interface{}) error {\n\tps, e := convert.GetString(subInfo, db.PROJECTID)\n\tif e != nil {\n\t\treturn e\n\t}\n\ts.submission.ProjectId, e = convert.Id(ps)\n\tif e != nil {\n\t\treturn e\n\t}\n\ts.submission.Time, e = convert.GetInt64(subInfo, db.TIME)\n\tif e != nil {\n\t\treturn e\n\t}\n\tif e = db.Add(db.SUBMISSIONS, s.submission); e != nil {\n\t\treturn e\n\t}\n\treturn s.writeJSON(s.submission)\n}", "func (t TestDescription) Release() TestDescription {\n\treturn t.newLabel(\"RELEASE\")\n}", "func NewReleaseYear(v int) (ReleaseYear, error) {\n\tyear := ReleaseYear(0) // we must avoid assigning the incoming data from here to avoid signed integer overflow at runtime\n\tif err := year.ensureEpoch(v); err != nil {\n\t\treturn 0, err\n\t}\n\tyear = ReleaseYear(v)\n\treturn year, nil\n}", "func Release(path string, change parser.SemVerChange, ch chan Result, options ReleaseOptions) {\n\tdefer close(ch)\n\n\t// Get Git User\n\tuser, err := git.GetUser(path)\n\tif err != nil {\n\t\tch <- Result{\n\t\t\tError: fmt.Errorf(\"[Git] get user: %v\", err),\n\t\t}\n\t\treturn\n\t}\n\tch <- Result{\n\t\tPhase: PhaseGetGitUser,\n\t\tMessage: user.String(),\n\t}\n\n\t// Parse Commits\n\tcommits, err := parser.ParseCommits(path)\n\tif err != nil {\n\t\tch <- Result{\n\t\t\tError: fmt.Errorf(\"[Release] parse commits: %v\", err),\n\t\t}\n\t\treturn\n\t}\n\tch <- Result{\n\t\tPhase: PhaseParseCommits,\n\t\tMessage: strconv.Itoa(len(commits)),\n\t}\n\n\t// Read version from last bump commit if 
exist\n\tvar version string\n\tif len(commits) > 0 {\n\t\tlastCommit := commits[len(commits)-1]\n\t\tif lastCommit.SemVer != \"\" {\n\t\t\tversion = lastCommit.SemVer\n\t\t\tch <- Result{\n\t\t\t\tPhase: PhaseLastVersionFromCommit,\n\t\t\t\tMessage: version,\n\t\t\t}\n\t\t}\n\t}\n\n\t// Read version from npm (package.json) if exist\n\tvar npmVersion string\n\tisNpm := npm.HasPackage(path)\n\tif isNpm {\n\t\tpkg, err := npm.ParsePackage(path)\n\t\tif err != nil {\n\t\t\tch <- Result{\n\t\t\t\tError: fmt.Errorf(\n\t\t\t\t\t\"[Release] parse npm package: %v\",\n\t\t\t\t\terr,\n\t\t\t\t),\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tnpmVersion = pkg.Version\n\t\tch <- Result{\n\t\t\tPhase: PhaseLastVersionFromPackage,\n\t\t\tMessage: npmVersion,\n\t\t}\n\t}\n\n\t// Inconsistency between commit history and package.json version\n\tif npmVersion != \"\" && npmVersion != version {\n\t\tch <- Result{\n\t\t\tPhase: PhaseLastVersionInconsistency,\n\t\t\tMessage: fmt.Sprintf(\n\t\t\t\t\"package.json: %s, git: %s\",\n\t\t\t\tnpmVersion,\n\t\t\t\tversion,\n\t\t\t),\n\t\t}\n\t\tversion = npmVersion\n\t}\n\n\t// Find Change\n\tif change == \"\" {\n\t\tchange = semver.GetChange(commits)\n\t\tch <- Result{\n\t\t\tPhase: PhaseChangeFound,\n\t\t\tMessage: string(change),\n\t\t}\n\t}\n\n\t// Calculate new version\n\tnewVersion, err := semver.GetVersion(version, change)\n\tif err != nil {\n\t\tch <- Result{\n\t\t\tError: fmt.Errorf(\n\t\t\t\t\"[Release] get semver version: %v\",\n\t\t\t\terr,\n\t\t\t),\n\t\t}\n\t\treturn\n\t}\n\tch <- Result{\n\t\tPhase: PhaseNextVersion,\n\t\tMessage: newVersion,\n\t}\n\n\t// Generate changelog\n\tcf, _, err := changelog.Save(path, newVersion, version, change, commits, user)\n\tif err != nil {\n\t\tch <- Result{\n\t\t\tError: fmt.Errorf(\"[Release] save changelog: %v\", err),\n\t\t}\n\t\treturn\n\t}\n\tch <- Result{\n\t\tPhase: PhaseChangelogUpdated,\n\t\tMessage: cf,\n\t}\n\n\t// Version: npm\n\tif isNpm {\n\t\t_, err = npm.Version(path, newVersion, 
string(change))\n\t\tif err != nil {\n\t\t\tch <- Result{\n\t\t\t\tError: fmt.Errorf(\"[npm] version: %v\", err),\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tch <- Result{\n\t\t\tPhase: PhasePackageVersion,\n\t\t}\n\t}\n\n\t// Release: Git\n\terr = git.Release(path, newVersion, user, options.SuppressPush)\n\tif err != nil {\n\t\tch <- Result{\n\t\t\tError: fmt.Errorf(\"[Release] git: %v\", err),\n\t\t}\n\t\treturn\n\t}\n\tch <- Result{\n\t\tPhase: PhaseGitRelease,\n\t\tMessage: newVersion,\n\t}\n\n\t// Publish: npm\n\tif isNpm {\n\t\t_, err = npm.Publish(path)\n\t\tif err != nil {\n\t\t\tch <- Result{\n\t\t\t\tError: fmt.Errorf(\"[npm] publish: %v\", err),\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tch <- Result{\n\t\t\tPhase: PhasePackagePublish,\n\t\t}\n\t}\n}", "func (d *Deployment) PostRelease(version string) (string, error) {\n\tif os.Geteuid() == 0 && d.cfg.Insecure {\n\t\treturn \"\", fmt.Errorf(\n\t\t\t\"Refusing to execute post-release command from insecure %q configuration as root\",\n\t\t\td.appName)\n\t}\n\tif d.cfg.Scripts[\"postrelease\"].Cmd != \"\" {\n\t\tartifactPath, _ := makeArtifactPath(d.artifactDir, d.appName, version, d.acfg.Extension)\n\t\tversionDir, _ := makeReleasePath(d.releaseDir, version)\n\t\tcmdlineArgs := substituteVars(d.cfg.Scripts[\"postrelease\"].Args,\n\t\t\tvarValues{artifactPath: artifactPath, versionDir: versionDir})\n\t\treturn sysCommand(versionDir, d.cfg.Scripts[\"postrelease\"].Cmd, cmdlineArgs)\n\t}\n\treturn \"\", nil\n}", "func CreateDeployment(chartName string, releaseName string, valueOverrides []byte, kubeConfig []byte, clusterName string) (*rls.InstallReleaseResponse, error) {\n\tdefer tearDown()\n\n\tlogTag := \"CreateDeployment\"\n\n\tutils.LogInfof(logTag, \"Deploying chart='%s', release name='%s'.\", chartName, releaseName)\n\tdownloadedChartPath, err := downloadChartFromRepo(chartName, clusterName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tutils.LogInfof(logTag, \"Loading chart '%s'\", 
downloadedChartPath)\n\tchartRequested, err := chartutil.Load(downloadedChartPath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error loading chart: %v\", err)\n\t}\n\tif req, err := chartutil.LoadRequirements(chartRequested); err == nil {\n\t\tif err := checkDependencies(chartRequested, req); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else if err != chartutil.ErrRequirementsNotFound {\n\t\treturn nil, fmt.Errorf(\"cannot load requirements: %v\", err)\n\t}\n\tvar namespace = \"default\"\n\tif len(strings.TrimSpace(releaseName)) == 0 {\n\t\treleaseName, _ = generateName(\"\")\n\t}\n\thClient, err := GetHelmClient(kubeConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tinstallRes, err := hClient.InstallReleaseFromChart(\n\t\tchartRequested,\n\t\tnamespace,\n\t\thelm.ValueOverrides(valueOverrides),\n\t\thelm.ReleaseName(releaseName),\n\t\thelm.InstallDryRun(false),\n\t\thelm.InstallReuseName(true),\n\t\thelm.InstallDisableHooks(false),\n\t\thelm.InstallTimeout(30),\n\t\thelm.InstallWait(false))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error deploying chart: %v\", err)\n\t}\n\treturn installRes, nil\n}", "func CreateDeployment(c *gin.Context) {\n\tlog := logger.WithFields(logrus.Fields{\"tag\": constants.TagCreateDeployment})\n\tparsedRequest, err := parseCreateUpdateDeploymentRequest(c)\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t\tc.JSON(http.StatusBadRequest, htype.ErrorResponse{\n\t\t\tCode: http.StatusBadRequest,\n\t\t\tMessage: \"Error during parsing request!\",\n\t\t\tError: errors.Cause(err).Error(),\n\t\t})\n\t\treturn\n\t}\n\trelease, err := helm.CreateDeployment(parsedRequest.deploymentName,\n\t\tparsedRequest.deploymentReleaseName, parsedRequest.values, parsedRequest.kubeConfig,\n\t\tparsedRequest.clusterName)\n\tif err != nil {\n\t\t//TODO distinguish error codes\n\t\tlog.Errorf(\"Error during create deployment. 
%s\", err.Error())\n\t\tc.JSON(http.StatusBadRequest, htype.ErrorResponse{\n\t\t\tCode: http.StatusBadRequest,\n\t\t\tMessage: \"Error creating deployment\",\n\t\t\tError: err.Error(),\n\t\t})\n\t\treturn\n\t}\n\tlog.Info(\"Create deployment succeeded\")\n\n\treleaseName := release.Release.Name\n\treleaseNotes := release.Release.Info.Status.Notes\n\n\tlog.Debug(\"Release name: \", releaseName)\n\tlog.Debug(\"Release notes: \", releaseNotes)\n\tresponse := htype.CreateUpdateDeploymentResponse{\n\t\tReleaseName: releaseName,\n\t\tNotes: releaseNotes,\n\t}\n\tc.JSON(http.StatusCreated, response)\n\treturn\n}", "func (gauo *GithubAssetUpdateOne) SetRelease(g *GithubRelease) *GithubAssetUpdateOne {\n\treturn gauo.SetReleaseID(g.ID)\n}", "func Release() (err error) {\n\tif os.Getenv(\"TAG\") == \"\" {\n\t\treturn errors.New(\"MSG and TAG environment variables are required\")\n\t}\n\tif err := sh.RunV(\"git\", \"tag\", \"-a\", \"$TAG\"); err != nil {\n\t\treturn err\n\t}\n\tif err := sh.RunV(\"git\", \"push\", \"origin\", \"$TAG\"); err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tsh.RunV(\"git\", \"tag\", \"--delete\", \"$TAG\")\n\t\t\tsh.RunV(\"git\", \"push\", \"--delete\", \"origin\", \"$TAG\")\n\t\t}\n\t}()\n\treturn sh.RunV(\"goreleaser\")\n}", "func ReleaseMock(opts *MockReleaseOptions) *release.Release {\n\tdate := time.Unix(242085845, 0).UTC()\n\n\tname := opts.Name\n\tif name == \"\" {\n\t\tname = \"testrelease-\" + string(rand.Intn(100))\n\t}\n\n\tversion := 1\n\tif opts.Version != 0 {\n\t\tversion = opts.Version\n\t}\n\n\tnamespace := opts.Namespace\n\tif namespace == \"\" {\n\t\tnamespace = \"default\"\n\t}\n\n\tch := opts.Chart\n\tif opts.Chart == nil {\n\t\tch = &chart.Chart{\n\t\t\tMetadata: &chart.Metadata{\n\t\t\t\tName: \"foo\",\n\t\t\t\tVersion: \"0.1.0-beta.1\",\n\t\t\t},\n\t\t\tTemplates: []*chart.File{\n\t\t\t\t{Name: \"templates/foo.tpl\", Data: []byte(MockManifest)},\n\t\t\t},\n\t\t}\n\t}\n\n\tscode := 
release.StatusDeployed\n\tif len(opts.Status) > 0 {\n\t\tscode = opts.Status\n\t}\n\n\treturn &release.Release{\n\t\tName: name,\n\t\tInfo: &release.Info{\n\t\t\tFirstDeployed: date,\n\t\t\tLastDeployed: date,\n\t\t\tStatus: scode,\n\t\t\tDescription: \"Release mock\",\n\t\t},\n\t\tChart: ch,\n\t\tConfig: map[string]interface{}{\"name\": \"value\"},\n\t\tVersion: version,\n\t\tNamespace: namespace,\n\t\tHooks: []*release.Hook{\n\t\t\t{\n\t\t\t\tName: \"pre-install-hook\",\n\t\t\t\tKind: \"Job\",\n\t\t\t\tPath: \"pre-install-hook.yaml\",\n\t\t\t\tManifest: MockHookTemplate,\n\t\t\t\tLastRun: date,\n\t\t\t\tEvents: []release.HookEvent{release.HookPreInstall},\n\t\t\t},\n\t\t},\n\t\tManifest: MockManifest,\n\t}\n}", "func PrepareRelease(release *deployer.Release, zip_file_path *string) error {\n\tregion, account_id := to.RegionAccount()\n\trelease.SetDefaults(region, account_id, \"coinbase-step-deployer-\")\n\n\tlambda_sha, err := to.SHA256File(*zip_file_path)\n\tif err != nil {\n\t\treturn err\n\t}\n\trelease.LambdaSHA256 = &lambda_sha\n\n\t// Interpolate variables for resource strings\n\trelease.StateMachineJSON = to.InterpolateArnVariables(\n\t\trelease.StateMachineJSON,\n\t\trelease.AwsRegion,\n\t\trelease.AwsAccountID,\n\t\trelease.LambdaName,\n\t)\n\n\treturn nil\n}", "func minorRelease(f *os.File, release, draftURL, changelogURL string) {\n\t// Check for draft and use it if available\n\tlog.Printf(\"Checking if draft release notes exist for %s...\", release)\n\n\tresp, err := http.Get(draftURL)\n\tif err == nil {\n\t\tdefer resp.Body.Close()\n\t}\n\n\tif err == nil && resp.StatusCode == 200 {\n\t\tlog.Print(\"Draft found - using for release notes...\")\n\t\t_, err = io.Copy(f, resp.Body)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"error during copy to file: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tf.WriteString(\"\\n\")\n\t} else {\n\t\tlog.Print(\"Failed to find draft - creating generic template... 
(error message/status code printed below)\")\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error message: %v\", err)\n\t\t} else {\n\t\t\tlog.Printf(\"Response status code: %d\", resp.StatusCode)\n\t\t}\n\t\tf.WriteString(\"## Major Themes\\n\\n* TBD\\n\\n## Other notable improvements\\n\\n* TBD\\n\\n## Known Issues\\n\\n* TBD\\n\\n## Provider-specific Notes\\n\\n* TBD\\n\\n\")\n\t}\n\n\t// Aggregate all previous release in series\n\tf.WriteString(fmt.Sprintf(\"### Previous Release Included in %s\\n\\n\", release))\n\n\t// Regexp Example:\n\t// Assume the release tag is v1.7.0, this regexp matches \"- [v1.7.0-\" in\n\t// \"- [v1.7.0-rc.1](#v170-rc1)\"\n\t// \"- [v1.7.0-beta.2](#v170-beta2)\"\n\t// \"- [v1.7.0-alpha.3](#v170-alpha3)\"\n\treAnchor, _ := regexp.Compile(fmt.Sprintf(\"- \\\\[%s-\", release))\n\n\tresp, err = http.Get(changelogURL)\n\tif err == nil {\n\t\tdefer resp.Body.Close()\n\t}\n\n\tif err == nil && resp.StatusCode == 200 {\n\t\tbuf := new(bytes.Buffer)\n\t\tbuf.ReadFrom(resp.Body)\n\t\tfor _, line := range strings.Split(buf.String(), \"\\n\") {\n\t\t\tif anchor := reAnchor.FindStringSubmatch(line); anchor != nil {\n\t\t\t\tf.WriteString(line + \"\\n\")\n\t\t\t}\n\t\t}\n\t\tf.WriteString(\"\\n\")\n\t} else {\n\t\tlog.Print(\"Failed to fetch past changelog for minor release - continuing... 
(error message/status code printed below)\")\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error message: %v\", err)\n\t\t} else {\n\t\t\tlog.Printf(\"Response status code: %d\", resp.StatusCode)\n\t\t}\n\t}\n}", "func NewRepoCreateReleaseCreated() *RepoCreateReleaseCreated {\n\treturn &RepoCreateReleaseCreated{}\n}", "func generateVersionFromReleaseData(payload api.WebhookGithub) (name string, description string) {\n\trelease := payload[api.GithubWebhookFlagRelease].(map[string]interface{})\n\n\t// generate name\n\tif nil != release[\"tag_name\"] && canBeUsedInImageTag(release[\"tag_name\"].(string)) {\n\t\tname = release[\"tag_name\"].(string)\n\t} else {\n\t\tname = \"tag_\" + uuid.NewV4().String()\n\t}\n\n\t// generate description\n\tif nil != release[\"body\"] {\n\t\tdescription = release[\"body\"].(string)\n\t} else {\n\t\tdescription = \"\"\n\t}\n\tif nil != release[\"html_url\"] {\n\t\tdescription = description + \"\\r\\n\" + release[\"html_url\"].(string)\n\t}\n\n\tlog.Infof(\"webhook release event: name[%s] description[%s]\", name, description)\n\treturn name, description\n}", "func createDeployment(client GitHubClient, event PullRequestEvent, envName string) (*github.Deployment, error) {\n\trepoName := strings.Split(event.Repository.FullName, \"/\")\n\towner, repo := repoName[0], repoName[1]\n\tref := fmt.Sprintf(\"pull/%v/head\", event.PullRequest.Number)\n\n\treq := &github.DeploymentRequest{\n\t\tRef: github.String(ref),\n\t\tTransientEnvironment: github.Bool(true),\n\t\tEnvironment: github.String(envName),\n\t\tRequiredContexts: &[]string{},\n\t}\n\tctx := context.Background()\n\tdeployment, _, err := client.Repositories.CreateDeployment(ctx, owner, repo, req)\n\tif err != nil {\n\t\treturn deployment, err\n\t}\n\treturn deployment, nil\n}", "func (gau *GithubAssetUpdate) SetRelease(g *GithubRelease) *GithubAssetUpdate {\n\treturn gau.SetReleaseID(g.ID)\n}", "func (a *RepoAPI) createRepo(params interface{}) (resp *rpc.Response) {\n\treturn 
rpc.Success(a.mods.Repo.Create(cast.ToStringMap(params)))\n}", "func rollbackRelease(c *gin.Context, r *api.HelmRelease) error {\n\tlogEntry := log.ReqEntry(c).\n\t\tWithField(\"cluster\", r.Cluster).WithField(\"namespace\", r.Namespace).WithField(\"releaseName\", r.Name)\n\n\tlogEntry.Debugf(\"getting helm action config...\")\n\trollbackConfig, err := generateHelmActionConfig(r.Cluster, r.Namespace, logEntry)\n\tif err != nil {\n\t\tlogEntry.WithField(\"error\", err).Warningf(\"failed to generate configuration for helm action\")\n\t\treturn err\n\t}\n\trollbackAction := action.NewRollback(rollbackConfig)\n\trollbackAction.Version = int(r.Revision)\n\terr = rollbackAction.Run(r.Name)\n\tif err != nil {\n\t\tlogEntry.WithField(\"error\", err).Warningf(\"failed to run rollback action\")\n\t}\n\treturn nil\n}", "func Release() string {\n\treturn New().Version\n}", "func (p *Package) Release() error {\n\tvar body []byte\n\tvar err error\n\tlog.Entry().Infof(\"Release package %s\", p.PackageName)\n\tp.Connector.GetToken(\"/odata/aas_ocs_package\")\n\tappendum := \"/odata/aas_ocs_package/ReleasePackage?Name='\" + url.QueryEscape(p.PackageName) + \"'\"\n\tbody, err = p.Connector.Post(appendum, \"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar jPck jsonPackage\n\tif err := json.Unmarshal(body, &jPck); err != nil {\n\t\treturn errors.Wrap(err, \"Unexpected AAKaaS response for release package: \"+string(body))\n\t}\n\tp.Status = jPck.Package.Status\n\treturn nil\n}", "func (c *client) CreateReview(org, repo string, number int, r DraftReview) error {\n\tdurationLogger := c.log(\"CreateReview\", org, repo, number, r)\n\tdefer durationLogger()\n\n\t_, err := c.request(&request{\n\t\tmethod: http.MethodPost,\n\t\tpath: fmt.Sprintf(\"/repos/%s/%s/pulls/%d/reviews\", org, repo, number),\n\t\taccept: \"application/vnd.github.black-cat-preview+json\",\n\t\torg: org,\n\t\trequestBody: r,\n\t\texitCodes: []int{200},\n\t}, nil)\n\treturn err\n}", "func (s NounResource) Create(obj 
interface{}, r api2go.Request) (api2go.Responder, error) {\n\tnoun, ok := obj.(model.Noun)\n\tif !ok {\n\t\treturn &Response{}, api2go.NewHTTPError(errors.New(\"Invalid instance given\"), \"Invalid instance given\", http.StatusBadRequest)\n\t}\n\n\tid, _ := s.NounStorage.Insert(noun)\n\tnoun.SetID(id)\n\n\treturn &Response{Res: noun, Code: http.StatusCreated}, nil\n}", "func CreateDeploy(w http.ResponseWriter, r *http.Request) {\n\tdeploy := models.Deploy{}\n\terr := json.NewDecoder(r.Body).Decode(&deploy)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// Todo validate requirement id\n\n\terr = models.InsertDeploy(deploy)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tw.WriteHeader(200)\n\terr = json.NewEncoder(w).Encode(deploy)\n\tif err != nil {\n\t\tpanic(error(err))\n\t}\n}", "func NewHostingRelease(ctx *pulumi.Context,\n\tname string, args *HostingReleaseArgs, opts ...pulumi.ResourceOption) (*HostingRelease, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.SiteId == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'SiteId'\")\n\t}\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource HostingRelease\n\terr := ctx.RegisterResource(\"gcp:firebase/hostingRelease:HostingRelease\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (g *Gatherer) ReleaseNoteFromCommit(result *Result) (*ReleaseNote, error) {\n\tpr := result.pullRequest\n\n\tprBody := pr.GetBody()\n\ttext, err := noteTextFromString(prBody)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdocumentation := DocumentationFromString(prBody)\n\n\tauthor := pr.GetUser().GetLogin()\n\tauthorURL := pr.GetUser().GetHTMLURL()\n\tprURL := pr.GetHTMLURL()\n\tisFeature := hasString(labelsWithPrefix(pr, \"kind\"), \"feature\")\n\tnoteSuffix := prettifySIGList(labelsWithPrefix(pr, \"sig\"))\n\n\tisDuplicateSIG := false\n\tif len(labelsWithPrefix(pr, 
\"sig\")) > 1 {\n\t\tisDuplicateSIG = true\n\t}\n\n\tisDuplicateKind := false\n\tif len(labelsWithPrefix(pr, \"kind\")) > 1 {\n\t\tisDuplicateKind = true\n\t}\n\n\t// TODO: Spin this to sep function\n\tindented := strings.ReplaceAll(text, \"\\n\", \"\\n \")\n\tmarkdown := fmt.Sprintf(\"%s (#%d, @%s)\",\n\t\tindented, pr.GetNumber(), author)\n\tif g.options.AddMarkdownLinks {\n\t\tmarkdown = fmt.Sprintf(\"%s ([#%d](%s), [@%s](%s))\",\n\t\t\tindented, pr.GetNumber(), prURL, author, authorURL)\n\t}\n\n\tif noteSuffix != \"\" {\n\t\tmarkdown = fmt.Sprintf(\"%s [%s]\", markdown, noteSuffix)\n\t}\n\n\t// Uppercase the first character of the markdown to make it look uniform\n\tmarkdown = capitalizeString(markdown)\n\n\treturn &ReleaseNote{\n\t\tCommit: result.commit.GetSHA(),\n\t\tText: text,\n\t\tMarkdown: markdown,\n\t\tDocumentation: documentation,\n\t\tAuthor: author,\n\t\tAuthorURL: authorURL,\n\t\tPrURL: prURL,\n\t\tPrNumber: pr.GetNumber(),\n\t\tSIGs: labelsWithPrefix(pr, \"sig\"),\n\t\tKinds: labelsWithPrefix(pr, \"kind\"),\n\t\tAreas: labelsWithPrefix(pr, \"area\"),\n\t\tFeature: isFeature,\n\t\tDuplicate: isDuplicateSIG,\n\t\tDuplicateKind: isDuplicateKind,\n\t\tActionRequired: labelExactMatch(pr, \"release-note-action-required\"),\n\t\tDoNotPublish: labelExactMatch(pr, \"release-note-none\"),\n\t}, nil\n}", "func (pc PyPiClient) Release(ctx context.Context, name, version string) (*PipPackage, *http.Response, error) {\n\tif name == \"\" {\n\t\treturn nil, nil, fmt.Errorf(\"pacakge name is required and can't be empty\")\n\t}\n\n\tvar path string\n\tif version == \"\" {\n\t\tpath = fmt.Sprintf(\"%s/pypi/%s/json\", &pc.baseUrl, name)\n\t} else {\n\t\tpath = fmt.Sprintf(\"%s/pypi/%s/%s/json\", &pc.baseUrl, name, version)\n\t}\n\n\treq, err := http.NewRequestWithContext(ctx, \"GET\", path, nil)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"unable to create a request: %w\", err)\n\t}\n\tresp, err := pc.httpClient.Do(req)\n\tif err != nil {\n\t\treturn nil, 
nil, fmt.Errorf(\"unable to send the request: %w\", err)\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != 200 {\n\t\treturn nil, resp, fmt.Errorf(\"Pypi returned with !=200 status code\")\n\t}\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, resp, fmt.Errorf(\"unable to read the response body: %w\", err)\n\t}\n\n\tpp := PipPackage{}\n\tif err = json.Unmarshal(body, &pp); err != nil {\n\t\treturn nil, resp, fmt.Errorf(\"unable to parse the response body: %w\", err)\n\t}\n\n\treturn &pp, resp, nil\n}", "func (r *ReleaseManifest) BumpForRelease(ctx BosunContext, app *App, fromBranch, toBranch string, bump semver.Bump, expectedVersion semver.Version) (*App, error) {\n\tr.init()\n\tr.MarkDirty()\n\n\tname := app.Name\n\n\tappConfig := app.AppConfig\n\n\tif appConfig.BranchForRelease {\n\t\tlog := ctx.Log.WithField(\"app\", appConfig.Name)\n\t\tif !app.IsRepoCloned() {\n\t\t\treturn nil, errors.New(\"repo is not cloned but must be branched for release; what is going on?\")\n\t\t}\n\n\t\tlocalRepo := app.Repo.LocalRepo\n\t\tif localRepo.IsDirty() {\n\t\t\treturn nil, errors.Errorf(\"repo at %q is dirty, commit or stash your changes before adding it to the release\", localRepo.Path)\n\t\t}\n\n\t\tlog.Infof(\"Ensuring release branch and version correct for app %q...\", name)\n\n\t\tbranchExists, err := localRepo.DoesBranchExist(ctx, toBranch)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif branchExists {\n\t\t\tlog.Info(\"Release branch already exists, switching to it.\")\n\t\t\terr = localRepo.SwitchToBranchAndPull(ctx.Services(), toBranch)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrap(err, \"switching to release branch\")\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Info(\"Creating release branch...\")\n\t\t\terr = localRepo.SwitchToNewBranch(ctx, fromBranch, toBranch)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrap(err, \"creating release 
branch\")\n\t\t\t}\n\t\t}\n\n\t\tapp.AddReleaseToHistory(r.Version.String())\n\t\terr = app.Parent.Save()\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"saving after adding release to app history\")\n\t\t}\n\n\t\terr = app.Repo.LocalRepo.Commit(\"chore(release): add release to history\", app.Parent.FromPath)\n\t\tif err != nil &&\n\t\t\t!strings.Contains(err.Error(), \"no changes added to commit\") &&\n\t\t\t!strings.Contains(err.Error(), \"nothing to commit\") {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif bump != \"none\" {\n\t\t\tif expectedVersion.LessThan(app.Version) {\n\t\t\t\tlog.Warnf(\"Skipping version bump %q because version on branch is already %s (source branch is version %s).\", bump, app.Version, expectedVersion)\n\t\t\t} else {\n\t\t\t\tlog.Infof(\"Applying version bump %q to source branch version %s.\", bump, app.Version)\n\n\t\t\t\terr = app.BumpVersion(ctx, string(bump))\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, errors.Wrap(err, \"bumping version\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\terr = localRepo.Push()\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"pushing branch\")\n\t\t}\n\n\t\tlog.Info(\"App has been branched and bumped correctly.\")\n\n\t\tapp, err = ctx.Bosun.ReloadApp(app.Name)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"reload app after switching to new branch\")\n\t\t}\n\t}\n\n\treturn app, nil\n}", "func MockMinimalRelease(t *testing.T) *Release {\n\tvar r Release\n\terr := json.Unmarshal([]byte(`\n {\n \"release_id\": \"rr\",\n \"project_name\": \"project\",\n \"config_name\": \"config\",\n \"ami\": \"ami-123456\",\n \"subnets\": [\"subnet-1\"],\n \"user_data\": \"echo DATE\",\n \"services\": {\n \"web\": {\n \"instance_type\": \"t2.small\",\n \"security_groups\": [\"web-sg\"]\n }\n }\n }\n `), &r)\n\n\tassert.NoError(t, err)\n\tr.CreatedAt = to.Timep(time.Now())\n\n\treturn &r\n}", "func NewReleasesCommand() *cobra.Command {\n\n\tcmd := createListCommand(cmdListReleases, \"releases\", 
\"\")\n\tcmd.Flags().StringVar(&releaseParams.Name, \"name\", \"\", \"Filter releases by name\")\n\tcmd.Flags().StringVar(&releaseParams.Version, \"version\", \"\", \"Filter releases by version\")\n\tcmd.Flags().StringVar(&releaseParams.StackID, \"stack-id\", \"\", \"Filter releases by stack_id\")\n\tcmd.Flags().StringVar(&releaseParams.StackVersionID, \"stack-version-id\", \"\", \"Filter releases by stack_version_id\")\n\tcmd.Flags().StringVar(&releaseParams.ProjectID, \"project-id\", \"\", \"Filter releases by project_id\")\n\tcmd.Flags().StringVar(&releaseParams.LocImageID, \"loc-image-id\", \"\", \"Filter releases by loc_image_id\")\n\tcmd.Flags().StringVar(&releaseParams.BuildStatus, \"build-status\", \"\", \"Filter releases by build_status\")\n\n\t// Get\n\tgetCmd := createGetCommand(cmdGetReleases, \"release\", \"\")\n\tcmd.AddCommand(getCmd)\n\n\t// Create\n\tcreateCmd := NewReleasesCreateCommand()\n\tcmd.AddCommand(createCmd)\n\n\t// Delete\n\tdeleteCmd := createDeleteCommand(cmdDeleteReleases, \"release\", \"\")\n\tcmd.AddCommand(deleteCmd)\n\n\treturn cmd\n}", "func (a *Agent) UpgradeRelease(\n\tctx context.Context,\n\tconf *UpgradeReleaseConfig,\n\tvalues string,\n\tdoAuth *oauth2.Config,\n\tdisablePullSecretsInjection bool,\n\tignoreDependencies bool,\n) (*release.Release, error) {\n\tctx, span := telemetry.NewSpan(ctx, \"helm-upgrade-release\")\n\tdefer span.End()\n\n\ttelemetry.WithAttributes(span,\n\t\ttelemetry.AttributeKV{Key: \"project-id\", Value: conf.Cluster.ProjectID},\n\t\ttelemetry.AttributeKV{Key: \"cluster-id\", Value: conf.Cluster.ID},\n\t\ttelemetry.AttributeKV{Key: \"name\", Value: conf.Name},\n\t\ttelemetry.AttributeKV{Key: \"stack-name\", Value: conf.StackName},\n\t\ttelemetry.AttributeKV{Key: \"stack-revision\", Value: conf.StackRevision},\n\t)\n\n\tvaluesYaml, err := chartutil.ReadValues([]byte(values))\n\tif err != nil {\n\t\treturn nil, telemetry.Error(ctx, span, err, \"Values could not be parsed\")\n\t}\n\n\tconf.Values = 
valuesYaml\n\n\treturn a.UpgradeReleaseByValues(ctx, conf, doAuth, disablePullSecretsInjection, ignoreDependencies)\n}", "func (a *Client) UpdateRelease(params *UpdateReleaseParams, authInfo runtime.ClientAuthInfoWriter) (*UpdateReleaseOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewUpdateReleaseParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"Update Release\",\n\t\tMethod: \"PUT\",\n\t\tPathPattern: \"/{organization}/{project}/_apis/release/releases/{releaseId}\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &UpdateReleaseReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*UpdateReleaseOK), nil\n\n}", "func DeployHelmRelease(releaseName string, chart string, vals map[string]interface{}, cfg *helm.Configuration, client *helm.Upgrade) (*release.Release, error) {\n\t\n\tinfo(\"Deploying release %s of chart %s ...\", releaseName, chart)\n\thelmChartOptions, err := NewHelmChartOptionsFromConfig(chart, vals)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tchartRequested, err := helmChartOptions.LoadChart()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif chartRequested == nil || chartRequested.Metadata == nil {\n\t\treturn nil, fmt.Errorf(\"Failed to load %s chart. Check helm chart options in config\", chart)\n\t}\n\n\tvar release *release.Release\n\n\t// Checking if chart already installed and decide to use install or upgrade helm client\n\thistClient := helm.NewHistory(cfg)\n\thistClient.Max = 1\n\tif _, err := histClient.Run(releaseName); err == driver.ErrReleaseNotFound {\n\t\t// Only print this to stdout for table output\n\t\n\t\tinfo(\"Release %q does not exist. 
Installing it now.\\n\", releaseName)\n\t\t\n\t\tinstClient := helm.NewInstall(cfg)\n\t\tinstClient.CreateNamespace = false //TODO\n\t\tinstClient.ChartPathOptions = client.ChartPathOptions\n\t\tinstClient.DryRun = client.DryRun\n\t\tinstClient.DisableHooks = client.DisableHooks\n\t\tinstClient.SkipCRDs = client.SkipCRDs\n\t\tinstClient.Timeout = client.Timeout\n\t\tinstClient.Wait = client.Wait\n\t\tinstClient.Devel = client.Devel\n\t\tinstClient.Namespace = client.Namespace\n\t\tinstClient.Atomic = client.Atomic\n\t\tinstClient.PostRenderer = client.PostRenderer\n\t\tinstClient.DisableOpenAPIValidation = client.DisableOpenAPIValidation\n\t\tinstClient.SubNotes = client.SubNotes\n\n\t\tinstClient.ReleaseName = releaseName\n\t\n\t\tif chartRequested.Metadata.Deprecated {\n\t\t\tfmt.Println(\"WARNING: This chart is deprecated\")\n\t\t}\n\t\treturn instClient.Run(chartRequested, vals)\n\t\n\t} else if err != nil {\n\t\treturn nil, err\n\t}\n\n\tif req := chartRequested.Metadata.Dependencies; req != nil {\n\t\tif err := helm.CheckDependencies(chartRequested, req); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\trelease, err = client.Run(releaseName, chartRequested, vals)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"UPGRADE of %s FAILED\", releaseName)\n\t}\n\tinfo(\"Release %q has been upgraded\\n\", releaseName)\n\n\treturn release, nil\n}", "func CreatePublication(w http.ResponseWriter, r *http.Request) {\n\tuserID, err := authentication.ExtractUserID(r)\n\tif err != nil {\n\t\tresponses.Error(w, http.StatusUnauthorized, err)\n\t\treturn\n\t}\n\n\trequest, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tresponses.Error(w, http.StatusUnprocessableEntity, err)\n\t\treturn\n\t}\n\n\tvar publication models.Publication\n\n\tif err = json.Unmarshal(request, &publication); err != nil {\n\t\tresponses.Error(w, http.StatusBadRequest, err)\n\t\treturn\n\t}\n\n\tpublication.AuthorID = userID\n\n\tif err = publication.Prepare(); err != nil 
{\n\t\tresponses.Error(w, http.StatusBadRequest, err)\n\t\treturn\n\t}\n\n\tdb, err := database.Connect()\n\tif err != nil {\n\t\tresponses.Error(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\tdefer db.Close()\n\n\trepository := repositories.NewPublicationRepository(db)\n\tpublication.ID, err = repository.CreatePublication(publication)\n\tif err != nil {\n\t\tresponses.Error(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\n\tresponses.JSON(w, http.StatusCreated, publication)\n}", "func (w *Workspace) ConvertToRelease() {\n\tw.convert(ptRelease)\n}", "func DeleteRelease(ctx *context.APIContext) {\n\t// swagger:operation DELETE /repos/{owner}/{repo}/releases/{id} repository repoDeleteRelease\n\t// ---\n\t// summary: Delete a release\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// - name: id\n\t// in: path\n\t// description: id of the release to delete\n\t// type: integer\n\t// format: int64\n\t// required: true\n\t// responses:\n\t// \"204\":\n\t// \"$ref\": \"#/responses/empty\"\n\t// \"404\":\n\t// \"$ref\": \"#/responses/notFound\"\n\t// \"405\":\n\t// \"$ref\": \"#/responses/empty\"\n\n\tid := ctx.ParamsInt64(\":id\")\n\trel, err := repo_model.GetReleaseByID(ctx, id)\n\tif err != nil && !repo_model.IsErrReleaseNotExist(err) {\n\t\tctx.Error(http.StatusInternalServerError, \"GetReleaseByID\", err)\n\t\treturn\n\t}\n\tif err != nil && repo_model.IsErrReleaseNotExist(err) ||\n\t\trel.IsTag || rel.RepoID != ctx.Repo.Repository.ID {\n\t\tctx.NotFound()\n\t\treturn\n\t}\n\tif err := release_service.DeleteReleaseByID(ctx, id, ctx.Doer, false); err != nil {\n\t\tif models.IsErrProtectedTagName(err) {\n\t\t\tctx.Error(http.StatusMethodNotAllowed, \"delTag\", \"user not allowed to delete protected 
tag\")\n\t\t\treturn\n\t\t}\n\t\tctx.Error(http.StatusInternalServerError, \"DeleteReleaseByID\", err)\n\t\treturn\n\t}\n\tctx.Status(http.StatusNoContent)\n}", "func (client *ContainerClient) releaseLeaseCreateRequest(ctx context.Context, leaseID string, options *ContainerClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"comp\", \"lease\")\n\treqQP.Set(\"restype\", \"container\")\n\tif options != nil && options.Timeout != nil {\n\t\treqQP.Set(\"timeout\", strconv.FormatInt(int64(*options.Timeout), 10))\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"x-ms-lease-action\"] = []string{\"release\"}\n\treq.Raw().Header[\"x-ms-lease-id\"] = []string{leaseID}\n\tif modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {\n\t\treq.Raw().Header[\"If-Modified-Since\"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}\n\t}\n\tif modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {\n\t\treq.Raw().Header[\"If-Unmodified-Since\"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}\n\t}\n\treq.Raw().Header[\"x-ms-version\"] = []string{\"2020-10-02\"}\n\tif options != nil && options.RequestID != nil {\n\t\treq.Raw().Header[\"x-ms-client-request-id\"] = []string{*options.RequestID}\n\t}\n\treq.Raw().Header[\"Accept\"] = []string{\"application/xml\"}\n\treturn req, nil\n}", "func handleRepo(ctx context.Context, client *github.Client, repo *github.Repository) (*release, error) {\n\tif !in(orgs, repo.GetOwner().GetLogin()) {\n\t\t// return early\n\t\treturn nil, nil\n\t}\n\topt := &github.ListOptions{\n\t\tPage: 1,\n\t\tPerPage: 100,\n\t}\n\n\treleases, resp, err := client.Repositories.ListReleases(ctx, 
repo.GetOwner().GetLogin(), repo.GetName(), opt)\n\tif resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusForbidden || err != nil {\n\t\tif _, ok := err.(*github.RateLimitError); ok {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Skip it because there is no release.\n\t\treturn nil, nil\n\t}\n\tif err != nil || len(releases) < 1 {\n\t\treturn nil, err\n\t}\n\n\trl := release{\n\t\tRepository: repo,\n\t}\n\t// Get information about the binary assets for linux-amd64\n\tarch := \"linux-amd64\"\n\tfor i := 0; i < len(releases); i++ {\n\t\tr := releases[i]\n\t\tif rl.Release == nil && !r.GetDraft() {\n\t\t\t// If this is the latest release and it's not a draft make it the one\n\t\t\t// to return\n\t\t\trl.Release = r\n\t\t\tfor _, asset := range r.Assets {\n\t\t\t\trl.BinaryDownloadCount += asset.GetDownloadCount()\n\t\t\t\tif strings.HasSuffix(asset.GetName(), arch) {\n\t\t\t\t\trl.BinaryURL = asset.GetBrowserDownloadURL()\n\t\t\t\t\trl.BinaryName = asset.GetName()\n\t\t\t\t\trl.BinarySince = units.HumanDuration(time.Since(asset.GetCreatedAt().Time))\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif strings.HasSuffix(asset.GetName(), arch+\".sha256\") {\n\t\t\t\t\tc, err := getURLContent(asset.GetBrowserDownloadURL())\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t\trl.BinarySHA256 = c\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif strings.HasSuffix(asset.GetName(), arch+\".md5\") {\n\t\t\t\t\tc, err := getURLContent(asset.GetBrowserDownloadURL())\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t\trl.BinaryMD5 = c\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tfor _, asset := range r.Assets {\n\t\t\t\trl.BinaryDownloadCount += asset.GetDownloadCount()\n\t\t\t}\n\t\t}\n\t}\n\n\treturn &rl, nil\n}", "func Release(id string, params *stripe.SubscriptionScheduleReleaseParams) (*stripe.SubscriptionSchedule, error) {\n\treturn getC().Release(id, params)\n}", "func PrepareReleaseBundle(awsc 
aws.AwsClients, release *deployer.Release, zip_file_path *string) error {\n\tif err := PrepareRelease(release, zip_file_path); err != nil {\n\t\treturn err\n\t}\n\n\terr := s3.PutFile(\n\t\tawsc.S3Client(nil, nil, nil),\n\t\tzip_file_path,\n\t\trelease.Bucket,\n\t\trelease.LambdaZipPath(),\n\t)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// reset CreateAt because it can take a while to upload the lambda\n\trelease.CreatedAt = to.Timep(time.Now())\n\n\t// Uploading the Release to S3 to match SHAs\n\tif err := s3.PutStruct(awsc.S3Client(nil, nil, nil), release.Bucket, release.ReleasePath(), release); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (project *JiraProject) CreateVersion(version *jira.Version) (*jira.Version, *jira.Response, error) {\n\tversion.ProjectID, _ = strconv.Atoi(project.ID)\n\treturn project.client.Version.Create(version)\n}", "func NewOSRelease() (OSRelease, error) {\n\tf, err := os.Open(osReleasePath())\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Unable to collect kernel version from %s - error: %s\", osReleasePath(), err)\n\t\treturn \"\", err\n\t}\n\tdefer f.Close()\n\n\treturn readOSRelease(f)\n}", "func InstallRelease(result string, bin string, helmVersionPath string) error {\n\n\t// Check OS\n\tuname := &BashCmd{\n\t\tCmd: \"uname\",\n\t\tArgs: []string{\"-s\"},\n\t}\n\tout, err := ExecBashCmd(uname)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// TODO: Fix removing the \\n on the bashCmd function\n\t// Perform output clean up\n\tunameToSlice := []string{}\n\tunameToSlice = strings.Split(out, \"\\n\")\n\tosType := strings.ToLower(fmt.Sprintf(\"%s-amd64\", unameToSlice[0]))\n\n\t// Download file\n\tdestinationPath := fmt.Sprintf(\"%s/helm-%s\", helmVersionPath, result)\n\terr = DownloadRelease(result, osType)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Untar helm version\n\tbin = fmt.Sprintf(\"helm-%s\", result)\n\tv := fmt.Sprintf(\"%s/helm\", osType)\n\ttar := &BashCmd{\n\t\tCmd: \"tar\",\n\t\tArgs: []string{\"zxvf\", 
bin, v, \"--strip-components=1\"},\n\t\tExecPath: tmpPath,\n\t}\n\t_, err = ExecBashCmd(tar)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Rename helm release to specific version\n\tmv := &BashCmd{\n\t\tCmd: \"mv\",\n\t\tArgs: []string{\"helm\", destinationPath},\n\t\tExecPath: tmpPath,\n\t}\n\t_, err = ExecBashCmd(mv)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (api *APIClient) BuildReleaseFromS3(bucketKey string, isDirectory bool) (*PreviewResponse, error) {\n\tpayload := map[string]string{\n\t\t\"s3_key\": bucketKey,\n\t}\n\n\tpayloadBytes, err := json.Marshal(payload)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar endpoint string\n\tif isDirectory {\n\t\tendpoint = \"/api/v1/releases\"\n\t} else {\n\t\tendpoint = \"/api/v1/content_files\"\n\t}\n\n\treq, err := http.NewRequest(\n\t\t\"POST\",\n\t\tfmt.Sprintf(\"%s%s\", api.baseURL, endpoint),\n\t\tbytes.NewBuffer(payloadBytes),\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer req.Body.Close()\n\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\treq.Header.Set(\"Source\", \"gLearn_cli\")\n\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"Bearer %s\", api.Credentials.token))\n\n\tres, err := api.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer res.Body.Close()\n\n\tp := &PreviewResponse{}\n\tjson.NewDecoder(res.Body).Decode(p)\n\n\tif res.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"Error: %s, response status: %d\", p.Errors, res.StatusCode)\n\t}\n\n\treturn p, nil\n}", "func (s *ReleaseService) GetRelease(id uint) (*Release, error) {\n\tvar (\n\t\tmethod = http.MethodGet\n\t\tpath = fmt.Sprintf(\"/releases/%d\", id)\n\t)\n\treq := s.client.newRequest(path, method)\n\treturn s.getRelease(req)\n}", "func CreateLink(w http.ResponseWriter, r *http.Request) {\n\n\treqBody, _ := ioutil.ReadAll(r.Body)\n\tvar obj Link\n\tjson.Unmarshal(reqBody, &obj)\n\tobj.Hash = hashLink(obj.URL)\n\tres := globals.Database.FirstOrCreate(&obj, 
obj)\n\tif res.Error != nil {\n\t\tfmt.Println(res.Error)\n\t\tpanic(\"Creation did not work.\")\n\t}\n\tjson.NewEncoder(w).Encode(obj)\n\tif !globals.Prod {\n\t\tfmt.Println(\"request: creation link\")\n\t}\n\treturn\n}", "func (c Provider) Releases(params []string) (rs *results.ResultSet, err error) {\n\t// Query the API\n\tid := strings.Join(strings.Split(params[1], \"/\"), \"%2f\")\n\turl := fmt.Sprintf(TagsEndpoint, params[0], id)\n\tvar tags Tags\n\tif err = util.FetchJSON(url, \"releases\", &tags); err != nil {\n\t\treturn\n\t}\n\trs = tags.Convert(params[0], params[1])\n\treturn\n}", "func CreateUmbrellaRelease() *helm.HelmRelease {\n\treturn helm.Chart(\"onos-umbrella\", onostest.OnosChartRepo).\n\t\tRelease(\"onos-umbrella\").\n\t\tSet(\"import.onos-gui.enabled\", false).\n\t\tSet(\"onos-topo.image.tag\", \"latest\").\n\t\tSet(\"onos-config.image.tag\", \"latest\").\n\t\tSet(\"onos-config-model.image.tag\", \"latest\")\n}", "func (c *RepoAPI) Create(body *api.BodyCreateRepo) (*api.ResultCreateRepo, error) {\n\n\tif body.SigningKey == nil {\n\t\treturn nil, errors.ReqErr(400, ErrCodeBadParam, \"signingKey\", \"signing key is required\")\n\t}\n\n\t// Create a TxRepoCreate object and fill it with args\n\ttx := txns.NewBareTxRepoCreate()\n\ttx.Name = body.Name\n\ttx.Description = body.Description\n\ttx.Nonce = body.Nonce\n\ttx.Value = util.String(cast.ToString(body.Value))\n\ttx.Fee = util.String(cast.ToString(body.Fee))\n\ttx.Timestamp = time.Now().Unix()\n\ttx.SenderPubKey = body.SigningKey.PubKey().ToPublicKey()\n\tif err := tx.Config.Merge(body.Config); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Sign the tx\n\tvar err error\n\ttx.Sig, err = tx.Sign(body.SigningKey.PrivKey().Base58())\n\tif err != nil {\n\t\treturn nil, errors.ReqErr(400, ErrCodeClient, \"privkey\", err.Error())\n\t}\n\n\tresp, statusCode, err := c.c.call(\"repo_create\", tx.ToMap())\n\tif err != nil {\n\t\treturn nil, makeReqErrFromCallErr(statusCode, err)\n\t}\n\n\tvar r 
api.ResultCreateRepo\n\tif err = util.DecodeMap(resp, &r); err != nil {\n\t\treturn nil, errors.ReqErr(500, ErrCodeDecodeFailed, \"\", err.Error())\n\t}\n\n\treturn &r, nil\n}", "func compareRelease(owner, repo, tagName string) (*github.CommitsComparison, error) {\n\tclient, ctx := defaultGithubClient(), context.Background()\n\tdefer timeTrack(time.Now(), \"API call to client.Repositories.CompareCommits()\")\n\tcc, _, err := client.Repositories.CompareCommits(ctx, owner, repo, tagName, \"HEAD\")\n\tif cc != nil {\n\t\treverseCommitOrder(cc)\n\t}\n\treturn cc, err\n}", "func NewRollbackReleaseV1Action(\n\tmodel store.HelmManagerModel, platform repo.Platform, releaseHandler release.Handler) *RollbackReleaseV1Action {\n\treturn &RollbackReleaseV1Action{\n\t\tmodel: model,\n\t\tplatform: platform,\n\t\treleaseHandler: releaseHandler,\n\t}\n}", "func (pr *PullRequest) CreateOrUpdate() error {\n\t// check the optional settings now, before actually creating the PR (which we'll have to update)\n\n\tlabels := pr.GetSetting(\"github_labels\")\n\tassignees := pr.GetSetting(\"github_assignees\")\n\tmilestone := pr.GetSetting(\"github_milestone\")\n\n\tfmt.Printf(\"Preparing to open GitHub pull request for %v\\n\", pr.RepoFullName)\n\n\t// TODO if pr exists then update original comment (if diff from original)\n\t// check return code on status?\n\tdata, err := pr.createPR()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif data == nil {\n\t\tdata, err = pr.getExisting()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Store these for future API calls\n\tpr.nodeID = data[\"node_id\"].(string)\n\tpr.apiURL = data[\"url\"].(string)\n\n\t// pr has been created at this point, now have to add meta fields in\n\t// another request\n\tissueURL, _ := data[\"issue_url\"].(string)\n\thtmlURL, _ := data[\"html_url\"].(string)\n\tissueTitle, _ := data[\"title\"].(string)\n\tissueBody, _ := data[\"body\"].(string)\n\n\tif labels != nil || assignees != nil || milestone != nil || pr.Title 
!= issueTitle || pr.Body != issueBody {\n\t\tissueMap := make(map[string]interface{})\n\n\t\t// Make sure these are correct and up-to-date\n\t\tissueMap[\"title\"] = pr.Title\n\t\tissueMap[\"body\"] = pr.Body\n\n\t\tif labels != nil {\n\t\t\tissueMap[\"labels\"] = labels\n\t\t}\n\t\tif assignees != nil {\n\t\t\tissueMap[\"assignees\"] = assignees\n\t\t}\n\t\tif milestone != nil {\n\t\t\tissueMap[\"milestone\"] = milestone\n\t\t}\n\n\t\tfmt.Printf(\"%+v\\n\", issueMap)\n\t\tissueData, _ := json.Marshal(issueMap)\n\n\t\tresp, _, err := pr.request(\"PATCH\", issueURL, issueData)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif resp.StatusCode != 200 {\n\t\t\treturn fmt.Errorf(\"failed to update pull request: %+v\", resp)\n\t\t}\n\n\t\toutput.Event(\"Successfully updated PR fields on %v\\n\", htmlURL)\n\t}\n\n\tif automerge := pr.GetSetting(\"github_automerge\"); automerge != nil {\n\n\t\tcurrentAutomerge := data[\"auto_merge\"]\n\n\t\tautomergeMethod, ok := automerge.(string)\n\t\tif !ok {\n\t\t\tautomergeBool, ok := automerge.(bool)\n\t\t\tif !ok {\n\t\t\t\treturn fmt.Errorf(\"github_automerge must be a string (\\\"merge\\\", \\\"rebase\\\", or \\\"squash\\\") or a boolean\")\n\t\t\t}\n\t\t\tif automergeBool {\n\t\t\t\tautomergeMethod = \"squash\"\n\t\t\t} else {\n\t\t\t\tautomergeMethod = \"none\"\n\t\t\t}\n\t\t}\n\n\t\tif currentAutomerge == nil && automergeMethod != \"none\" {\n\t\t\toutput.Event(\"Enabling \\\"%s\\\" automerge on %v\\n\", automergeMethod, htmlURL)\n\t\t\tif err := pr.enableAutomerge(automergeMethod); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else if currentAutomerge != nil && automergeMethod == \"none\" {\n\t\t\toutput.Event(\"Disabling automerge on %v\\n\", htmlURL)\n\t\t\tif err := pr.disableAutomerge(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\toutput.Event(\"No automerge change to make on %v\\nRequested: %s\\nExisting: %+v\", automergeMethod, currentAutomerge, htmlURL)\n\t\t}\n\t}\n\n\treturn nil\n}", "func 
(s *ReleaseService) AddImageRelease(r *Release, image io.Reader, imageName, authToken string) (*Release, error) {\n\tvar (\n\t\tmethod = http.MethodPost\n\t\tpath = fmt.Sprintf(\"/releases\")\n\t)\n\treq := s.client.newRequest(path, method)\n\taddJWTToRequest(req, authToken)\n\tr.Type = Image\n\terr := addJSONAndImageToRequestAsMultipart(req, r, image, imageName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn s.addRelease(req)\n}", "func CutRelease(release string, rc string, isFirstMinorRelease bool, backportRelease bool,\n\tisDryRun bool, legacy bool, server string, webapp string) *AppError {\n\tvar jobName string\n\tif legacy {\n\t\tjobName = Cfg.ReleaseJobLegacy\n\t} else {\n\t\tjobName = Cfg.ReleaseJob\n\t}\n\n\tisRunning, err := IsCutReleaseRunning(jobName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif isRunning {\n\t\treturn NewError(\"There is a release job running.\", nil)\n\t}\n\n\tshortRelease := release[:len(release)-2]\n\treleaseBranch := \"release-\" + shortRelease\n\n\tvar fullRelease string\n\tvar rcpart string\n\tif rc == \"\" {\n\t\tfullRelease = release\n\t\trcpart = \"\"\n\t} else {\n\t\tfullRelease = release + \"-\" + rc\n\t\trcpart = \"-\" + rc\n\t}\n\n\tisFirstMinorReleaseStr := \"false\"\n\tif isFirstMinorRelease {\n\t\tisFirstMinorReleaseStr = \"true\"\n\t}\n\n\tisDryRunStr := \"false\"\n\tif isDryRun {\n\t\tisDryRunStr = \"true\"\n\t}\n\n\tisDotReleaseStr := \"false\"\n\tif backportRelease {\n\t\tisDotReleaseStr = \"true\"\n\t}\n\n\tparameters := map[string]string{\n\t\t\"MM_VERSION\": release,\n\t\t\"MM_RC\": rcpart,\n\t\t\"IS_FIRST_MINOR_RELEASE\": isFirstMinorReleaseStr,\n\t\t\"IS_DRY_RUN\": isDryRunStr,\n\t\t\"IS_DOT_RELEASE\": isDotReleaseStr,\n\t\t\"IS_BACKPORT\": isDotReleaseStr,\n\t\t\"PIP_BRANCH\": releaseBranch,\n\t}\n\n\tif server != \"\" {\n\t\tparameters[\"MM_BUILDER_SERVER_DOCKER\"] = server\n\t}\n\n\tif webapp != \"\" {\n\t\tparameters[\"MM_BUILDER_WEBAPP_DOCKER\"] = webapp\n\t}\n\n\t// We want to return so the user 
knows the build has started.\n\t// Build jobs should report their own failure.\n\tgo func() {\n\t\tresult, err := RunJobWaitForResult(\n\t\t\tjobName,\n\t\t\tparameters)\n\t\tif err != nil || result != gojenkins.STATUS_SUCCESS {\n\t\t\tLogError(\"Release Job failed. Version=\" + fullRelease + \" err= \" + err.Error() + \" Jenkins result= \" + result)\n\t\t\treturn\n\t\t}\n\n\t\t// If Release was success trigger the Rctesting job to update\n\t\tLogInfo(\"Release Job Status: \" + result)\n\t\tif !backportRelease {\n\t\t\tLogInfo(\"Will trigger Job: \" + Cfg.RCTestingJob)\n\t\t\tRunJobParameters(Cfg.RCTestingJob, map[string]string{\"LONG_RELEASE\": fullRelease}, Cfg.CIServerJenkinsUserName, Cfg.CIServerJenkinsToken, Cfg.CIServerJenkinsURL)\n\n\t\t\t// Only update the CI servers and community if this is the latest release\n\t\t\tLogInfo(\"Setting CI Servers\")\n\t\t\tSetCIServerBranch(releaseBranch)\n\t\t}\n\t}()\n\n\treturn nil\n}", "func (s *deployerService) deploy(ctx context.Context, db *gorm.DB, opts DeploymentsCreateOpts) (*Release, error) {\n\tapp, img := opts.App, opts.Image\n\n\t// If no app is specified, attempt to find the app that relates to this\n\t// images repository, or create it if not found.\n\tif app == nil {\n\t\tvar err error\n\t\tapp, err = appsFindOrCreateByRepo(db, img.Repository)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\t// If the app doesn't already have a repo attached to it, we'll attach\n\t\t// this image's repo.\n\t\tif err := appsEnsureRepo(db, app, img.Repository); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Grab the latest config.\n\tconfig, err := s.configs.Config(db, app)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Create a new slug for the docker image.\n\tslug, err := s.slugs.Create(ctx, db, img, opts.Output)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Create a new release for the Config\n\t// and Slug.\n\tdesc := fmt.Sprintf(\"Deploy %s\", img.String())\n\n\tr, err := 
s.releases.Create(ctx, db, &Release{\n\t\tApp: app,\n\t\tConfig: config,\n\t\tSlug: slug,\n\t\tDescription: desc,\n\t})\n\n\treturn r, err\n}", "func (gh *GitHubChecker) Create(token string, ref string) {\n\tgh.token = token\n\tgh.ref = ref\n\n\tgh.events = []string{\n\t\tGitHubPingEvent,\n\t\tGitHubPushEvent,\n\t}\n}" ]
[ "0.82238644", "0.8206941", "0.80329555", "0.7918589", "0.7841578", "0.7819708", "0.76169693", "0.7502176", "0.719978", "0.71835524", "0.68819135", "0.68103856", "0.67815125", "0.66911125", "0.65367913", "0.6471042", "0.64619577", "0.63036394", "0.62942827", "0.6293456", "0.62317175", "0.6213858", "0.6196416", "0.6064337", "0.5902568", "0.5810096", "0.5801908", "0.5762606", "0.5748551", "0.57278264", "0.5721183", "0.5673619", "0.56479937", "0.55521023", "0.55344516", "0.5529957", "0.55195665", "0.5503114", "0.54503053", "0.54095095", "0.540399", "0.54008687", "0.53778297", "0.5377721", "0.53693086", "0.5369249", "0.53665596", "0.5366392", "0.53642684", "0.53608435", "0.53538024", "0.5329311", "0.53277284", "0.52818877", "0.527693", "0.52707225", "0.52326447", "0.5193396", "0.518821", "0.5183494", "0.51394254", "0.51363367", "0.5124287", "0.5117283", "0.51145494", "0.5113666", "0.5094726", "0.50926", "0.5092467", "0.5072179", "0.5063817", "0.5057391", "0.50395924", "0.5028265", "0.50223905", "0.5009282", "0.5008603", "0.4989796", "0.49851945", "0.49839452", "0.4982926", "0.4980873", "0.49721146", "0.49687493", "0.4967096", "0.4957474", "0.49524426", "0.49521348", "0.4931299", "0.49307075", "0.49306402", "0.4926031", "0.49151117", "0.49140972", "0.49066266", "0.49036968", "0.49036607", "0.49006262", "0.48996294", "0.48964652" ]
0.7646442
6
GetRelease queries the GitHub API for a specified release object
func (c *Client) GetRelease(ctx context.Context, tag string) (*github.RepositoryRelease, error) { // Check Release whether already exists or not release, res, err := c.Repositories.GetReleaseByTag(context.TODO(), c.Owner, c.Repo, tag) if err != nil { if res == nil { return nil, errors.Wrapf(err, "failed to get release tag: %s", tag) } // TODO(tcnksm): Handle invalid token if res.StatusCode != http.StatusNotFound { return nil, errors.Wrapf(err, "get release tag: invalid status: %s", res.Status) } return nil, nil } return release, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func GetRelease(ctx *context.APIContext) {\n\t// swagger:operation GET /repos/{owner}/{repo}/releases/{id} repository repoGetRelease\n\t// ---\n\t// summary: Get a release\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// - name: id\n\t// in: path\n\t// description: id of the release to get\n\t// type: integer\n\t// format: int64\n\t// required: true\n\t// responses:\n\t// \"200\":\n\t// \"$ref\": \"#/responses/Release\"\n\t// \"404\":\n\t// \"$ref\": \"#/responses/notFound\"\n\n\tid := ctx.ParamsInt64(\":id\")\n\trelease, err := repo_model.GetReleaseByID(ctx, id)\n\tif err != nil && !repo_model.IsErrReleaseNotExist(err) {\n\t\tctx.Error(http.StatusInternalServerError, \"GetReleaseByID\", err)\n\t\treturn\n\t}\n\tif err != nil && repo_model.IsErrReleaseNotExist(err) ||\n\t\trelease.IsTag || release.RepoID != ctx.Repo.Repository.ID {\n\t\tctx.NotFound()\n\t\treturn\n\t}\n\n\tif err := release.LoadAttributes(ctx); err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"LoadAttributes\", err)\n\t\treturn\n\t}\n\tctx.JSON(http.StatusOK, convert.ToAPIRelease(ctx, ctx.Repo.Repository, release))\n}", "func (s *ReleaseService) GetRelease(id uint) (*Release, error) {\n\tvar (\n\t\tmethod = http.MethodGet\n\t\tpath = fmt.Sprintf(\"/releases/%d\", id)\n\t)\n\treq := s.client.newRequest(path, method)\n\treturn s.getRelease(req)\n}", "func GetRelease(version string, owner string, repo string, provider gits.GitProvider) (*gits.GitRelease, error) {\n\trelease, err := provider.GetRelease(owner, repo, version)\n\tif err != nil {\n\t\t// normally tags are v<version> so try that\n\t\ttag := fmt.Sprintf(\"v%s\", version)\n\t\trelease, err = provider.GetRelease(owner, repo, tag)\n\t\tif err != nil {\n\t\t\tif ReleaseNotFoundError(err) 
{\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t\treturn nil, errors.Wrapf(err, \"getting release for %s (tried %s and %s)\", version, version, tag)\n\n\t\t}\n\t}\n\treturn release, nil\n}", "func (c *releaseClient) Get(name string) (*Release, error) {\n\tlist, err := c.config.Releases.List(func(r *release.Release) bool {\n\t\treturn r.Namespace == c.config.Namespace() && r.Name == name\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t} else if len(list) == 0 {\n\t\treturn nil, errors.New(\"release not found\")\n\t} else if len(list) > 1 {\n\t\treturn nil, errors.New(\"release is ambiguous\")\n\t}\n\treturn getRelease(c.config, list[0])\n}", "func GetRelease(cmd *cobra.Command, args []string) {\n\treq := &helmmanager.ListReleaseReq{}\n\n\tif !flagAll {\n\t\treq.Size = common.GetUint32P(uint32(flagNum))\n\t}\n\tif len(args) > 0 {\n\t\treq.Size = common.GetUint32P(1)\n\t\treq.Name = common.GetStringP(args[0])\n\t}\n\treq.ClusterID = &flagCluster\n\treq.Namespace = &flagNamespace\n\n\tc := newClientWithConfiguration()\n\tr, err := c.Release().List(cmd.Context(), req)\n\tif err != nil {\n\t\tfmt.Printf(\"get release failed, %s\\n\", err.Error())\n\t\tos.Exit(1)\n\t}\n\n\tif flagOutput == outputTypeJson {\n\t\tprinter.PrintReleaseInJson(r)\n\t\treturn\n\t}\n\n\tprinter.PrintReleaseInTable(flagOutput == outputTypeWide, r)\n}", "func (a *Agent) GetRelease(\n\tctx context.Context,\n\tname string,\n\tversion int,\n\tgetDeps bool,\n) (*release.Release, error) {\n\tctx, span := telemetry.NewSpan(ctx, \"helm-get-release\")\n\tdefer span.End()\n\n\ttelemetry.WithAttributes(span,\n\t\ttelemetry.AttributeKV{Key: \"name\", Value: name},\n\t\ttelemetry.AttributeKV{Key: \"version\", Value: version},\n\t\ttelemetry.AttributeKV{Key: \"getDeps\", Value: getDeps},\n\t)\n\n\t// Namespace is already known by the RESTClientGetter.\n\tcmd := action.NewGet(a.ActionConfig)\n\n\tcmd.Version = version\n\n\trelease, err := cmd.Run(name)\n\tif err != nil {\n\t\treturn nil, telemetry.Error(ctx, span, 
err, \"error running get release\")\n\t}\n\n\tif getDeps && release.Chart != nil && release.Chart.Metadata != nil {\n\t\tfor _, dep := range release.Chart.Metadata.Dependencies {\n\t\t\t// only search for dependency if it passes the condition specified in Chart.yaml\n\t\t\tif dep.Enabled {\n\t\t\t\tdepExists := false\n\n\t\t\t\tfor _, currDep := range release.Chart.Dependencies() {\n\t\t\t\t\t// we just case on name for now -- there might be edge cases we're missing\n\t\t\t\t\t// but this will cover 99% of cases\n\t\t\t\t\tif dep != nil && currDep != nil && dep.Name == currDep.Name() {\n\t\t\t\t\t\tdepExists = true\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif !depExists {\n\t\t\t\t\tdepChart, err := loader.LoadChartPublic(ctx, dep.Repository, dep.Name, dep.Version)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, telemetry.Error(ctx, span, err, fmt.Sprintf(\"Error retrieving chart dependency %s/%s-%s\", dep.Repository, dep.Name, dep.Version))\n\t\t\t\t\t}\n\n\t\t\t\t\trelease.Chart.AddDependency(depChart)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn release, err\n}", "func (c *Client) Get(name string) (*Release, error) {\n\treleases, err := c.List(ListParameters{Filter: name})\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\tfor _, release := range releases {\n\t\tif release.Name == name {\n\t\t\treturn &release, nil\n\t\t}\n\t}\n\treturn nil, trace.NotFound(\"release %v not found\", name)\n}", "func GetRelease(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *ReleaseState, opts ...pulumi.ResourceOption) (*Release, error) {\n\tvar resource Release\n\terr := ctx.ReadResource(\"google-native:firebaserules/v1:Release\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func GetRelease(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *ReleaseState, opts ...pulumi.ResourceOption) (*Release, error) {\n\tvar resource Release\n\terr := 
ctx.ReadResource(\"google-native:firebasehosting/v1beta1:Release\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (operator *AccessOperator) GetRelease(cxt context.Context, ID string) (*common.Release, error) {\n\t//do not implemented\n\tbusiness, err := operator.GetBusiness(cxt, operator.Business)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif business == nil {\n\t\tlogger.V(3).Infof(\"GetRelease: found no relative Business %s\", operator.Business)\n\t\treturn nil, fmt.Errorf(\"No relative Business %s\", operator.Business)\n\t}\n\n\treturn operator.innerGetReleaseByID(cxt, business.Bid, ID)\n}", "func (r *ReleaseEvent) GetRelease() *RepositoryRelease {\n\tif r == nil {\n\t\treturn nil\n\t}\n\treturn r.Release\n}", "func (gc *githubClient) GetLatestVersionFromRelease(ctx context.Context, owner string, repo string) (string, error) {\n\tvar err error\n\tversion := \"\"\n\n\terr = retryWhenRateLimited(func() error {\n\t\tversion, err = gc.getLatestReleaseVersion(ctx, owner, repo)\n\t\treturn err\n\t})\n\n\treturn version, err\n}", "func (hc *Actions) Get(name string) (*release.Release, error) {\n\tactGet := action.NewGet(hc.Config)\n\treturn actGet.Run(name)\n}", "func (a *Client) GetReleaseRevision(params *GetReleaseRevisionParams, authInfo runtime.ClientAuthInfoWriter) (*GetReleaseRevisionOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetReleaseRevisionParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"Get Release Revision\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/{organization}/{project}/_apis/release/releases/{releaseId}\",\n\t\tProducesMediaTypes: []string{\"text/plain\"},\n\t\tConsumesMediaTypes: []string{\"\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &GetReleaseRevisionReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: 
params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetReleaseRevisionOK), nil\n\n}", "func (api *RestAPI) GetRelease(epicID string) ([]ReleaseItem, error) {\n\tresults := []ReleaseItem{}\n\tissue, err := api.getIssue(epicID)\n\tif err != nil {\n\t\treturn results, err\n\t}\n\n\tscanner := bufio.NewScanner(strings.NewReader(issue.Fields.Description.(string)))\n\tfor scanner.Scan() {\n\t\tline := strings.ToLower(scanner.Text())\n\t\tif strings.Contains(line, \"/app#/projects\") {\n\t\t\tparts := strings.Split(line, \"/\")\n\t\t\tresults = append(results, ReleaseItem{Project: parts[5], Version: parts[7]})\n\t\t}\n\t}\n\treturn results, nil\n}", "func (s *Services) Release(ctx context.Context, request *proto.ReleaseRequest) (*proto.ReleaseResponse, error) {\n\tvar result models.Release\n\tquery := s.DB\n\n\tif request.Id != 0 {\n\t\tquery = query.Where(\"id = ?\", request.Id)\n\t}\n\n\tif err := query.First(&result).Error; err != nil {\n\n\t\t// If nothing was found\n\t\tif gorm.IsRecordNotFoundError(err) {\n\t\t\treturn &proto.ReleaseResponse{Release: nil}, nil\n\t\t}\n\n\t\tlog.Println(err)\n\t\treturn nil, err\n\t}\n\n\treturn &proto.ReleaseResponse{Release: result.ToProto()}, nil\n}", "func GetGameRelease(apiClient *api.Client, releaseVersion string) (*semver.Version, error) {\n\tif releaseVersion == \"\" {\n\t\tui.Prompt(\"Enter a release version (1.2.3): \")\n\t\tvar err error\n\t\treleaseVersion, err = inReader.ReadString('\\n')\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treleaseVersion = strings.TrimSpace(releaseVersion)\n\t}\n\n\tsemver, err := semver.Make(releaseVersion)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Invalid semver. 
Check out https://semver.org\")\n\t}\n\treturn &semver, nil\n}", "func GetLatestRelease(ctx *context.APIContext) {\n\t// swagger:operation GET /repos/{owner}/{repo}/releases/latest repository repoGetLatestRelease\n\t// ---\n\t// summary: Gets the most recent non-prerelease, non-draft release of a repository, sorted by created_at\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// responses:\n\t// \"200\":\n\t// \"$ref\": \"#/responses/Release\"\n\t// \"404\":\n\t// \"$ref\": \"#/responses/notFound\"\n\trelease, err := repo_model.GetLatestReleaseByRepoID(ctx.Repo.Repository.ID)\n\tif err != nil && !repo_model.IsErrReleaseNotExist(err) {\n\t\tctx.Error(http.StatusInternalServerError, \"GetLatestRelease\", err)\n\t\treturn\n\t}\n\tif err != nil && repo_model.IsErrReleaseNotExist(err) ||\n\t\trelease.IsTag || release.RepoID != ctx.Repo.Repository.ID {\n\t\tctx.NotFound()\n\t\treturn\n\t}\n\n\tif err := release.LoadAttributes(ctx); err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"LoadAttributes\", err)\n\t\treturn\n\t}\n\tctx.JSON(http.StatusOK, convert.ToAPIRelease(ctx, ctx.Repo.Repository, release))\n}", "func (c *MBClient) GetReleaseInfo(id string) (*ReleaseMetadata, error) {\n\tendpoint := ReleaseEntity + \"/\" + id\n\tq := c.CreateQuery()\n\tq.Set(\"inc\", \"recordings\")\n\treq, err := c.NewRequest(endpoint, q)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmetadata := &ReleaseMetadata{}\n\t_, err = c.Do(req, metadata)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn metadata, nil\n}", "func GetLatestRelease() (Release, error) {\n\tvar release Release\n\n\tctx, cancel := context.WithTimeout(context.Background(), timeout)\n\tdefer cancel()\n\n\treq, err := http.NewRequestWithContext(ctx, http.MethodGet, 
latestSource, nil)\n\tif err != nil {\n\t\treturn release, fmt.Errorf(\"error creating request: %w\", err)\n\t}\n\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn release, fmt.Errorf(\"error performing request: %w\", err)\n\t}\n\n\tdefer resp.Body.Close()\n\n\tif err := json.NewDecoder(resp.Body).Decode(&release); err != nil {\n\t\treturn release, fmt.Errorf(\"error decoding response: %w\", err)\n\t}\n\n\treturn release, nil\n}", "func (as *apiServer) DownloadRelease(w http.ResponseWriter, r *http.Request) {\n\treqLogger := as.logger.New(\"method\", r.Method, \"url\", r.RequestURI)\n\treqLogger.Info(\"fetching release URL\")\n\n\tvars := mux.Vars(r)\n\tctx, cancel := context.WithTimeout(r.Context(), requestTimeout)\n\tdefer cancel()\n\n\turl, err := as.githubClient.FetchReleaseURL(ctx, vars[\"owner\"], vars[\"repo\"], vars[\"tag\"], vars[\"assetName\"])\n\tif ctx.Err() != nil {\n\t\treqLogger.Error(\"error retrieving release URL\", \"err\", err, \"ctx error\", ctx.Err())\n\t\twriteHTTPError(w, reqLogger, http.StatusBadGateway, \"Bad Gateway\")\n\t\treturn\n\t}\n\tif err != nil {\n\t\tswitch t := err.(type) {\n\t\tcase GitHubError:\n\t\t\tif t.Type == TypeNotFound {\n\t\t\t\treqLogger.Info(\"data not found\", \"err\", t.WrappedError, \"vars\", vars)\n\t\t\t\twriteHTTPError(w, reqLogger, http.StatusNotFound, t.WrappedError.Error())\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\treqLogger.Error(\"unhandled github error\", \"err\", t.WrappedError, \"vars\", vars)\n\t\t\t\twriteHTTPError(w, reqLogger, http.StatusInternalServerError, \"Internal Server Error\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\treqLogger.Error(\"error retrieving release URL\", \"err\", err, \"vars\", vars)\n\t\twriteHTTPError(w, reqLogger, http.StatusInternalServerError, err.Error())\n\t\treturn\n\t}\n\n\treqLogger.Info(\"found release URL\", \"url\", url)\n\n\tw.Header().Set(\"Location\", url)\n\tw.WriteHeader(http.StatusMovedPermanently)\n}", "func getLatestRelease(owner, repo 
string) (*github.RepositoryRelease, error) {\n\tclient, ctx := defaultGithubClient(), context.Background()\n\tdefer timeTrack(time.Now(), \"API call to client.Repositories.GetLatestRelease()\")\n\trelease, _, err := client.Repositories.GetLatestRelease(ctx, owner, repo)\n\treturn release, err\n}", "func cmdGetReleases(ccmd *cobra.Command, args []string) {\n\taplSvc := apl.NewClient()\n\toutput := runGetCommand(args, aplSvc.Releases.Get)\n\tif output != nil {\n\t\tfields := []string{\"ID\", \"StackID\", \"Version\", \"CreatedTime\"}\n\t\tprintTableResultsCustom(output.(apl.Release), fields)\n\t}\n}", "func NewCmdGetRelease(commonOpts *opts.CommonOptions) *cobra.Command {\n\toptions := &GetReleaseOptions{\n\t\tGetOptions: GetOptions{\n\t\t\tCommonOptions: commonOpts,\n\t\t},\n\t}\n\tcmd := &cobra.Command{\n\t\tUse: \"releases\",\n\t\tShort: \"Display the Release or Releases the current user is a member of\",\n\t\tAliases: []string{\"release\"},\n\t\tLong: getReleaseLong,\n\t\tExample: getReleaseExample,\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\toptions.Cmd = cmd\n\t\t\toptions.Args = args\n\t\t\terr := options.Run()\n\t\t\thelper.CheckErr(err)\n\t\t},\n\t}\n\tcmd.Flags().StringVarP(&options.Filter, \"filter\", \"f\", \"\", \"Filter the releases with the given text\")\n\tcmd.Flags().StringVarP(&options.Namespace, \"namespace\", \"n\", \"\", \"The namespace to view or defaults to the current namespace\")\n\n\toptions.AddGetFlags(cmd)\n\treturn cmd\n}", "func (r *Releaser) Find(tagName string) (*Release, error) {\n\trelease, _, err := r.client.Repositories.GetReleaseByTag(context.Background(), r.owner, r.repository, tagName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlog.Printf(\"found release ID: %v, tag: %v\", *release.ID, *release.TagName)\n\treturn &Release{ID: *release.ID, TagName: *release.TagName, Releaser: r}, nil\n}", "func (repo BoshDirectorRepository) GetReleases() (releases models.Releases, apiResponse net.ApiResponse) {\n\tresponse := 
[]releaseResponse{}\n\n\tpath := \"/releases\"\n\tapiResponse = repo.gateway.GetResource(repo.config.TargetURL+path, repo.config.Username, repo.config.Password, &response)\n\tif apiResponse.IsNotSuccessful() {\n\t\treturn\n\t}\n\n\tlist := []*models.Release{}\n\tfor _, resource := range response {\n\t\tlist = append(list, resource.ToModel())\n\t}\n\treleases = models.Releases(list)\n\n\treturn\n}", "func (c *GitHub) GetReleaseByTagOrNil(ctx context.Context, repo git.RepositoryID, tag git.TagName) (*git.Release, error) {\n\tc.Logger.Debugf(\"Getting the release associated to the tag %v on the repository %+v\", tag, repo)\n\trelease, resp, err := c.Client.GetReleaseByTag(ctx, repo.Owner, repo.Name, tag.Name())\n\tif resp != nil && resp.StatusCode == http.StatusNotFound {\n\t\tc.Logger.Debugf(\"GitHub API returned 404: %s\", err)\n\t\treturn nil, nil\n\t}\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"GitHub API error: %w\", err)\n\t}\n\treturn &git.Release{\n\t\tID: git.ReleaseID{\n\t\t\tRepository: repo,\n\t\t\tInternalID: release.GetID(),\n\t\t},\n\t\tTagName: git.TagName(release.GetTagName()),\n\t\tName: release.GetName(),\n\t}, nil\n}", "func (c *gitlabClient) CreateRelease(ctx *context.Context, body string) (releaseID string, err error) {\n\ttitle, err := tmpl.New(ctx).Apply(ctx.Config.Release.NameTemplate)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tprojectID := ctx.Config.Release.GitLab.Owner + \"/\" + ctx.Config.Release.GitLab.Name\n\tlog.WithFields(log.Fields{\n\t\t\"owner\": ctx.Config.Release.GitLab.Owner,\n\t\t\"name\": ctx.Config.Release.GitLab.Name,\n\t}).Debug(\"projectID\")\n\n\tname := title\n\ttagName := ctx.Git.CurrentTag\n\trelease, resp, err := c.client.Releases.GetRelease(projectID, tagName)\n\tif err != nil && resp.StatusCode != 403 {\n\t\treturn \"\", err\n\t}\n\n\tif resp.StatusCode == 403 {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"err\": err.Error(),\n\t\t}).Debug(\"get release\")\n\n\t\tdescription := body\n\t\tref := 
ctx.Git.Commit\n\t\tgitURL := ctx.Git.URL\n\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"name\": name,\n\t\t\t\"description\": description,\n\t\t\t\"ref\": ref,\n\t\t\t\"url\": gitURL,\n\t\t}).Debug(\"creating release\")\n\t\trelease, _, err = c.client.Releases.CreateRelease(projectID, &gitlab.CreateReleaseOptions{\n\t\t\tName: &name,\n\t\t\tDescription: &description,\n\t\t\tRef: &ref,\n\t\t\tTagName: &tagName,\n\t\t})\n\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"err\": err.Error(),\n\t\t\t}).Debug(\"error create release\")\n\t\t\treturn \"\", err\n\t\t}\n\t\tlog.WithField(\"name\", release.Name).Info(\"release created\")\n\t} else {\n\t\tdesc := body\n\t\tif release != nil && release.DescriptionHTML != \"\" {\n\t\t\tdesc = release.DescriptionHTML\n\t\t}\n\n\t\trelease, _, err = c.client.Releases.UpdateRelease(projectID, tagName, &gitlab.UpdateReleaseOptions{\n\t\t\tName: &name,\n\t\t\tDescription: &desc,\n\t\t})\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"err\": err.Error(),\n\t\t\t}).Debug(\"error update release\")\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tlog.WithField(\"name\", release.Name).Info(\"release updated\")\n\t}\n\n\treturn tagName, err // gitlab references a tag in a repo by its name\n}", "func (s *ReleaseService) GetReleaseAuthorized(id uint, token string) (*Release, error) {\n\tvar (\n\t\tmethod = http.MethodGet\n\t\tpath = fmt.Sprintf(\"/releases/%d\", id)\n\t)\n\treq := s.client.newRequest(path, method)\n\taddJWTToRequest(req, token)\n\treturn s.getRelease(req)\n}", "func (o *VulnerabilitiesRequest) GetReleasever() string {\n\tif o == nil || o.Releasever == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Releasever\n}", "func GetReleaseTagFromVersion(version string) (releaseTag string) {\n\t// Check version exists in GH\n\t// Get the URL for the version\n\tghr := []github.ReleaseBody{}\n\tbody, err := github.GetAllReleases()\n\tif err != nil {\n\t\tfmt.Println(\"Unexpected error retrieving list of 
available Tokaido releases: \" + err.Error())\n\t\tos.Exit(1)\n\t}\n\n\terr = json.Unmarshal(body, &ghr)\n\tif err != nil {\n\t\tfmt.Println(\"Unexpected error assembling list of available Tokaido releases: \" + err.Error())\n\t\tos.Exit(1)\n\t}\n\n\tfor _, r := range ghr {\n\t\tif r.TagName == version {\n\t\t\tif r.Draft == true {\n\t\t\t\tfmt.Println(\"\\nWarning: The selected version is a draft and may not work as intended\")\n\t\t\t}\n\n\t\t\tif r.Prerelease == true {\n\t\t\t\tfmt.Println(\"\\nWarning: The selected version is a prerelease and may not work as intended\")\n\t\t\t}\n\n\t\t\treleaseTag = r.TagName\n\t\t}\n\t}\n\n\tif releaseTag == \"\" {\n\t\tfmt.Println(\"There is no release information available for version [\" + version + \"]\")\n\t\tos.Exit(1)\n\t}\n\n\treturn releaseTag\n}", "func latestRelease(client *github.Client) (string, error) {\n\trelease, _, err := client.Repositories.GetLatestRelease(context.Background(), \"ReconfigureIO\", \"reco\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn *release.TagName, nil\n}", "func handleRepo(ctx context.Context, client *github.Client, repo *github.Repository) (*release, error) {\n\tif !in(orgs, repo.GetOwner().GetLogin()) {\n\t\t// return early\n\t\treturn nil, nil\n\t}\n\topt := &github.ListOptions{\n\t\tPage: 1,\n\t\tPerPage: 100,\n\t}\n\n\treleases, resp, err := client.Repositories.ListReleases(ctx, repo.GetOwner().GetLogin(), repo.GetName(), opt)\n\tif resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusForbidden || err != nil {\n\t\tif _, ok := err.(*github.RateLimitError); ok {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Skip it because there is no release.\n\t\treturn nil, nil\n\t}\n\tif err != nil || len(releases) < 1 {\n\t\treturn nil, err\n\t}\n\n\trl := release{\n\t\tRepository: repo,\n\t}\n\t// Get information about the binary assets for linux-amd64\n\tarch := \"linux-amd64\"\n\tfor i := 0; i < len(releases); i++ {\n\t\tr := releases[i]\n\t\tif rl.Release == nil && 
!r.GetDraft() {\n\t\t\t// If this is the latest release and it's not a draft make it the one\n\t\t\t// to return\n\t\t\trl.Release = r\n\t\t\tfor _, asset := range r.Assets {\n\t\t\t\trl.BinaryDownloadCount += asset.GetDownloadCount()\n\t\t\t\tif strings.HasSuffix(asset.GetName(), arch) {\n\t\t\t\t\trl.BinaryURL = asset.GetBrowserDownloadURL()\n\t\t\t\t\trl.BinaryName = asset.GetName()\n\t\t\t\t\trl.BinarySince = units.HumanDuration(time.Since(asset.GetCreatedAt().Time))\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif strings.HasSuffix(asset.GetName(), arch+\".sha256\") {\n\t\t\t\t\tc, err := getURLContent(asset.GetBrowserDownloadURL())\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t\trl.BinarySHA256 = c\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif strings.HasSuffix(asset.GetName(), arch+\".md5\") {\n\t\t\t\t\tc, err := getURLContent(asset.GetBrowserDownloadURL())\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t\trl.BinaryMD5 = c\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tfor _, asset := range r.Assets {\n\t\t\t\trl.BinaryDownloadCount += asset.GetDownloadCount()\n\t\t\t}\n\t\t}\n\t}\n\n\treturn &rl, nil\n}", "func (dao *releasedHookDao) GetByReleaseID(kit *kit.Kit, bizID, releaseID uint32) (\n\t*table.ReleasedHook, *table.ReleasedHook, error) {\n\tif bizID == 0 {\n\t\treturn nil, nil, errf.New(errf.InvalidParameter, \"bizID is 0\")\n\t}\n\tm := dao.genQ.ReleasedHook\n\tpre, err := m.WithContext(kit.Ctx).Where(m.BizID.Eq(bizID), m.ReleaseID.Eq(releaseID),\n\t\tm.HookType.Eq(table.PreHook.String())).Take()\n\tif err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {\n\t\treturn nil, nil, err\n\t}\n\tif pre != nil {\n\t\tcontent, e := base64.StdEncoding.DecodeString(pre.Content)\n\t\tif e != nil {\n\t\t\treturn nil, nil, e\n\t\t}\n\t\tpre.Content = string(content)\n\t}\n\tpost, err := m.WithContext(kit.Ctx).Where(m.BizID.Eq(bizID), 
m.ReleaseID.Eq(releaseID),\n\t\tm.HookType.Eq(table.PostHook.String())).Take()\n\tif err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {\n\t\treturn nil, nil, err\n\t}\n\tif post != nil {\n\t\tcontent, e := base64.StdEncoding.DecodeString(post.Content)\n\t\tif e != nil {\n\t\t\treturn nil, nil, e\n\t\t}\n\t\tpost.Content = string(content)\n\t}\n\n\treturn pre, post, nil\n}", "func (g *GHR) CreateRelease(ctx context.Context, req *github.RepositoryRelease, recreate bool) (*github.RepositoryRelease, error) {\n\n\t// When draft release creation is requested,\n\t// create it without any check (it can).\n\tif *req.Draft {\n\t\tfmt.Fprintln(g.outStream, \"==> Create a draft release\")\n\t\treturn g.GitHub.CreateRelease(ctx, req)\n\t}\n\n\t// Always create release as draft first. After uploading assets, turn off\n\t// draft unless the `-draft` flag is explicitly specified.\n\t// It is to prevent users from seeing empty release.\n\treq.Draft = github.Bool(true)\n\n\t// Check release exists.\n\t// If release is not found, then create a new release.\n\trelease, err := g.GitHub.GetRelease(ctx, *req.TagName)\n\tif err != nil {\n\t\tif !errors.Is(err, ErrReleaseNotFound) {\n\t\t\treturn nil, fmt.Errorf(\"failed to get release: %w\", err)\n\t\t}\n\t\tDebugf(\"Release (with tag %s) not found: create a new one\",\n\t\t\t*req.TagName)\n\n\t\tif recreate {\n\t\t\tfmt.Fprintf(g.outStream,\n\t\t\t\t\"WARNING: '-recreate' is specified but release (%s) not found\",\n\t\t\t\t*req.TagName)\n\t\t}\n\n\t\tfmt.Fprintln(g.outStream, \"==> Create a new release\")\n\t\treturn g.GitHub.CreateRelease(ctx, req)\n\t}\n\n\t// recreate is not true. Then use that existing release.\n\tif !recreate {\n\t\tDebugf(\"Release (with tag %s) exists: use existing one\",\n\t\t\t*req.TagName)\n\n\t\tfmt.Fprintf(g.outStream, \"WARNING: found release (%s). 
Use existing one.\\n\",\n\t\t\t*req.TagName)\n\t\treturn release, nil\n\t}\n\n\t// When recreate is requested, delete existing release and create a\n\t// new release.\n\tfmt.Fprintln(g.outStream, \"==> Recreate a release\")\n\tif err := g.DeleteRelease(ctx, *release.ID, *req.TagName); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn g.GitHub.CreateRelease(ctx, req)\n}", "func GetHelmRelease(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *HelmReleaseState, opts ...pulumi.ResourceOption) (*HelmRelease, error) {\n\tvar resource HelmRelease\n\terr := ctx.ReadResource(\"kubernetes:apps.open-cluster-management.io/v1:HelmRelease\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (s *ReleaseService) GetReleases(page, perPage uint) ([]*Release, error) {\n\tp := PaginateParams{}\n\tp.Limit, p.Offset = calculateLimitOffset(page, perPage)\n\treturn s.SearchReleases(\"\", \"\", p)\n}", "func GetReleaseInformation(res http.ResponseWriter, req *http.Request) {\n\tres.Header().Set(\"Content-Type\", \"application/json\")\n\tc := ReleaseInformation{\"relid\", \"app\", \"stream\", \"something\", \"xebia\", \"1234\"}\n\toutgoingJSON, err := json.Marshal(c)\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\thttp.Error(res, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tres.WriteHeader(http.StatusCreated)\n\tfmt.Fprint(res, string(outgoingJSON))\n}", "func (s *Services) ReleaseType(ctx context.Context, request *proto.ReleaseTypeRequest) (*proto.ReleaseTypeResponse, error) {\n\tvar result models.ReleaseType\n\tquery := s.DB\n\n\tif request.Id != 0 {\n\t\tquery = query.Where(\"id = ?\", request.Id)\n\t}\n\n\tif err := query.First(&result).Error; err != nil {\n\n\t\t// If nothing was found\n\t\tif gorm.IsRecordNotFoundError(err) {\n\t\t\treturn &proto.ReleaseTypeResponse{ReleaseType: nil}, nil\n\t\t}\n\n\t\tlog.Println(err)\n\t\treturn nil, err\n\t}\n\n\treturn 
&proto.ReleaseTypeResponse{ReleaseType: result.ToProto()}, nil\n}", "func (m *MockGithubAssetClient) GetReleaseByTag(ctx context.Context, tag string) (*github.RepositoryRelease, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetReleaseByTag\", ctx, tag)\n\tret0, _ := ret[0].(*github.RepositoryRelease)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func GetReleaseName(hr *appv1.HelmRequest) string {\n\treturn hr.GetReleaseName()\n}", "func GetHostingRelease(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *HostingReleaseState, opts ...pulumi.ResourceOption) (*HostingRelease, error) {\n\tvar resource HostingRelease\n\terr := ctx.ReadResource(\"gcp:firebase/hostingRelease:HostingRelease\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (a *Agent) GetReleaseHistory(\n\tctx context.Context,\n\tname string,\n) ([]*release.Release, error) {\n\tctx, span := telemetry.NewSpan(ctx, \"helm-get-release-history\")\n\tdefer span.End()\n\n\ttelemetry.WithAttributes(span,\n\t\ttelemetry.AttributeKV{Key: \"name\", Value: name},\n\t)\n\n\tcmd := action.NewHistory(a.ActionConfig)\n\n\treturn cmd.Run(name)\n}", "func compareRelease(owner, repo, tagName string) (*github.CommitsComparison, error) {\n\tclient, ctx := defaultGithubClient(), context.Background()\n\tdefer timeTrack(time.Now(), \"API call to client.Repositories.CompareCommits()\")\n\tcc, _, err := client.Repositories.CompareCommits(ctx, owner, repo, tagName, \"HEAD\")\n\tif cc != nil {\n\t\treverseCommitOrder(cc)\n\t}\n\treturn cc, err\n}", "func (h *handler) Release(ctx context.Context, evt *github.ReleaseEvent) error {\n\tif evt.GetAction() != \"released\" {\n\t\tlogrus.WithField(\"action\", evt.GetAction()).Info(\"ignoring release event\")\n\t\treturn nil\n\t}\n\tnotifyRepos := h.cfg.ReleaseDispatchRepos()\n\tlogrus.WithField(\"repos\", len(notifyRepos)).Info(\"notifying repositories of release\")\n\tif len(notifyRepos) 
== 0 {\n\t\treturn nil\n\t}\n\n\tgh := repo.NewGitHubClient(h.cfg.GitHubToken)\n\tfeedbackIssue, err := releaseFeedbackIssue(ctx, gh, evt, notifyRepos)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlogrus.WithField(\"issue_number\", feedbackIssue.Number).Debug(\"created feedback issue\")\n\n\tdispatchOpts, err := h.releaseDispatchOptions(evt, feedbackIssue)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, notifyRepo := range notifyRepos {\n\t\tnotifyRepoParts := strings.SplitN(notifyRepo, \"/\", 2)\n\t\towner := notifyRepoParts[0]\n\t\tname := notifyRepoParts[1]\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"owner\": owner,\n\t\t\t\"name\": name,\n\t\t}).Debug(\"dispatching release to repository\")\n\t\tif _, _, err := gh.Repositories.Dispatch(ctx, owner, name, dispatchOpts); err != nil {\n\t\t\tlogrus.WithError(err).Warn(\"error dispatching update\")\n\t\t}\n\t}\n\n\treturn nil\n}", "func (v GithubVCS) GetObject(ctx context.Context, sha string, runinfo *RunInfo) ([]byte, error) {\n\tblob, _, err := v.Client.Git.GetBlob(ctx, runinfo.Owner, runinfo.Repository, sha)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdecoded, err := base64.StdEncoding.DecodeString(blob.GetContent())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn decoded, err\n}", "func ReleaseID(v int64) predicate.GithubRelease {\n\treturn predicate.GithubRelease(sql.FieldEQ(FieldReleaseID, v))\n}", "func (cr APIContractRepository) Get(ctx context.Context, revisionID uuid.UUID) (models.APIContractRevision, error) {\n\tif revisionID == uuid.Nil {\n\t\treturn models.APIContractRevision{}, errors.New(\"invalid contract revision id supplied\")\n\t}\n\n\tvar acr models.APIContractRevision\n\ttx := cr.db.Find(&acr, \"id = ?\", revisionID.String())\n\tif tx.Error != nil {\n\t\treturn models.APIContractRevision{}, fmt.Errorf(\"no contract revision found for id %s: %w\", revisionID, tx.Error)\n\t}\n\n\treturn acr, nil\n}", "func (gc *githubClient) getLatestReleaseVersion(ctx context.Context, owner 
string, repo string) (string, error) {\n\trelease, response, err := gc.client.Repositories.GetLatestRelease(ctx, owner, repo)\n\tif err != nil {\n\t\tlog.WithField(\"repo\", owner+\"/\"+repo).WithError(err).Warn(\"Error fetching latest version\")\n\t\treturn \"\", errors.Wrap(err, \"Error fetching latest version\")\n\t}\n\tif response.StatusCode != 200 {\n\t\tlog.WithField(\"repo\", owner+\"/\"+repo).Warnf(\"Error fetching latest version: http-status: %s\", response.Status)\n\t\treturn \"\", errors.Wrapf(err, \"Error fetching latest version: http-status: %s\", response.Status)\n\t}\n\treturn release.GetTagName(), nil\n}", "func (st *buildStatus) latestRelease(submodule string) (string, error) {\n\t// Baseline is the latest gopls release tag (but not prerelease).\n\tgerritClient := pool.NewGCEConfiguration().GerritClient()\n\ttags, err := gerritClient.GetProjectTags(st.ctx, st.SubName)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error fetching tags for %q: %w\", st.SubName, err)\n\t}\n\n\tvar versions []string\n\trevisions := make(map[string]string)\n\tprefix := \"refs/tags\"\n\tif submodule != \"\" {\n\t\tprefix += \"/\" + submodule // e.g. 
gopls tags are \"gopls/vX.Y.Z\"\n\t}\n\tfor ref, ti := range tags {\n\t\tif !strings.HasPrefix(ref, prefix) {\n\t\t\tcontinue\n\t\t}\n\t\tversion := ref[len(prefix):]\n\t\tversions = append(versions, version)\n\t\trevisions[version] = ti.Revision\n\t}\n\n\tsemver.Sort(versions)\n\n\t// Return latest non-prerelease version.\n\tfor i := len(versions) - 1; i >= 0; i-- {\n\t\tver := versions[i]\n\t\tif !semver.IsValid(ver) {\n\t\t\tcontinue\n\t\t}\n\t\tif semver.Prerelease(ver) != \"\" {\n\t\t\tcontinue\n\t\t}\n\t\treturn revisions[ver], nil\n\t}\n\n\treturn \"\", fmt.Errorf(\"no valid versions found in %+v\", versions)\n}", "func (r *RLS) CreateRelease(ctx context.Context, req *github.RepositoryRelease, recreate bool) (*github.RepositoryRelease, error) {\n\n\t// When draft release creation is requested,\n\t// create it without any check (it can).\n\tif *req.Draft {\n\t\tfmt.Fprintln(r.outStream, \"==> Create a draft release\")\n\t\treturn r.GitHub.CreateRelease(ctx, req)\n\t}\n\n\t// Check release exists.\n\t// If release is not found, then create a new release.\n\trelease, err := r.GitHub.GetRelease(ctx, *req.TagName)\n\tif err != nil {\n\t\tif err != RelaseNotFound {\n\t\t\treturn nil, errors.Wrap(err, \"failed to get release\")\n\t\t}\n\t\tif recreate {\n\t\t\tfmt.Fprintf(r.outStream,\n\t\t\t\t\"WARNING: '-recreate' is specified but release (%s) not found\",\n\t\t\t\t*req.TagName)\n\t\t}\n\n\t\tfmt.Fprintln(r.outStream, \"==> Create a new release\")\n\t\treturn r.GitHub.CreateRelease(ctx, req)\n\t}\n\n\t// recreate is not true. Then use that existing release.\n\tif !recreate {\n\t\tfmt.Fprintf(r.outStream, \"WARNING: found release (%s). 
Use existing one.\\n\",\n\t\t\t*req.TagName)\n\t\treturn release, nil\n\t}\n\n\t// When recreate is requested, delete existing release and create a\n\t// new release.\n\tfmt.Fprintln(r.outStream, \"==> Recreate a release\")\n\tif err := r.DeleteRelease(ctx, *release.ID, *req.TagName); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn r.GitHub.CreateRelease(ctx, req)\n}", "func (repo BoshDirectorRepository) DeleteRelease(name string, version string) (apiResponse net.ApiResponse) {\n\tpath := fmt.Sprintf(\"/releases/%s?force=true&version=%s\", name, version)\n\tapiResponse = repo.gateway.DeleteResource(repo.config.TargetURL+path, repo.config.Username, repo.config.Password)\n\tif apiResponse.IsNotSuccessful() {\n\t\treturn\n\t}\n\tif !apiResponse.IsRedirection() {\n\t\treturn\n\t}\n\n\tvar taskStatus models.TaskStatus\n\ttaskURL, err := url.Parse(apiResponse.RedirectLocation)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tapiResponse = repo.gateway.GetResource(repo.config.TargetURL+taskURL.Path, repo.config.Username, repo.config.Password, &taskStatus)\n\tif apiResponse.IsNotSuccessful() {\n\t\treturn\n\t}\n\n\t/* Progression should be: queued, progressing, done */\n\t/* TODO task might fail; end states: done, error, cancelled */\n\tfor taskStatus.State != \"done\" {\n\t\ttime.Sleep(1)\n\t\ttaskStatus, apiResponse = repo.GetTaskStatus(taskStatus.ID)\n\t\tif apiResponse.IsNotSuccessful() {\n\t\t\treturn\n\t\t}\n\t}\n\n\treturn\n}", "func (c Provider) Releases(params []string) (rs *results.ResultSet, err error) {\n\t// Query the API\n\tid := strings.Join(strings.Split(params[1], \"/\"), \"%2f\")\n\turl := fmt.Sprintf(TagsEndpoint, params[0], id)\n\tvar tags Tags\n\tif err = util.FetchJSON(url, \"releases\", &tags); err != nil {\n\t\treturn\n\t}\n\trs = tags.Convert(params[0], params[1])\n\treturn\n}", "func (d Document) Release() string {\n\tvalue, _ := d.labels[releaseLabel].(string)\n\n\treturn value\n}", "func (a *Client) GetReleaseEnvironment(params 
*GetReleaseEnvironmentParams, authInfo runtime.ClientAuthInfoWriter) (*GetReleaseEnvironmentOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetReleaseEnvironmentParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"Get Release Environment\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/{organization}/{project}/_apis/Release/releases/{releaseId}/environments/{environmentId}\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &GetReleaseEnvironmentReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetReleaseEnvironmentOK), nil\n\n}", "func runReleaseCmd(cmd *cobra.Command, args []string) {\n\tconfigFile, _ := cmd.Flags().GetString(\"config\")\n\tconfig := &config.Config{}\n\terr := config.Load(configFile)\n\tif err != nil {\n\t\tfmt.Printf(\"could not load config file: %v\\n\", err)\n\t\tos.Exit(1)\n\t\treturn\n\t}\n\n\tspinner, err := initSpinner(fmt.Sprintf(\"Releasing v%s of %s\", args[0], config.Repository))\n\tif err != nil {\n\t\tfmt.Println(\"could not init spinner\")\n\t\tos.Exit(1)\n\t\treturn\n\t}\n\tspinner.Start()\n\n\tnewRelease, err := github.NewRelease(config, args, spinner)\n\tif err != nil {\n\t\tspinner.StopFailMessage(fmt.Sprintf(\"%v\", err))\n\t\tspinner.StopFail()\n\t\tos.Exit(1)\n\t\treturn\n\t}\n\n\tcl, err := changelog.HandleChangelog(newRelease.ProjectName, newRelease.Version, newRelease.Date, spinner)\n\tif err != nil {\n\t\tspinner.StopFailMessage(fmt.Sprintf(\"%v\", err))\n\t\tspinner.StopFail()\n\t\tos.Exit(1)\n\t\treturn\n\t}\n\n\tnewRelease.Changelog = cl\n\n\tvar binaryPath string\n\tskipBinary, _ := cmd.Flags().GetBool(\"skipBinary\")\n\tif !skipBinary {\n\t\t// set project build path so we have a 
predictable location\n\t\tbinaryPath = fmt.Sprintf(binaryPathFmt, newRelease.ProjectName, newRelease.Version)\n\t\trunBuildCmd(cmd, []string{newRelease.Version, binaryPath})\n\t}\n\n\ttokenFile, _ := cmd.Flags().GetString(\"tokenFile\")\n\terr = newRelease.CreateGithubRelease(tokenFile, binaryPath, spinner)\n\tif err != nil {\n\t\tspinner.StopFailMessage(fmt.Sprintf(\"%v\", err))\n\t\tspinner.StopFail()\n\t\tos.Exit(1)\n\t\treturn\n\t}\n\n\tspinner.Suffix(\" Finished release\")\n\tspinner.Stop()\n}", "func (s *Services) RandomRelease(ctx context.Context, request *empty.Empty) (*proto.ReleaseResponse, error) {\n\tvar result models.Release\n\tquery := s.DB\n\n\tif err := query.Order(gorm.Expr(\"random()\")).First(&result).Error; err != nil {\n\n\t\t// If nothing was found\n\t\tif gorm.IsRecordNotFoundError(err) {\n\t\t\treturn &proto.ReleaseResponse{Release: nil}, nil\n\t\t}\n\n\t\tlog.Println(err)\n\t\treturn nil, err\n\t}\n\n\treturn &proto.ReleaseResponse{Release: result.ToProto()}, nil\n}", "func GetLatestReleaseURL(fetchURL string) (string, string, int, error) {\n\tresp, err := http.Get(fetchURL)\n\tif err != nil {\n\t\treturn \"\", \"\", 0, fmt.Errorf(\"failed getting latest release of rclone-webui: %w\", err)\n\t}\n\tdefer fs.CheckClose(resp.Body, &err)\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn \"\", \"\", 0, fmt.Errorf(\"bad HTTP status %d (%s) when fetching %s\", resp.StatusCode, resp.Status, fetchURL)\n\t}\n\tresults := gitHubRequest{}\n\tif err := json.NewDecoder(resp.Body).Decode(&results); err != nil {\n\t\treturn \"\", \"\", 0, fmt.Errorf(\"could not decode results from http request: %w\", err)\n\t}\n\tif len(results.Assets) < 1 {\n\t\treturn \"\", \"\", 0, errors.New(\"could not find an asset in the release. 
\" +\n\t\t\t\"check if asset was successfully added in github release assets\")\n\t}\n\tres := results.Assets[0].BrowserDownloadURL\n\ttag := results.TagName\n\tsize := results.Assets[0].Size\n\n\treturn res, tag, size, nil\n}", "func NewRelease(tag string) Release {\n\treturn Release{\n\t\tTag: tag,\n\t}\n}", "func ListReleases(ctx *context.APIContext) {\n\t// swagger:operation GET /repos/{owner}/{repo}/releases repository repoListReleases\n\t// ---\n\t// summary: List a repo's releases\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// - name: draft\n\t// in: query\n\t// description: filter (exclude / include) drafts, if you dont have repo write access none will show\n\t// type: boolean\n\t// - name: pre-release\n\t// in: query\n\t// description: filter (exclude / include) pre-releases\n\t// type: boolean\n\t// - name: per_page\n\t// in: query\n\t// description: page size of results, deprecated - use limit\n\t// type: integer\n\t// deprecated: true\n\t// - name: page\n\t// in: query\n\t// description: page number of results to return (1-based)\n\t// type: integer\n\t// - name: limit\n\t// in: query\n\t// description: page size of results\n\t// type: integer\n\t// responses:\n\t// \"200\":\n\t// \"$ref\": \"#/responses/ReleaseList\"\n\tlistOptions := utils.GetListOptions(ctx)\n\tif listOptions.PageSize == 0 && ctx.FormInt(\"per_page\") != 0 {\n\t\tlistOptions.PageSize = ctx.FormInt(\"per_page\")\n\t}\n\n\topts := repo_model.FindReleasesOptions{\n\t\tListOptions: listOptions,\n\t\tIncludeDrafts: ctx.Repo.AccessMode >= perm.AccessModeWrite || ctx.Repo.UnitAccessMode(unit.TypeReleases) >= perm.AccessModeWrite,\n\t\tIncludeTags: false,\n\t\tIsDraft: ctx.FormOptionalBool(\"draft\"),\n\t\tIsPreRelease: 
ctx.FormOptionalBool(\"pre-release\"),\n\t}\n\n\treleases, err := repo_model.GetReleasesByRepoID(ctx, ctx.Repo.Repository.ID, opts)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetReleasesByRepoID\", err)\n\t\treturn\n\t}\n\trels := make([]*api.Release, len(releases))\n\tfor i, release := range releases {\n\t\tif err := release.LoadAttributes(ctx); err != nil {\n\t\t\tctx.Error(http.StatusInternalServerError, \"LoadAttributes\", err)\n\t\t\treturn\n\t\t}\n\t\trels[i] = convert.ToAPIRelease(ctx, ctx.Repo.Repository, release)\n\t}\n\n\tfilteredCount, err := repo_model.CountReleasesByRepoID(ctx.Repo.Repository.ID, opts)\n\tif err != nil {\n\t\tctx.InternalServerError(err)\n\t\treturn\n\t}\n\n\tctx.SetLinkHeader(int(filteredCount), listOptions.PageSize)\n\tctx.SetTotalCountHeader(filteredCount)\n\tctx.JSON(http.StatusOK, rels)\n}", "func (r *ReleaseNotes) Get(prNumber int) *ReleaseNote {\n\treturn r.byPR[prNumber]\n}", "func NewRelease(executer executer.Executer, config Config) Release {\n\treturn &release{executer: executer, config: config}\n}", "func (p *GetAnnotationsParams) ByRelease(name, revision string) {\n\tp.Tags = append(p.Tags,\n\t\t\"heritage=chronologist\",\n\t\t\"release_name=\"+name,\n\t\t\"release_revision=\"+revision,\n\t)\n}", "func (s *Server) HandleRelease(w http.ResponseWriter, r *http.Request) {\n\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\thttp.Error(w, \"Bad Request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tvar release *shared.ReleaseEvent\n\terr = json.Unmarshal(body, &release)\n\tif err != nil {\n\t\thttp.Error(w, \"Bad Request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// Tag the request with an ID for tracing in the logs.\n\trelease.RequestID = nuid.Next()\n\tfmt.Println(release)\n\n\t// Publish event to the NATS server\n\tnc := s.NATS()\n\t\n\trelease.RequestID = nuid.Next()\n\trelease_event := shared.ReleaseEvent{release.ID, release.Time, release.NextState, release.PostMedication, 
release.Notes, release.RequestID}\n\trel_event, err := json.Marshal(release_event)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn\n\t}\n\n\tlog.Printf(\"requestID:%s - Publishing inspection event with patientID %d\\n\", release.RequestID, release.ID)\n\t// Publishing the message to NATS Server\n\tnc.Publish(\"patient.release\", rel_event)\n\n\tjson.NewEncoder(w).Encode(\"Release event published\")\n}", "func releases(ctx context.Context, c *github.Client, org string, project string) ([]*release, error) {\n\tvar result []*release\n\n\topts := &github.ListOptions{PerPage: 100}\n\n\tklog.Infof(\"Downloading releases for %s/%s ...\", org, project)\n\n\tfor page := 1; page != 0; {\n\t\topts.Page = page\n\t\trs, resp, err := c.Repositories.ListReleases(ctx, org, project, opts)\n\t\tif err != nil {\n\t\t\treturn result, err\n\t\t}\n\n\t\tpage = resp.NextPage\n\t\tuntil := time.Now()\n\n\t\tfor _, r := range rs {\n\t\t\tname := r.GetName()\n\t\t\tif name == \"\" {\n\t\t\t\tname = r.GetTagName()\n\t\t\t}\n\n\t\t\trel := &release{\n\t\t\t\tName: name,\n\t\t\t\tDraft: r.GetDraft(),\n\t\t\t\tPrerelease: r.GetPrerelease(),\n\t\t\t\tPublishedAt: r.GetPublishedAt().Time,\n\t\t\t\tActiveUntil: until,\n\t\t\t\tDownloads: map[string]int{},\n\t\t\t\tDownloadRatios: map[string]float64{},\n\t\t\t}\n\n\t\t\tfor _, a := range r.Assets {\n\t\t\t\tif ignoreAssetRe.MatchString(a.GetName()) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\trel.Downloads[a.GetName()] = a.GetDownloadCount()\n\t\t\t\trel.DownloadsTotal += int64(a.GetDownloadCount())\n\t\t\t}\n\n\t\t\tif !rel.Draft && !rel.Prerelease {\n\t\t\t\tuntil = rel.PublishedAt\n\t\t\t}\n\n\t\t\tresult = append(result, rel)\n\t\t}\n\t}\n\n\tfor _, r := range result {\n\t\tr.DaysActive = r.ActiveUntil.Sub(r.PublishedAt).Hours() / 24\n\t\tr.DownloadsPerDay = float64(r.DownloadsTotal) / r.DaysActive\n\n\t\tfor k, v := range r.Downloads {\n\t\t\tr.DownloadRatios[k] = float64(v) / float64(r.DownloadsTotal)\n\t\t}\n\t}\n\n\treturn result, 
nil\n}", "func (r *RepositoryRelease) GetName() string {\n\tif r == nil || r.Name == nil {\n\t\treturn \"\"\n\t}\n\treturn *r.Name\n}", "func (t TargetProductNameRepository) Get(productID int) (string, error) {\n\turl := fmt.Sprintf(redskyAPI, productID)\n\tlog.Printf(\"Making request to %s\", url)\n\n\tresponse, err := t.httpClient.Get(url)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdefer response.Body.Close()\n\tbody, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn t.readTitle(body)\n}", "func (v1 v1RPMInfo) GetRPMRelease() string {\n\t// v1 doesn't support displaying release number, so return empty.\n\treturn \"\"\n}", "func Get(repo repository.Repo, revision string) (*Review, error) {\n\tsummary, err := GetSummary(repo, revision)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif summary == nil {\n\t\treturn nil, nil\n\t}\n\treturn summary.Details()\n}", "func (s *GiteaSource) DownloadReleaseAsset(owner, repo string, releaseID, id int64) (io.ReadCloser, error) {\n\terr := checkOwnerRepoParameters(owner, repo)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// create a new http client so the GitHub library can download the redirected file (if any)\n\t// don't pass the \"default\" one as it could be the one it's already using\n\tattachment, _, err := s.api.GetReleaseAttachment(owner, repo, releaseID, id)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to call Gitea Releases API for getting the asset ID %d on repository '%s/%s': %w\", id, owner, repo, err)\n\t}\n\n\tclient := &http.Client{}\n\treq, err := http.NewRequest(\"GET\", attachment.DownloadURL, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlog.Print(err)\n\n\treq.Header.Set(\"Authorization\", \"token \"+s.token)\n\trc, err := client.Do(req)\n\tlog.Print(err)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn rc.Body, nil\n}", "func GetVersion() string {\n\treturn fmt.Sprintf(\"%s (git+sha %s)\", Release, GitSHA)\n}", "func (hdr 
RPMHeader) Release() string {\n\treturn hdr.Tag(\"Release\")[0]\n}", "func (v2 v2RPMInfo) GetRPMRelease() string {\n\treturn v2.Release\n}", "func (o *UpdatesV3Request) GetReleasever() string {\n\tif o == nil || o.Releasever == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Releasever\n}", "func CreateRelease(ctx *context.APIContext) {\n\t// swagger:operation POST /repos/{owner}/{repo}/releases repository repoCreateRelease\n\t// ---\n\t// summary: Create a release\n\t// consumes:\n\t// - application/json\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// - name: body\n\t// in: body\n\t// schema:\n\t// \"$ref\": \"#/definitions/CreateReleaseOption\"\n\t// responses:\n\t// \"201\":\n\t// \"$ref\": \"#/responses/Release\"\n\t// \"404\":\n\t// \"$ref\": \"#/responses/notFound\"\n\t// \"409\":\n\t// \"$ref\": \"#/responses/error\"\n\tform := web.GetForm(ctx).(*api.CreateReleaseOption)\n\trel, err := repo_model.GetRelease(ctx.Repo.Repository.ID, form.TagName)\n\tif err != nil {\n\t\tif !repo_model.IsErrReleaseNotExist(err) {\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetRelease\", err)\n\t\t\treturn\n\t\t}\n\t\t// If target is not provided use default branch\n\t\tif len(form.Target) == 0 {\n\t\t\tform.Target = ctx.Repo.Repository.DefaultBranch\n\t\t}\n\t\trel = &repo_model.Release{\n\t\t\tRepoID: ctx.Repo.Repository.ID,\n\t\t\tPublisherID: ctx.Doer.ID,\n\t\t\tPublisher: ctx.Doer,\n\t\t\tTagName: form.TagName,\n\t\t\tTarget: form.Target,\n\t\t\tTitle: form.Title,\n\t\t\tNote: form.Note,\n\t\t\tIsDraft: form.IsDraft,\n\t\t\tIsPrerelease: form.IsPrerelease,\n\t\t\tIsTag: false,\n\t\t\tRepo: ctx.Repo.Repository,\n\t\t}\n\t\tif err := release_service.CreateRelease(ctx.Repo.GitRepo, rel, nil, \"\"); err != nil {\n\t\t\tif 
repo_model.IsErrReleaseAlreadyExist(err) {\n\t\t\t\tctx.Error(http.StatusConflict, \"ReleaseAlreadyExist\", err)\n\t\t\t} else {\n\t\t\t\tctx.Error(http.StatusInternalServerError, \"CreateRelease\", err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tif !rel.IsTag {\n\t\t\tctx.Error(http.StatusConflict, \"GetRelease\", \"Release is has no Tag\")\n\t\t\treturn\n\t\t}\n\n\t\trel.Title = form.Title\n\t\trel.Note = form.Note\n\t\trel.IsDraft = form.IsDraft\n\t\trel.IsPrerelease = form.IsPrerelease\n\t\trel.PublisherID = ctx.Doer.ID\n\t\trel.IsTag = false\n\t\trel.Repo = ctx.Repo.Repository\n\t\trel.Publisher = ctx.Doer\n\t\trel.Target = form.Target\n\n\t\tif err = release_service.UpdateRelease(ctx.Doer, ctx.Repo.GitRepo, rel, nil, nil, nil); err != nil {\n\t\t\tctx.Error(http.StatusInternalServerError, \"UpdateRelease\", err)\n\t\t\treturn\n\t\t}\n\t}\n\tctx.JSON(http.StatusCreated, convert.ToAPIRelease(ctx, ctx.Repo.Repository, rel))\n}", "func DecodeRelease(data string) (*rspb.Release, error) {\n\t// base64 decode string\n\tb, err := b64.DecodeString(data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// For backwards compatibility with releases that were stored before\n\t// compression was introduced we skip decompression if the\n\t// gzip magic header is not found\n\tif bytes.Equal(b[0:3], magicGzip) {\n\t\tr, err := gzip.NewReader(bytes.NewReader(b))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tb2, err := ioutil.ReadAll(r)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tb = b2\n\t}\n\n\tvar rls rspb.Release\n\t// unmarshal protobuf bytes\n\tif err := proto.Unmarshal(b, &rls); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &rls, nil\n}", "func (c *FakeReleaseHistories) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ReleaseHistory, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewGetAction(releasehistoriesResource, c.ns, name), &v1alpha1.ReleaseHistory{})\n\n\tif obj == nil {\n\t\treturn nil, 
err\n\t}\n\treturn obj.(*v1alpha1.ReleaseHistory), err\n}", "func (gf *GitHubFetcher) Get(parentSpan tracer.Span, owner string, repo string, ref string) (tarball io.Reader, err error) {\n\tspan := tracer.StartSpan(\"github_fetcher.get\", tracer.ChildOf(parentSpan.Context()))\n\tdefer func() {\n\t\tspan.Finish(tracer.WithError(err))\n\t}()\n\topt := &github.RepositoryContentGetOptions{\n\t\tRef: ref,\n\t}\n\tctx, cf := context.WithTimeout(context.Background(), githubDownloadTimeoutSecs*time.Second)\n\tdefer cf()\n\n\texcludes, err := gf.parseDockerIgnoreIfExists(ctx, owner, repo, opt)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error parsing %v file: %v\", dockerIgnorePath, err)\n\t}\n\turl, resp, err := gf.c.Repositories.GetArchiveLink(ctx, owner, repo, github.Tarball, opt)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error getting archive link: %v\", err)\n\t}\n\tif resp.StatusCode > 399 {\n\t\treturn nil, fmt.Errorf(\"error status when getting archive link: %v\", resp.Status)\n\t}\n\tif url == nil {\n\t\treturn nil, fmt.Errorf(\"url is nil\")\n\t}\n\treturn gf.getArchive(url, excludes)\n}", "func LatestRelease() (string, error) {\n\tclient := github.NewClient(nil)\n\treleases, _, err := client.Repositories.ListReleases(\n\t\t\"caarlos0\", \"antibody\", nil,\n\t)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn *releases[0].TagName, nil\n}", "func (c Client) Release(id string, params *stripe.SubscriptionScheduleReleaseParams) (*stripe.SubscriptionSchedule, error) {\n\tpath := stripe.FormatURLPath(\"/v1/subscription_schedules/%s/release\", id)\n\tsched := &stripe.SubscriptionSchedule{}\n\terr := c.B.Call(http.MethodPost, path, c.Key, params, sched)\n\n\treturn sched, err\n}", "func (ghc GithubClient) Get(ctx context.Context, path string, output interface{}) error {\n\treq, err := http.NewRequestWithContext(ctx, \"GET\", ghc.CreateURL(path), nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tres, err := ghc.Do(req)\n\tif err != nil {\n\t\treturn 
err\n\t}\n\n\tdefer func() { _ = res.Body.Close() }()\n\tif err := json.NewDecoder(res.Body).Decode(output); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (m *ZebraFotaArtifact) GetReleaseNotesUrl()(*string) {\n val, err := m.GetBackingStore().Get(\"releaseNotesUrl\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*string)\n }\n return nil\n}", "func (r *Releaser) Create(name string) (*Release, error) {\n\trelease, _, err := r.client.Repositories.CreateRelease(context.Background(), r.owner, r.repository, &gogithub.RepositoryRelease{Name: gogithub.String(name), TagName: gogithub.String(name)})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlog.Printf(\"created release ID: %v, tag: %v\", *release.ID, *release.TagName)\n\treturn &Release{ID: *release.ID, TagName: *release.TagName, Releaser: r}, nil\n}", "func getRelease(secretName string) (release int) {\n\tpattern := regexp.MustCompile(`(\\d+)$`)\n\trelease, _ = strconv.Atoi(pattern.FindString(secretName))\n\treturn\n}", "func parseReleasesAPI() (releases, error) {\n\tr, err := http.Get(\"https://api.github.com/repos/eze-kiel/shaloc/releases\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer r.Body.Close()\n\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar rel releases\n\tif err = json.Unmarshal(body, &rel); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn rel, nil\n}", "func (g *Github) Get(url string) (*http.Response, error) {\n\treturn g.Do(http.MethodGet, url, http.NoBody)\n}", "func Release(version, commit, date string) {\n\tif version == \"\" {\n\t\tversion = \"dev\"\n\t} else if version[0] == 'v' {\n\t\tversion = version[1:]\n\t}\n\tif commit == \"\" {\n\t\tcommit = \"-\"\n\t}\n\tif date == \"\" {\n\t\tdate = \"-\"\n\t}\n\tVersion, Commit, Date = version, commit, date\n}", "func (s *Services) Releases(ctx context.Context, request *proto.ReleasesRequest) (*proto.ReleasesResponse, error) {\n\tvar result []models.Release\n\tvar 
resultCount int64\n\tquery := s.DB\n\n\tif request != nil && request.Query != nil {\n\t\tif request.Query.AnimeId != 0 {\n\t\t\tquery = query.Where(\"anime_id = ?\", request.Query.AnimeId)\n\t\t}\n\n\t\tif request.Query.Title != \"\" {\n\t\t\tquery = models.WhereFieldLikeString(\n\t\t\t\tquery,\n\t\t\t\tfmt.Sprintf(`\"%s\".title`, models.Release.TableName(models.Release{})),\n\t\t\t\trequest.Query.Title,\n\t\t\t)\n\t\t}\n\n\t\tif len(request.Query.Genres) > 0 {\n\t\t\t// This JOIN method is use of Correlated Subqueries when the foreign key is indexed, (good explanation on the link below)\n\t\t\t// https://www.periscopedata.com/blog/4-ways-to-join-only-the-first-row-in-sql\n\n\t\t\t// SELECT \"Releases\".* FROM \"Releases\" INNER JOIN (\n\t\t\t// \tSELECT * FROM \"Releases\" AS \"Release\" WHERE (\n\t\t\t// \t\tSELECT \"release_id\" FROM public.\"ReleaseGenres\" WHERE (\n\t\t\t// \t\t\tpublic.\"ReleaseGenres\".genre_id IN (1,4)) AND \"Release\".id = public.\"ReleaseGenres\".release_id LIMIT 1\n\t\t\t// ) IS NOT NULL) AS \"Release\" ON public.\"Releases\" .id = \"Release\".id\n\n\t\t\tquery = query.Joins(\n\t\t\t\tfmt.Sprintf(`INNER JOIN ( \n\t\t\t\t\tSELECT * FROM \"%s\" AS \"Release\" WHERE (\n\t\t\t\t\t\tSELECT \"release_id\" FROM public.\"ReleaseGenres\" WHERE (\n\t\t\t\t\t\t\tpublic.\"ReleaseGenres\".genre_id IN (?)) AND \"Release\".id = public.\"ReleaseGenres\".release_id LIMIT 1\n\t\t\t\t\t\t) IS NOT NULL) AS \"Release\" ON public.\"%s\" .id = \"Release\".id`,\n\t\t\t\t\tmodels.Release.TableName(models.Release{}),\n\t\t\t\t\tmodels.Release.TableName(models.Release{}),\n\t\t\t\t),\n\t\t\t\trequest.Query.Genres,\n\t\t\t)\n\t\t}\n\n\t\tif request.Query.Limit != 0 {\n\t\t\tquery = query.Limit(request.Query.Limit)\n\t\t}\n\n\t\tif request.Query.Offset != 0 {\n\t\t\tquery = query.Offset(request.Query.Offset)\n\t\t}\n\t}\n\n\tif err := query.Find(&result).Limit(nil).Offset(nil).Count(&resultCount).Error; err != nil {\n\t\tlog.Println(err)\n\t\treturn nil, 
err\n\t}\n\n\tfinalRes := []*proto.Release{}\n\n\tfor i := range result {\n\t\tfinalRes = append(finalRes, result[i].ToProto())\n\t}\n\n\treturn &proto.ReleasesResponse{Releases: finalRes, Count: resultCount}, nil\n}", "func (dao *releasedHookDao) Get(kit *kit.Kit, bizID, appID, releaseID uint32, tp table.HookType) (\n\t*table.ReleasedHook, error) {\n\tif bizID == 0 {\n\t\treturn nil, errf.New(errf.InvalidParameter, \"bizID is 0\")\n\t}\n\tif appID == 0 {\n\t\treturn nil, errf.New(errf.InvalidParameter, \"appID is 0\")\n\t}\n\tif err := tp.Validate(); err != nil {\n\t\treturn nil, err\n\t}\n\tm := dao.genQ.ReleasedHook\n\trh, err := m.WithContext(kit.Ctx).\n\t\tWhere(m.BizID.Eq(bizID), m.AppID.Eq(appID), m.ReleaseID.Eq(releaseID), m.HookType.Eq(tp.String())).Take()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcontent, err := base64.StdEncoding.DecodeString(rh.Content)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trh.Content = string(content)\n\treturn rh, nil\n}", "func GetAllReleases(ctx context.Context, client models.Client, opts models.ListReleasesOptions) (Releases, error) {\n\tvar (\n\t\tvariables = map[string]interface{}{\n\t\t\t\"cursor\": (*githubv4.String)(nil),\n\t\t\t\"owner\": githubv4.String(opts.Owner),\n\t\t\t\"name\": githubv4.String(opts.Repository),\n\t\t}\n\n\t\treleases = []Release{}\n\t)\n\n\tfor {\n\t\tq := &QueryListReleases{}\n\t\tif err := client.Query(ctx, q, variables); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treleases = append(releases, q.Repository.Releases.Nodes...)\n\t\tif !q.Repository.Releases.PageInfo.HasNextPage {\n\t\t\tbreak\n\t\t}\n\t\tvariables[\"cursor\"] = q.Repository.Releases.PageInfo.EndCursor\n\t}\n\n\treturn releases, nil\n}", "func DeleteRelease(ctx *context.APIContext) {\n\t// swagger:operation DELETE /repos/{owner}/{repo}/releases/{id} repository repoDeleteRelease\n\t// ---\n\t// summary: Delete a release\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: 
string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// - name: id\n\t// in: path\n\t// description: id of the release to delete\n\t// type: integer\n\t// format: int64\n\t// required: true\n\t// responses:\n\t// \"204\":\n\t// \"$ref\": \"#/responses/empty\"\n\t// \"404\":\n\t// \"$ref\": \"#/responses/notFound\"\n\t// \"405\":\n\t// \"$ref\": \"#/responses/empty\"\n\n\tid := ctx.ParamsInt64(\":id\")\n\trel, err := repo_model.GetReleaseByID(ctx, id)\n\tif err != nil && !repo_model.IsErrReleaseNotExist(err) {\n\t\tctx.Error(http.StatusInternalServerError, \"GetReleaseByID\", err)\n\t\treturn\n\t}\n\tif err != nil && repo_model.IsErrReleaseNotExist(err) ||\n\t\trel.IsTag || rel.RepoID != ctx.Repo.Repository.ID {\n\t\tctx.NotFound()\n\t\treturn\n\t}\n\tif err := release_service.DeleteReleaseByID(ctx, id, ctx.Doer, false); err != nil {\n\t\tif models.IsErrProtectedTagName(err) {\n\t\t\tctx.Error(http.StatusMethodNotAllowed, \"delTag\", \"user not allowed to delete protected tag\")\n\t\t\treturn\n\t\t}\n\t\tctx.Error(http.StatusInternalServerError, \"DeleteReleaseByID\", err)\n\t\treturn\n\t}\n\tctx.Status(http.StatusNoContent)\n}", "func (c *GitHub) CreateRelease(ctx context.Context, r git.Release) (*git.Release, error) {\n\tc.Logger.Debugf(\"Creating a release %+v\", r)\n\trelease, _, err := c.Client.CreateRelease(ctx, r.ID.Repository.Owner, r.ID.Repository.Name, &github.RepositoryRelease{\n\t\tName: github.String(r.Name),\n\t\tTagName: github.String(r.TagName.Name()),\n\t\tTargetCommitish: github.String(r.TargetCommitish),\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"GitHub API error: %w\", err)\n\t}\n\treturn &git.Release{\n\t\tID: git.ReleaseID{\n\t\t\tRepository: r.ID.Repository,\n\t\t\tInternalID: release.GetID(),\n\t\t},\n\t\tTagName: git.TagName(release.GetTagName()),\n\t\tTargetCommitish: release.GetTargetCommitish(),\n\t\tName: release.GetName(),\n\t}, nil\n}", 
"func (r *ReconcileBuildRun) GetBuildObject(ctx context.Context, objectName string, objectNS string, build *buildv1alpha1.Build) error {\n\tif err := r.client.Get(ctx, types.NamespacedName{Name: objectName, Namespace: objectNS}, build); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (r *Radarr) GetReleaseProfile(profileID int64) (*ReleaseProfile, error) {\n\treturn r.GetReleaseProfileContext(context.Background(), profileID)\n}", "func GetHelmRelease(path string) *HelmRelease {\n\n\tdat, err := ioutil.ReadFile(path)\n\tutils.CheckErrorOrDie(err)\n\n\tmarshaller := MapperYaml{}\n\tapp, err := marshaller.Unmarshall(dat)\n\tutils.CheckErrorOrDie(err)\n\n\treturn app\n}", "func (r *RefsService) GetTag(owner, repoSlug, name string, opts ...interface{}) (*Ref, *Response, error) {\n\tresult := new(Ref)\n\turlStr := r.client.requestURL(\"/repositories/%s/%s/refs/tags/%s\", owner, repoSlug, name)\n\turlStr, addOptErr := addQueryParams(urlStr, opts...)\n\tif addOptErr != nil {\n\t\treturn nil, nil, addOptErr\n\t}\n\n\tresponse, err := r.client.execute(\"GET\", urlStr, result, nil)\n\n\treturn result, response, err\n}", "func (operator *AccessOperator) CreateRelease(cxt context.Context, option *ReleaseOption) (string, error) {\n\tif option == nil {\n\t\treturn \"\", fmt.Errorf(\"Lost create Commit info\")\n\t}\n\n\tgrpcOptions := []grpc.CallOption{\n\t\tgrpc.WaitForReady(true),\n\t}\n\n\t// query business first.\n\tbusiness, app, err := getBusinessAndApp(operator, operator.Business, option.AppName)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\trequest := &accessserver.CreateReleaseReq{\n\t\tSeq: pkgcommon.Sequence(),\n\t\tBid: business.Bid,\n\t\tName: option.Name,\n\t\tCommitid: option.CommitID,\n\t\tCreator: operator.User,\n\t}\n\n\t// check strategy for this release.\n\tif len(option.StrategyName) != 0 {\n\t\tstrategy, styerr := operator.innerGetStrategyByID(cxt, business.Bid, app.Appid, option.StrategyName)\n\t\tif styerr != nil {\n\t\t\treturn \"\", 
styerr\n\t\t}\n\n\t\tif strategy == nil {\n\t\t\tlogger.V(3).Infof(\"CreateRelease: No relative Strategy %s with Release.\", option.StrategyName)\n\t\t\treturn \"\", fmt.Errorf(\"No relative Strategy %s\", option.StrategyName)\n\t\t}\n\t\trequest.Strategyid = strategy.Strategyid\n\t}\n\n\tresponse, err := operator.Client.CreateRelease(cxt, request, grpcOptions...)\n\tif err != nil {\n\t\tlogger.V(3).Infof(\n\t\t\t\"CreateRelease: post new Release %s for App[%s]/Cfgset[%s]/Commit %s failed, %s\",\n\t\t\toption.Name, option.AppName, option.CfgSetName, option.CommitID, err.Error(),\n\t\t)\n\t\treturn \"\", err\n\t}\n\tif response.ErrCode != common.ErrCode_E_OK {\n\t\tlogger.V(3).Infof(\n\t\t\t\"CreateStrategy: post new Release %s for App[%s]/Cfgset[%s]/Commit %s successfully, but reponse failed: %s\",\n\t\t\toption.Name, option.AppName, option.CfgSetName, option.CommitID, response.ErrMsg,\n\t\t)\n\t\treturn \"\", fmt.Errorf(\"%s\", response.ErrMsg)\n\t}\n\tif len(response.Releaseid) == 0 {\n\t\tlogger.V(3).Infof(\"CreateStrategy: BSCP system error, No ReleaseID response\")\n\t\treturn \"\", fmt.Errorf(\"Lost ReleaseID from configuraiotn platform\")\n\t}\n\treturn response.Releaseid, nil\n}", "func (c *Client) CreateRelease(ctx context.Context, req *github.RepositoryRelease) (*github.RepositoryRelease, error) {\n\n\trelease, res, err := c.Repositories.CreateRelease(context.TODO(), c.Owner, c.Repo, req)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to create a release\")\n\t}\n\n\tif res.StatusCode != http.StatusCreated {\n\t\treturn nil, errors.Errorf(\"create release: invalid status: %s\", res.Status)\n\t}\n\n\treturn release, nil\n}", "func (c *HTTP) GetManifest(ctx context.Context, release string) (*manifest.A2, error) {\n\turl := fmt.Sprintf(c.manifestURLFmt, release)\n\treturn c.manifestFromURL(ctx, url)\n}", "func (driver donutDriver) GetObject(bucket, object string) (io.ReadCloser, error) {\n\treturn nil, notImplemented()\n}" ]
[ "0.78475475", "0.75622654", "0.73541814", "0.7291853", "0.7269361", "0.7257871", "0.70396906", "0.7035385", "0.6988299", "0.695543", "0.67087036", "0.67073405", "0.665561", "0.66521287", "0.6647988", "0.6646991", "0.65817267", "0.64389217", "0.64276576", "0.6333727", "0.62475353", "0.6187226", "0.6182427", "0.6119821", "0.60657173", "0.60510945", "0.6026627", "0.59634393", "0.5928837", "0.58511084", "0.58330697", "0.58187616", "0.5774398", "0.5755584", "0.57437927", "0.5726849", "0.5726027", "0.5725593", "0.57196796", "0.5717888", "0.56832063", "0.5611171", "0.5608011", "0.560732", "0.56058556", "0.5602629", "0.55966216", "0.5576", "0.5571722", "0.5557117", "0.5554288", "0.5537108", "0.5522195", "0.5508259", "0.5490083", "0.5430867", "0.542474", "0.5387621", "0.53810996", "0.53572667", "0.53458804", "0.5345165", "0.5332892", "0.5327629", "0.53224456", "0.5315449", "0.53127927", "0.5312231", "0.5310549", "0.5308318", "0.5307627", "0.52911294", "0.52841043", "0.52645975", "0.52396786", "0.5235141", "0.52320325", "0.523184", "0.5230847", "0.52249366", "0.52128613", "0.52000105", "0.51845354", "0.51842374", "0.51778764", "0.5173289", "0.51631606", "0.51591206", "0.51588047", "0.5146175", "0.5132975", "0.5127322", "0.5120098", "0.5114741", "0.509415", "0.507948", "0.50698435", "0.50645816", "0.5060677", "0.5056508" ]
0.71325344
6
EditRelease edit a release object within the GitHub API
func (c *Client) EditRelease(ctx context.Context, releaseID int64, req *github.RepositoryRelease) (*github.RepositoryRelease, error) { var release *github.RepositoryRelease err := retry.Retry(3, 3*time.Second, func() error { var ( res *github.Response err error ) release, res, err = c.Repositories.EditRelease(context.TODO(), c.Owner, c.Repo, releaseID, req) if err != nil { return errors.Wrapf(err, "failed to edit release: %d", releaseID) } if res.StatusCode != http.StatusOK { return errors.Errorf("edit release: invalid status: %s", res.Status) } return nil }) return release, err }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func EditRelease(ctx *context.APIContext) {\n\t// swagger:operation PATCH /repos/{owner}/{repo}/releases/{id} repository repoEditRelease\n\t// ---\n\t// summary: Update a release\n\t// consumes:\n\t// - application/json\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// - name: id\n\t// in: path\n\t// description: id of the release to edit\n\t// type: integer\n\t// format: int64\n\t// required: true\n\t// - name: body\n\t// in: body\n\t// schema:\n\t// \"$ref\": \"#/definitions/EditReleaseOption\"\n\t// responses:\n\t// \"200\":\n\t// \"$ref\": \"#/responses/Release\"\n\t// \"404\":\n\t// \"$ref\": \"#/responses/notFound\"\n\n\tform := web.GetForm(ctx).(*api.EditReleaseOption)\n\tid := ctx.ParamsInt64(\":id\")\n\trel, err := repo_model.GetReleaseByID(ctx, id)\n\tif err != nil && !repo_model.IsErrReleaseNotExist(err) {\n\t\tctx.Error(http.StatusInternalServerError, \"GetReleaseByID\", err)\n\t\treturn\n\t}\n\tif err != nil && repo_model.IsErrReleaseNotExist(err) ||\n\t\trel.IsTag || rel.RepoID != ctx.Repo.Repository.ID {\n\t\tctx.NotFound()\n\t\treturn\n\t}\n\n\tif len(form.TagName) > 0 {\n\t\trel.TagName = form.TagName\n\t}\n\tif len(form.Target) > 0 {\n\t\trel.Target = form.Target\n\t}\n\tif len(form.Title) > 0 {\n\t\trel.Title = form.Title\n\t}\n\tif len(form.Note) > 0 {\n\t\trel.Note = form.Note\n\t}\n\tif form.IsDraft != nil {\n\t\trel.IsDraft = *form.IsDraft\n\t}\n\tif form.IsPrerelease != nil {\n\t\trel.IsPrerelease = *form.IsPrerelease\n\t}\n\tif err := release_service.UpdateRelease(ctx.Doer, ctx.Repo.GitRepo, rel, nil, nil, nil); err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"UpdateRelease\", err)\n\t\treturn\n\t}\n\n\t// reload data from database\n\trel, err = repo_model.GetReleaseByID(ctx, id)\n\tif err != 
nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetReleaseByID\", err)\n\t\treturn\n\t}\n\tif err := rel.LoadAttributes(ctx); err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"LoadAttributes\", err)\n\t\treturn\n\t}\n\tctx.JSON(http.StatusOK, convert.ToAPIRelease(ctx, ctx.Repo.Repository, rel))\n}", "func (s *ReleaseService) UpdateRelease(id uint, r *Release, t ReleaseType, authToken string) (*Release, error) {\n\tvar (\n\t\tmethod = http.MethodPatch\n\t\tpath = fmt.Sprintf(\"/releases/%d\", id)\n\t)\n\treq := s.client.newRequest(path, method)\n\taddJWTToRequest(req, authToken)\n\tr.Type = t\n\terr := addBodyToRequestAsJSON(req, r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn s.updateRelease(req)\n}", "func (operator *AccessOperator) UpdateRelease(cxt context.Context, option *ReleaseOption) error {\n\t//business first\n\tbusiness, _, err := getBusinessAndApp(operator, operator.Business, option.AppName)\n\tif err != nil {\n\t\treturn err\n\t}\n\trequest := &accessserver.UpdateReleaseReq{\n\t\tSeq: pkgcommon.Sequence(),\n\t\tBid: business.Bid,\n\t\tReleaseid: option.ReleaseID,\n\t\tName: option.Name,\n\t\tOperator: operator.User,\n\t}\n\tgrpcOptions := []grpc.CallOption{\n\t\tgrpc.WaitForReady(true),\n\t}\n\tresponse, err := operator.Client.UpdateRelease(cxt, request, grpcOptions...)\n\tif err != nil {\n\t\tlogger.V(3).Infof(\"UpdateRelease %s failed, %s\", option.Name, err.Error())\n\t\treturn err\n\t}\n\tif response.ErrCode != common.ErrCode_E_OK {\n\t\tlogger.V(3).Infof(\"UpdateRelease %s successfully, but response Err, %s\", option.ReleaseID, response.ErrMsg)\n\t\treturn fmt.Errorf(\"%s\", response.ErrMsg)\n\t}\n\treturn nil\n\n}", "func (a *Client) UpdateRelease(params *UpdateReleaseParams, authInfo runtime.ClientAuthInfoWriter) (*UpdateReleaseOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewUpdateReleaseParams()\n\t}\n\n\tresult, err := 
a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"Update Release\",\n\t\tMethod: \"PUT\",\n\t\tPathPattern: \"/{organization}/{project}/_apis/release/releases/{releaseId}\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &UpdateReleaseReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*UpdateReleaseOK), nil\n\n}", "func (a *Client) UpdateReleaseResource(params *UpdateReleaseResourceParams, authInfo runtime.ClientAuthInfoWriter) (*UpdateReleaseResourceOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewUpdateReleaseResourceParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"Update Release Resource\",\n\t\tMethod: \"PATCH\",\n\t\tPathPattern: \"/{organization}/{project}/_apis/release/releases/{releaseId}\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &UpdateReleaseResourceReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*UpdateReleaseResourceOK), nil\n\n}", "func (p Database) Edit(d interface{}) (string, error) {\n\tjsonBuf, err := json.Marshal(d)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tidRev := idAndRev{}\n\tmust(json.Unmarshal(jsonBuf, &idRev))\n\tif idRev.ID == \"\" {\n\t\treturn \"\", errNoID\n\t}\n\tif idRev.Rev == \"\" {\n\t\treturn \"\", errNoRev\n\t}\n\tu := fmt.Sprintf(\"%s/%s\", p.DBURL(), url.QueryEscape(idRev.ID))\n\tir := Response{}\n\tif _, err = interact(\"PUT\", u, p.defaultHdrs, jsonBuf, &ir); err != nil {\n\t\treturn 
\"\", err\n\t}\n\treturn ir.Rev, nil\n}", "func Release(version, commit, date string) {\n\tif version == \"\" {\n\t\tversion = \"dev\"\n\t} else if version[0] == 'v' {\n\t\tversion = version[1:]\n\t}\n\tif commit == \"\" {\n\t\tcommit = \"-\"\n\t}\n\tif date == \"\" {\n\t\tdate = \"-\"\n\t}\n\tVersion, Commit, Date = version, commit, date\n}", "func (c *gitlabClient) CreateRelease(ctx *context.Context, body string) (releaseID string, err error) {\n\ttitle, err := tmpl.New(ctx).Apply(ctx.Config.Release.NameTemplate)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tprojectID := ctx.Config.Release.GitLab.Owner + \"/\" + ctx.Config.Release.GitLab.Name\n\tlog.WithFields(log.Fields{\n\t\t\"owner\": ctx.Config.Release.GitLab.Owner,\n\t\t\"name\": ctx.Config.Release.GitLab.Name,\n\t}).Debug(\"projectID\")\n\n\tname := title\n\ttagName := ctx.Git.CurrentTag\n\trelease, resp, err := c.client.Releases.GetRelease(projectID, tagName)\n\tif err != nil && resp.StatusCode != 403 {\n\t\treturn \"\", err\n\t}\n\n\tif resp.StatusCode == 403 {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"err\": err.Error(),\n\t\t}).Debug(\"get release\")\n\n\t\tdescription := body\n\t\tref := ctx.Git.Commit\n\t\tgitURL := ctx.Git.URL\n\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"name\": name,\n\t\t\t\"description\": description,\n\t\t\t\"ref\": ref,\n\t\t\t\"url\": gitURL,\n\t\t}).Debug(\"creating release\")\n\t\trelease, _, err = c.client.Releases.CreateRelease(projectID, &gitlab.CreateReleaseOptions{\n\t\t\tName: &name,\n\t\t\tDescription: &description,\n\t\t\tRef: &ref,\n\t\t\tTagName: &tagName,\n\t\t})\n\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"err\": err.Error(),\n\t\t\t}).Debug(\"error create release\")\n\t\t\treturn \"\", err\n\t\t}\n\t\tlog.WithField(\"name\", release.Name).Info(\"release created\")\n\t} else {\n\t\tdesc := body\n\t\tif release != nil && release.DescriptionHTML != \"\" {\n\t\t\tdesc = release.DescriptionHTML\n\t\t}\n\n\t\trelease, _, err = 
c.client.Releases.UpdateRelease(projectID, tagName, &gitlab.UpdateReleaseOptions{\n\t\t\tName: &name,\n\t\t\tDescription: &desc,\n\t\t})\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"err\": err.Error(),\n\t\t\t}).Debug(\"error update release\")\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tlog.WithField(\"name\", release.Name).Info(\"release updated\")\n\t}\n\n\treturn tagName, err // gitlab references a tag in a repo by its name\n}", "func (s *Services) Release(ctx context.Context, request *proto.ReleaseRequest) (*proto.ReleaseResponse, error) {\n\tvar result models.Release\n\tquery := s.DB\n\n\tif request.Id != 0 {\n\t\tquery = query.Where(\"id = ?\", request.Id)\n\t}\n\n\tif err := query.First(&result).Error; err != nil {\n\n\t\t// If nothing was found\n\t\tif gorm.IsRecordNotFoundError(err) {\n\t\t\treturn &proto.ReleaseResponse{Release: nil}, nil\n\t\t}\n\n\t\tlog.Println(err)\n\t\treturn nil, err\n\t}\n\n\treturn &proto.ReleaseResponse{Release: result.ToProto()}, nil\n}", "func (g *GHR) CreateRelease(ctx context.Context, req *github.RepositoryRelease, recreate bool) (*github.RepositoryRelease, error) {\n\n\t// When draft release creation is requested,\n\t// create it without any check (it can).\n\tif *req.Draft {\n\t\tfmt.Fprintln(g.outStream, \"==> Create a draft release\")\n\t\treturn g.GitHub.CreateRelease(ctx, req)\n\t}\n\n\t// Always create release as draft first. 
After uploading assets, turn off\n\t// draft unless the `-draft` flag is explicitly specified.\n\t// It is to prevent users from seeing empty release.\n\treq.Draft = github.Bool(true)\n\n\t// Check release exists.\n\t// If release is not found, then create a new release.\n\trelease, err := g.GitHub.GetRelease(ctx, *req.TagName)\n\tif err != nil {\n\t\tif !errors.Is(err, ErrReleaseNotFound) {\n\t\t\treturn nil, fmt.Errorf(\"failed to get release: %w\", err)\n\t\t}\n\t\tDebugf(\"Release (with tag %s) not found: create a new one\",\n\t\t\t*req.TagName)\n\n\t\tif recreate {\n\t\t\tfmt.Fprintf(g.outStream,\n\t\t\t\t\"WARNING: '-recreate' is specified but release (%s) not found\",\n\t\t\t\t*req.TagName)\n\t\t}\n\n\t\tfmt.Fprintln(g.outStream, \"==> Create a new release\")\n\t\treturn g.GitHub.CreateRelease(ctx, req)\n\t}\n\n\t// recreate is not true. Then use that existing release.\n\tif !recreate {\n\t\tDebugf(\"Release (with tag %s) exists: use existing one\",\n\t\t\t*req.TagName)\n\n\t\tfmt.Fprintf(g.outStream, \"WARNING: found release (%s). 
Use existing one.\\n\",\n\t\t\t*req.TagName)\n\t\treturn release, nil\n\t}\n\n\t// When recreate is requested, delete existing release and create a\n\t// new release.\n\tfmt.Fprintln(g.outStream, \"==> Recreate a release\")\n\tif err := g.DeleteRelease(ctx, *release.ID, *req.TagName); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn g.GitHub.CreateRelease(ctx, req)\n}", "func (h *handler) Release(ctx context.Context, evt *github.ReleaseEvent) error {\n\tif evt.GetAction() != \"released\" {\n\t\tlogrus.WithField(\"action\", evt.GetAction()).Info(\"ignoring release event\")\n\t\treturn nil\n\t}\n\tnotifyRepos := h.cfg.ReleaseDispatchRepos()\n\tlogrus.WithField(\"repos\", len(notifyRepos)).Info(\"notifying repositories of release\")\n\tif len(notifyRepos) == 0 {\n\t\treturn nil\n\t}\n\n\tgh := repo.NewGitHubClient(h.cfg.GitHubToken)\n\tfeedbackIssue, err := releaseFeedbackIssue(ctx, gh, evt, notifyRepos)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlogrus.WithField(\"issue_number\", feedbackIssue.Number).Debug(\"created feedback issue\")\n\n\tdispatchOpts, err := h.releaseDispatchOptions(evt, feedbackIssue)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, notifyRepo := range notifyRepos {\n\t\tnotifyRepoParts := strings.SplitN(notifyRepo, \"/\", 2)\n\t\towner := notifyRepoParts[0]\n\t\tname := notifyRepoParts[1]\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"owner\": owner,\n\t\t\t\"name\": name,\n\t\t}).Debug(\"dispatching release to repository\")\n\t\tif _, _, err := gh.Repositories.Dispatch(ctx, owner, name, dispatchOpts); err != nil {\n\t\t\tlogrus.WithError(err).Warn(\"error dispatching update\")\n\t\t}\n\t}\n\n\treturn nil\n}", "func (c *FakeClient) UpdateRelease(rlsName, chStr string, opts ...UpdateOption) (*release.Release, error) {\n\treturn c.UpdateReleaseFromChart(rlsName, &chart.Chart{}, opts...)\n}", "func EditPost(req *http.Request, params martini.Params, res render.Render) {\n\tvar post Post\n\tpost.Slug = params[\"slug\"]\n\tpost, err := 
post.Get()\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tres.JSON(500, map[string]interface{}{\"error\": \"Internal server error\"})\n\t\treturn\n\t}\n\tres.HTML(200, \"post/edit\", post)\n}", "func runReleaseCmd(cmd *cobra.Command, args []string) {\n\tconfigFile, _ := cmd.Flags().GetString(\"config\")\n\tconfig := &config.Config{}\n\terr := config.Load(configFile)\n\tif err != nil {\n\t\tfmt.Printf(\"could not load config file: %v\\n\", err)\n\t\tos.Exit(1)\n\t\treturn\n\t}\n\n\tspinner, err := initSpinner(fmt.Sprintf(\"Releasing v%s of %s\", args[0], config.Repository))\n\tif err != nil {\n\t\tfmt.Println(\"could not init spinner\")\n\t\tos.Exit(1)\n\t\treturn\n\t}\n\tspinner.Start()\n\n\tnewRelease, err := github.NewRelease(config, args, spinner)\n\tif err != nil {\n\t\tspinner.StopFailMessage(fmt.Sprintf(\"%v\", err))\n\t\tspinner.StopFail()\n\t\tos.Exit(1)\n\t\treturn\n\t}\n\n\tcl, err := changelog.HandleChangelog(newRelease.ProjectName, newRelease.Version, newRelease.Date, spinner)\n\tif err != nil {\n\t\tspinner.StopFailMessage(fmt.Sprintf(\"%v\", err))\n\t\tspinner.StopFail()\n\t\tos.Exit(1)\n\t\treturn\n\t}\n\n\tnewRelease.Changelog = cl\n\n\tvar binaryPath string\n\tskipBinary, _ := cmd.Flags().GetBool(\"skipBinary\")\n\tif !skipBinary {\n\t\t// set project build path so we have a predictable location\n\t\tbinaryPath = fmt.Sprintf(binaryPathFmt, newRelease.ProjectName, newRelease.Version)\n\t\trunBuildCmd(cmd, []string{newRelease.Version, binaryPath})\n\t}\n\n\ttokenFile, _ := cmd.Flags().GetString(\"tokenFile\")\n\terr = newRelease.CreateGithubRelease(tokenFile, binaryPath, spinner)\n\tif err != nil {\n\t\tspinner.StopFailMessage(fmt.Sprintf(\"%v\", err))\n\t\tspinner.StopFail()\n\t\tos.Exit(1)\n\t\treturn\n\t}\n\n\tspinner.Suffix(\" Finished release\")\n\tspinner.Stop()\n}", "func (s *ReleaseServer) UpdateRelease(c ctx.Context, req *services.UpdateReleaseRequest) (*services.UpdateReleaseResponse, error) {\n\tif err := validateReleaseName(req.Name); err 
!= nil {\n\t\ts.Log(\"updateRelease: Release name is invalid: %s\", req.Name)\n\t\treturn nil, err\n\t}\n\ts.Log(\"preparing update for %s\", req.Name)\n\tcurrentRelease, updatedRelease, err := s.prepareUpdate(req)\n\tif err != nil {\n\t\tif req.Force {\n\t\t\t// Use the --force, Luke.\n\t\t\treturn s.performUpdateForce(req)\n\t\t}\n\t\treturn nil, err\n\t}\n\n\tif !req.DryRun {\n\t\ts.Log(\"creating updated release for %s\", req.Name)\n\t\tif err := s.env.Releases.Create(updatedRelease); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\ts.Log(\"performing update for %s\", req.Name)\n\tres, err := s.performUpdate(currentRelease, updatedRelease, req)\n\tif err != nil {\n\t\treturn res, err\n\t}\n\n\tif !req.DryRun {\n\t\ts.Log(\"updating status for updated release for %s\", req.Name)\n\t\tif err := s.env.Releases.Update(updatedRelease); err != nil {\n\t\t\treturn res, err\n\t\t}\n\t}\n\n\treturn res, nil\n}", "func (c *client) EditPullRequest(org, repo string, number int, pr *PullRequest) (*PullRequest, error) {\n\tdurationLogger := c.log(\"EditPullRequest\", org, repo, number)\n\tdefer durationLogger()\n\n\tif c.dry {\n\t\treturn pr, nil\n\t}\n\tedit := struct {\n\t\tTitle string `json:\"title,omitempty\"`\n\t\tBody string `json:\"body,omitempty\"`\n\t\tState string `json:\"state,omitempty\"`\n\t}{\n\t\tTitle: pr.Title,\n\t\tBody: pr.Body,\n\t\tState: pr.State,\n\t}\n\tvar ret PullRequest\n\t_, err := c.request(&request{\n\t\tmethod: http.MethodPatch,\n\t\tpath: fmt.Sprintf(\"/repos/%s/%s/pulls/%d\", org, repo, number),\n\t\torg: org,\n\t\texitCodes: []int{200},\n\t\trequestBody: &edit,\n\t}, &ret)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ret, nil\n}", "func (r *RLS) CreateRelease(ctx context.Context, req *github.RepositoryRelease, recreate bool) (*github.RepositoryRelease, error) {\n\n\t// When draft release creation is requested,\n\t// create it without any check (it can).\n\tif *req.Draft {\n\t\tfmt.Fprintln(r.outStream, \"==> Create a draft 
release\")\n\t\treturn r.GitHub.CreateRelease(ctx, req)\n\t}\n\n\t// Check release exists.\n\t// If release is not found, then create a new release.\n\trelease, err := r.GitHub.GetRelease(ctx, *req.TagName)\n\tif err != nil {\n\t\tif err != RelaseNotFound {\n\t\t\treturn nil, errors.Wrap(err, \"failed to get release\")\n\t\t}\n\t\tif recreate {\n\t\t\tfmt.Fprintf(r.outStream,\n\t\t\t\t\"WARNING: '-recreate' is specified but release (%s) not found\",\n\t\t\t\t*req.TagName)\n\t\t}\n\n\t\tfmt.Fprintln(r.outStream, \"==> Create a new release\")\n\t\treturn r.GitHub.CreateRelease(ctx, req)\n\t}\n\n\t// recreate is not true. Then use that existing release.\n\tif !recreate {\n\t\tfmt.Fprintf(r.outStream, \"WARNING: found release (%s). Use existing one.\\n\",\n\t\t\t*req.TagName)\n\t\treturn release, nil\n\t}\n\n\t// When recreate is requested, delete existing release and create a\n\t// new release.\n\tfmt.Fprintln(r.outStream, \"==> Recreate a release\")\n\tif err := r.DeleteRelease(ctx, *release.ID, *req.TagName); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn r.GitHub.CreateRelease(ctx, req)\n}", "func (up *Updater) UpdateTo(rel *Release, cmdPath string) error {\n\tvar client http.Client\n\tsrc, redirectURL, err := up.api.Repositories.DownloadReleaseAsset(up.apiCtx, rel.RepoOwner, rel.RepoName, rel.AssetID, &client)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to call GitHub Releases API for getting an asset(ID: %d) for repository '%s/%s': %s\", rel.AssetID, rel.RepoOwner, rel.RepoName, err)\n\t}\n\tif redirectURL != \"\" {\n\t\tlog.Println(\"Redirect URL was returned while trying to download a release asset from GitHub API. 
Falling back to downloading from asset URL directly:\", redirectURL)\n\t\tsrc, err = up.downloadDirectlyFromURL(redirectURL)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tdefer src.Close()\n\n\tdata, err := ioutil.ReadAll(src)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed reading asset body: %v\", err)\n\t}\n\n\tif up.validator == nil {\n\t\treturn uncompressAndUpdate(bytes.NewReader(data), rel.AssetURL, cmdPath)\n\t}\n\n\tvalidationSrc, validationRedirectURL, err := up.api.Repositories.DownloadReleaseAsset(up.apiCtx, rel.RepoOwner, rel.RepoName, rel.ValidationAssetID, &client)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to call GitHub Releases API for getting an validation asset(ID: %d) for repository '%s/%s': %s\", rel.ValidationAssetID, rel.RepoOwner, rel.RepoName, err)\n\t}\n\tif validationRedirectURL != \"\" {\n\t\tlog.Println(\"Redirect URL was returned while trying to download a release validation asset from GitHub API. Falling back to downloading from asset URL directly:\", redirectURL)\n\t\tvalidationSrc, err = up.downloadDirectlyFromURL(validationRedirectURL)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tdefer validationSrc.Close()\n\n\tvalidationData, err := ioutil.ReadAll(validationSrc)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed reading validation asset body: %v\", err)\n\t}\n\n\tif err := up.validator.Validate(data, validationData); err != nil {\n\t\treturn fmt.Errorf(\"Failed validating asset content: %v\", err)\n\t}\n\n\treturn uncompressAndUpdate(bytes.NewReader(data), rel.AssetURL, cmdPath)\n}", "func patchRelease(f *os.File, info *ReleaseInfo) {\n\t// Release note for different labels\n\tf.WriteString(fmt.Sprintf(\"## Changelog since %s\\n\\n\", info.startTag))\n\n\tif len(info.releaseActionRequiredPRs) > 0 {\n\t\tf.WriteString(\"### Action Required\\n\\n\")\n\t\tfor _, pr := range info.releaseActionRequiredPRs {\n\t\t\tf.WriteString(fmt.Sprintf(\"* %s (#%d, @%s)\\n\", extractReleaseNoteFromPR(info.prMap[pr]), 
pr, *info.prMap[pr].User.Login))\n\t\t}\n\t\tf.WriteString(\"\\n\")\n\t}\n\n\tif len(info.releasePRs) > 0 {\n\t\tf.WriteString(\"### Other notable changes\\n\\n\")\n\t\tfor _, pr := range info.releasePRs {\n\t\t\tf.WriteString(fmt.Sprintf(\"* %s (#%d, @%s)\\n\", extractReleaseNoteFromPR(info.prMap[pr]), pr, *info.prMap[pr].User.Login))\n\t\t}\n\t\tf.WriteString(\"\\n\")\n\t} else {\n\t\tf.WriteString(\"**No notable changes for this release**\\n\\n\")\n\t}\n}", "func (ac *ArticleController) Edit(w http.ResponseWriter, r *http.Request) {\n\t// u := userContext(r.Context())\n\t// debug: dummy user\n\tctx := r.Context()\n\tu := models.UserContext(ctx)\n\tif u.IsAdmin {\n\t\tp := httptreemux.ContextParams(ctx)\n\n\t\tidParam, _ := strconv.Atoi(p[\"id\"])\n\t\tif idParam <= 0 { // conversion failed or bad input\n\t\t\tsendJSON(\"Input not valid\", http.StatusBadRequest, w)\n\t\t\treturn\n\t\t}\n\n\t\tid := uint(idParam)\n\t\ttitle := r.FormValue(\"title\")\n\t\ttext := r.FormValue(\"text\")\n\n\t\ta := models.ArticleUpdate(id, title, text)\n\t\tif a.ID == 0 { // Something went wrong\n\t\t\tsendJSON(\"Error: impossible to edit article\", http.StatusInternalServerError, w)\n\t\t\treturn\n\t\t}\n\n\t\turl := r.URL.EscapedPath()\n\t\tcache.RemoveURL(url)\n\n\t\tw.Header().Set(\"Content-Location\", url)\n\t\tsendJSON(a, http.StatusOK, w)\n\t} else {\n\t\tsendJSON(\"You are not admin\", http.StatusForbidden, w)\n\t}\n}", "func (r *ReleaseModuleServiceServer) RollbackRelease(ctx context.Context, in *rudderAPI.RollbackReleaseRequest) (*rudderAPI.RollbackReleaseResponse, error) {\n\tgrpclog.Print(\"rollback\")\n\tc := bytes.NewBufferString(in.Current.Manifest)\n\tt := bytes.NewBufferString(in.Target.Manifest)\n\terr := kubeClient.Update(in.Target.Namespace, c, t, in.Force, in.Recreate, in.Timeout, in.Wait)\n\treturn &rudderAPI.RollbackReleaseResponse{}, err\n}", "func CreateRelease(res http.ResponseWriter, req *http.Request) {\n\tres.Header().Set(\"Content-Type\", 
\"application/json\")\n\tc := Release{\"relid\", \"http://ispw:8080/ispw/ispw/releases/relid\"}\n\toutgoingJSON, err := json.Marshal(c)\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\thttp.Error(res, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tres.WriteHeader(http.StatusCreated)\n\tfmt.Fprint(res, string(outgoingJSON))\n}", "func (s *Server) HandleRelease(w http.ResponseWriter, r *http.Request) {\n\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\thttp.Error(w, \"Bad Request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tvar release *shared.ReleaseEvent\n\terr = json.Unmarshal(body, &release)\n\tif err != nil {\n\t\thttp.Error(w, \"Bad Request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// Tag the request with an ID for tracing in the logs.\n\trelease.RequestID = nuid.Next()\n\tfmt.Println(release)\n\n\t// Publish event to the NATS server\n\tnc := s.NATS()\n\t\n\trelease.RequestID = nuid.Next()\n\trelease_event := shared.ReleaseEvent{release.ID, release.Time, release.NextState, release.PostMedication, release.Notes, release.RequestID}\n\trel_event, err := json.Marshal(release_event)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn\n\t}\n\n\tlog.Printf(\"requestID:%s - Publishing inspection event with patientID %d\\n\", release.RequestID, release.ID)\n\t// Publishing the message to NATS Server\n\tnc.Publish(\"patient.release\", rel_event)\n\n\tjson.NewEncoder(w).Encode(\"Release event published\")\n}", "func CreateRelease(ctx *context.APIContext) {\n\t// swagger:operation POST /repos/{owner}/{repo}/releases repository repoCreateRelease\n\t// ---\n\t// summary: Create a release\n\t// consumes:\n\t// - application/json\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// - name: body\n\t// 
in: body\n\t// schema:\n\t// \"$ref\": \"#/definitions/CreateReleaseOption\"\n\t// responses:\n\t// \"201\":\n\t// \"$ref\": \"#/responses/Release\"\n\t// \"404\":\n\t// \"$ref\": \"#/responses/notFound\"\n\t// \"409\":\n\t// \"$ref\": \"#/responses/error\"\n\tform := web.GetForm(ctx).(*api.CreateReleaseOption)\n\trel, err := repo_model.GetRelease(ctx.Repo.Repository.ID, form.TagName)\n\tif err != nil {\n\t\tif !repo_model.IsErrReleaseNotExist(err) {\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetRelease\", err)\n\t\t\treturn\n\t\t}\n\t\t// If target is not provided use default branch\n\t\tif len(form.Target) == 0 {\n\t\t\tform.Target = ctx.Repo.Repository.DefaultBranch\n\t\t}\n\t\trel = &repo_model.Release{\n\t\t\tRepoID: ctx.Repo.Repository.ID,\n\t\t\tPublisherID: ctx.Doer.ID,\n\t\t\tPublisher: ctx.Doer,\n\t\t\tTagName: form.TagName,\n\t\t\tTarget: form.Target,\n\t\t\tTitle: form.Title,\n\t\t\tNote: form.Note,\n\t\t\tIsDraft: form.IsDraft,\n\t\t\tIsPrerelease: form.IsPrerelease,\n\t\t\tIsTag: false,\n\t\t\tRepo: ctx.Repo.Repository,\n\t\t}\n\t\tif err := release_service.CreateRelease(ctx.Repo.GitRepo, rel, nil, \"\"); err != nil {\n\t\t\tif repo_model.IsErrReleaseAlreadyExist(err) {\n\t\t\t\tctx.Error(http.StatusConflict, \"ReleaseAlreadyExist\", err)\n\t\t\t} else {\n\t\t\t\tctx.Error(http.StatusInternalServerError, \"CreateRelease\", err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tif !rel.IsTag {\n\t\t\tctx.Error(http.StatusConflict, \"GetRelease\", \"Release is has no Tag\")\n\t\t\treturn\n\t\t}\n\n\t\trel.Title = form.Title\n\t\trel.Note = form.Note\n\t\trel.IsDraft = form.IsDraft\n\t\trel.IsPrerelease = form.IsPrerelease\n\t\trel.PublisherID = ctx.Doer.ID\n\t\trel.IsTag = false\n\t\trel.Repo = ctx.Repo.Repository\n\t\trel.Publisher = ctx.Doer\n\t\trel.Target = form.Target\n\n\t\tif err = release_service.UpdateRelease(ctx.Doer, ctx.Repo.GitRepo, rel, nil, nil, nil); err != nil {\n\t\t\tctx.Error(http.StatusInternalServerError, \"UpdateRelease\", 
err)\n\t\t\treturn\n\t\t}\n\t}\n\tctx.JSON(http.StatusCreated, convert.ToAPIRelease(ctx, ctx.Repo.Repository, rel))\n}", "func (a *Agent) RollbackRelease(\n\tctx context.Context,\n\tname string,\n\tversion int,\n) error {\n\tctx, span := telemetry.NewSpan(ctx, \"helm-rollback-release\")\n\tdefer span.End()\n\n\ttelemetry.WithAttributes(span,\n\t\ttelemetry.AttributeKV{Key: \"name\", Value: name},\n\t\ttelemetry.AttributeKV{Key: \"version\", Value: version},\n\t)\n\n\tcmd := action.NewRollback(a.ActionConfig)\n\tcmd.Version = version\n\treturn cmd.Run(name)\n}", "func (r *SoftwareResource) Edit(id string, item SoftwareConfig) error {\n\tif err := r.c.ModQuery(\"PUT\", BasePath+SoftwareEndpoint+\"/\"+id, item); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func RepoUpdate(w http.ResponseWriter, r *http.Request, u *User, repo *Repo) error {\n\tswitch r.FormValue(\"action\") {\n\tcase \"params\":\n\t\trepo.Params = map[string]string{}\n\t\tif err := goyaml.Unmarshal([]byte(r.FormValue(\"params\")), &repo.Params); err != nil {\n\t\t\treturn err\n\t\t}\n\tdefault:\n\t\trepo.URL = r.FormValue(\"URL\")\n\t\trepo.Disabled = len(r.FormValue(\"Disabled\")) == 0\n\t\trepo.DisabledPullRequest = len(r.FormValue(\"DisabledPullRequest\")) == 0\n\t\trepo.Private = len(r.FormValue(\"Private\")) > 0\n\t\trepo.Privileged = u.Admin && len(r.FormValue(\"Privileged\")) > 0\n\n\t\t// value of \"\" indicates the currently authenticated user\n\t\t// should be set as the administrator.\n\t\tif len(r.FormValue(\"Owner\")) == 0 {\n\t\t\trepo.UserID = u.ID\n\t\t\trepo.TeamID = 0\n\t\t} else {\n\t\t\t// else the user has chosen a team\n\t\t\tteam, err := database.GetTeamSlug(r.FormValue(\"Owner\"))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// verify the user is a member of the team\n\t\t\tif member, _ := database.IsMemberAdmin(u.ID, team.ID); !member {\n\t\t\t\treturn fmt.Errorf(\"Forbidden\")\n\t\t\t}\n\n\t\t\t// set the team ID\n\t\t\trepo.TeamID = 
team.ID\n\t\t}\n\t}\n\n\t// save the page\n\tif err := database.SaveRepo(repo); err != nil {\n\t\treturn err\n\t}\n\n\thttp.Redirect(w, r, r.URL.Path, http.StatusSeeOther)\n\treturn nil\n}", "func (v FaturasResource) Edit(c buffalo.Context) error {\n\treturn c.Error(404, errors.New(\"not available\"))\n}", "func rollbackRelease(c *gin.Context, r *api.HelmRelease) error {\n\tlogEntry := log.ReqEntry(c).\n\t\tWithField(\"cluster\", r.Cluster).WithField(\"namespace\", r.Namespace).WithField(\"releaseName\", r.Name)\n\n\tlogEntry.Debugf(\"getting helm action config...\")\n\trollbackConfig, err := generateHelmActionConfig(r.Cluster, r.Namespace, logEntry)\n\tif err != nil {\n\t\tlogEntry.WithField(\"error\", err).Warningf(\"failed to generate configuration for helm action\")\n\t\treturn err\n\t}\n\trollbackAction := action.NewRollback(rollbackConfig)\n\trollbackAction.Version = int(r.Revision)\n\terr = rollbackAction.Run(r.Name)\n\tif err != nil {\n\t\tlogEntry.WithField(\"error\", err).Warningf(\"failed to run rollback action\")\n\t}\n\treturn nil\n}", "func minorRelease(f *os.File, release, draftURL, changelogURL string) {\n\t// Check for draft and use it if available\n\tlog.Printf(\"Checking if draft release notes exist for %s...\", release)\n\n\tresp, err := http.Get(draftURL)\n\tif err == nil {\n\t\tdefer resp.Body.Close()\n\t}\n\n\tif err == nil && resp.StatusCode == 200 {\n\t\tlog.Print(\"Draft found - using for release notes...\")\n\t\t_, err = io.Copy(f, resp.Body)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"error during copy to file: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tf.WriteString(\"\\n\")\n\t} else {\n\t\tlog.Print(\"Failed to find draft - creating generic template... 
(error message/status code printed below)\")\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error message: %v\", err)\n\t\t} else {\n\t\t\tlog.Printf(\"Response status code: %d\", resp.StatusCode)\n\t\t}\n\t\tf.WriteString(\"## Major Themes\\n\\n* TBD\\n\\n## Other notable improvements\\n\\n* TBD\\n\\n## Known Issues\\n\\n* TBD\\n\\n## Provider-specific Notes\\n\\n* TBD\\n\\n\")\n\t}\n\n\t// Aggregate all previous release in series\n\tf.WriteString(fmt.Sprintf(\"### Previous Release Included in %s\\n\\n\", release))\n\n\t// Regexp Example:\n\t// Assume the release tag is v1.7.0, this regexp matches \"- [v1.7.0-\" in\n\t// \"- [v1.7.0-rc.1](#v170-rc1)\"\n\t// \"- [v1.7.0-beta.2](#v170-beta2)\"\n\t// \"- [v1.7.0-alpha.3](#v170-alpha3)\"\n\treAnchor, _ := regexp.Compile(fmt.Sprintf(\"- \\\\[%s-\", release))\n\n\tresp, err = http.Get(changelogURL)\n\tif err == nil {\n\t\tdefer resp.Body.Close()\n\t}\n\n\tif err == nil && resp.StatusCode == 200 {\n\t\tbuf := new(bytes.Buffer)\n\t\tbuf.ReadFrom(resp.Body)\n\t\tfor _, line := range strings.Split(buf.String(), \"\\n\") {\n\t\t\tif anchor := reAnchor.FindStringSubmatch(line); anchor != nil {\n\t\t\t\tf.WriteString(line + \"\\n\")\n\t\t\t}\n\t\t}\n\t\tf.WriteString(\"\\n\")\n\t} else {\n\t\tlog.Print(\"Failed to fetch past changelog for minor release - continuing... 
(error message/status code printed below)\")\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error message: %v\", err)\n\t\t} else {\n\t\t\tlog.Printf(\"Response status code: %d\", resp.StatusCode)\n\t\t}\n\t}\n}", "func Edit(w http.ResponseWriter, r *http.Request){\n\tidDoProduto := r.URL.Query().Get(\"id\")\n\tproduto := models.EditaProduto(idDoProduto)\n\ttemp.ExecuteTemplate(w, \"Edit\", produto)\n}", "func (c *Client) EditPullRequest(owner, repo string, index int64, opt EditPullRequestOption) (*PullRequest, *Response, error) {\n\tif err := escapeValidatePathSegments(&owner, &repo); err != nil {\n\t\treturn nil, nil, err\n\t}\n\tif err := opt.Validate(c); err != nil {\n\t\treturn nil, nil, err\n\t}\n\tbody, err := json.Marshal(&opt)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tpr := new(PullRequest)\n\tresp, err := c.getParsedResponse(\"PATCH\",\n\t\tfmt.Sprintf(\"/repos/%s/%s/pulls/%d\", owner, repo, index),\n\t\tjsonHeader, bytes.NewReader(body), pr)\n\treturn pr, resp, err\n}", "func (p Database) EditWith(d interface{}, id, rev string) (string, error) {\n\tif id == \"\" {\n\t\treturn \"\", errNoID\n\t}\n\tif rev == \"\" {\n\t\treturn \"\", errNoRev\n\t}\n\tjsonBuf, err := json.Marshal(d)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tm := map[string]interface{}{}\n\tmust(json.Unmarshal(jsonBuf, &m))\n\tm[\"_id\"] = id\n\tm[\"_rev\"] = rev\n\treturn p.Edit(m)\n}", "func (s *ReleaseService) AddTextRelease(r *Release, authToken string) (*Release, error) {\n\tvar (\n\t\tmethod = http.MethodPost\n\t\tpath = fmt.Sprintf(\"/releases\")\n\t)\n\treq := s.client.newRequest(path, method)\n\taddJWTToRequest(req, authToken)\n\tr.Type = Text\n\terr := addBodyToRequestAsJSON(req, r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn s.addRelease(req)\n}", "func (gauo *GithubAssetUpdateOne) SetRelease(g *GithubRelease) *GithubAssetUpdateOne {\n\treturn gauo.SetReleaseID(g.ID)\n}", "func (db *DB) Put(id string, doc interface{}, rev string) (newrev string, err error) 
{\n\tpath := revpath(rev, db.name, id)\n\t// TODO: make it possible to stream encoder output somehow\n\tjson, err := json.Marshal(doc)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tb := bytes.NewReader(json)\n\treturn responseRev(db.closedRequest(db.ctx, \"PUT\", path, b))\n}", "func (g *Gatherer) ReleaseNoteFromCommit(result *Result) (*ReleaseNote, error) {\n\tpr := result.pullRequest\n\n\tprBody := pr.GetBody()\n\ttext, err := noteTextFromString(prBody)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdocumentation := DocumentationFromString(prBody)\n\n\tauthor := pr.GetUser().GetLogin()\n\tauthorURL := pr.GetUser().GetHTMLURL()\n\tprURL := pr.GetHTMLURL()\n\tisFeature := hasString(labelsWithPrefix(pr, \"kind\"), \"feature\")\n\tnoteSuffix := prettifySIGList(labelsWithPrefix(pr, \"sig\"))\n\n\tisDuplicateSIG := false\n\tif len(labelsWithPrefix(pr, \"sig\")) > 1 {\n\t\tisDuplicateSIG = true\n\t}\n\n\tisDuplicateKind := false\n\tif len(labelsWithPrefix(pr, \"kind\")) > 1 {\n\t\tisDuplicateKind = true\n\t}\n\n\t// TODO: Spin this to sep function\n\tindented := strings.ReplaceAll(text, \"\\n\", \"\\n \")\n\tmarkdown := fmt.Sprintf(\"%s (#%d, @%s)\",\n\t\tindented, pr.GetNumber(), author)\n\tif g.options.AddMarkdownLinks {\n\t\tmarkdown = fmt.Sprintf(\"%s ([#%d](%s), [@%s](%s))\",\n\t\t\tindented, pr.GetNumber(), prURL, author, authorURL)\n\t}\n\n\tif noteSuffix != \"\" {\n\t\tmarkdown = fmt.Sprintf(\"%s [%s]\", markdown, noteSuffix)\n\t}\n\n\t// Uppercase the first character of the markdown to make it look uniform\n\tmarkdown = capitalizeString(markdown)\n\n\treturn &ReleaseNote{\n\t\tCommit: result.commit.GetSHA(),\n\t\tText: text,\n\t\tMarkdown: markdown,\n\t\tDocumentation: documentation,\n\t\tAuthor: author,\n\t\tAuthorURL: authorURL,\n\t\tPrURL: prURL,\n\t\tPrNumber: pr.GetNumber(),\n\t\tSIGs: labelsWithPrefix(pr, \"sig\"),\n\t\tKinds: labelsWithPrefix(pr, \"kind\"),\n\t\tAreas: labelsWithPrefix(pr, \"area\"),\n\t\tFeature: isFeature,\n\t\tDuplicate: 
isDuplicateSIG,\n\t\tDuplicateKind: isDuplicateKind,\n\t\tActionRequired: labelExactMatch(pr, \"release-note-action-required\"),\n\t\tDoNotPublish: labelExactMatch(pr, \"release-note-none\"),\n\t}, nil\n}", "func (c *client) EditIssue(org, repo string, number int, issue *Issue) (*Issue, error) {\n\tdurationLogger := c.log(\"EditIssue\", org, repo, number)\n\tdefer durationLogger()\n\n\tif c.dry {\n\t\treturn issue, nil\n\t}\n\tedit := struct {\n\t\tTitle string `json:\"title,omitempty\"`\n\t\tBody string `json:\"body,omitempty\"`\n\t\tState string `json:\"state,omitempty\"`\n\t}{\n\t\tTitle: issue.Title,\n\t\tBody: issue.Body,\n\t\tState: issue.State,\n\t}\n\tvar ret Issue\n\t_, err := c.request(&request{\n\t\tmethod: http.MethodPatch,\n\t\tpath: fmt.Sprintf(\"/repos/%s/%s/issues/%d\", org, repo, number),\n\t\torg: org,\n\t\texitCodes: []int{200},\n\t\trequestBody: &edit,\n\t}, &ret)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ret, nil\n}", "func (r *SoftwareVolumeResource) Edit(id string, item SoftwareVolumeConfig) error {\n\tif err := r.c.ModQuery(\"PUT\", BasePath+SoftwareVolumeEndpoint+\"/\"+id, item); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func Edit(w http.ResponseWriter, r *http.Request) {\n\tc := flight.Context(w, r)\n\n\tarticle, _, err := article.ByID(c.DB, c.Param(\"id\"), c.UserID)\n\tif err != nil {\n\t\tc.FlashErrorGeneric(err)\n\t\tc.Redirect(uri)\n\t\treturn\n\t}\n\n\tv := c.View.New(\"article/edit\")\n\tc.Repopulate(v.Vars, \"tittle\")\n\tv.Vars[\"article\"] = article\n\tv.Render(w, r)\n}", "func (r Repository) EditPost(author string, categoryName string, topic string, content string) {\n\t// session, _ := mgo.Dial(r.ipAddress)\n\t// defer session.Close()\n\t// session.SetMode(mgo.Monotonic, true)\n\t// collection := session.DB(\"u-talk\").C(\"forum\")\n\t// query := bson.M{\"name\": categoryName, \"threads.topic\": topic, }\n}", "func (m *ZebraFotaArtifact) SetReleaseNotesUrl(value *string)() {\n err := 
m.GetBackingStore().Set(\"releaseNotesUrl\", value)\n if err != nil {\n panic(err)\n }\n}", "func (s *ReleaseService) UpdateImageRelease(id uint, r *Release, image io.Reader, imageName, authToken string) (*Release, error) {\n\tvar (\n\t\tmethod = http.MethodPatch\n\t\tpath = fmt.Sprintf(\"/releases/%d\", id)\n\t)\n\treq := s.client.newRequest(path, method)\n\taddJWTToRequest(req, authToken)\n\tr.Type = Image\n\terr := addJSONAndImageToRequestAsMultipart(req, r, image, imageName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn s.updateRelease(req)\n}", "func (z *zfsctl) Release(ctx context.Context, name string, r bool, tag string) *execute {\n\targs := []string{\"release\"}\n\tif r {\n\t\targs = append(args, \"-r\")\n\t}\n\targs = append(args, tag, name)\n\treturn &execute{ctx: ctx, name: z.cmd, args: args}\n}", "func (h *MovieHandler) edit(w http.ResponseWriter, r *http.Request) {\n\t// Parse the id param from the URL and convert it into an int64.\n\tid, err := strconv.ParseInt(chi.URLParam(r, \"id\"), 10, 64)\n\tif err != nil {\n\t\t// Render an error response and set status code.\n\t\thttp.Error(w, \"Not Found\", http.StatusNotFound)\n\t\tlog.Println(\"Error:\", err)\n\t\treturn\n\t}\n\n\t// Call GetMovie to get the movie from the database.\n\tif movie, err := h.MovieService.GetMovie(id); err != nil {\n\t\t// Render an error response and set status code.\n\t\thttp.Error(w, \"Not Found\", http.StatusNotFound)\n\t\tlog.Println(\"Error:\", err)\n\t} else {\n\t\t// Render a HTML response and set status code.\n\t\trender.HTML(w, http.StatusOK, \"movie/edit.html\", movie)\n\t}\n}", "func (s *ReleaseServer) prepareUpdate(req *services.UpdateReleaseRequest) (*release.Release, *release.Release, error) {\n\tif req.Chart == nil {\n\t\treturn nil, nil, errMissingChart\n\t}\n\n\t// finds the deployed release with the given name\n\tcurrentRelease, err := s.env.Releases.Deployed(req.Name)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// determine if values 
will be reused\n\tif err := s.reuseValues(req, currentRelease); err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// finds the non-deleted release with the given name\n\tlastRelease, err := s.env.Releases.Last(req.Name)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// Increment revision count. This is passed to templates, and also stored on\n\t// the release object.\n\trevision := lastRelease.Version + 1\n\n\tts := timeconv.Now()\n\toptions := chartutil.ReleaseOptions{\n\t\tName: req.Name,\n\t\tTime: ts,\n\t\tNamespace: currentRelease.Namespace,\n\t\tIsUpgrade: true,\n\t\tRevision: int(revision),\n\t}\n\n\tcaps, err := capabilities(s.clientset.Discovery())\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tvaluesToRender, err := chartutil.ToRenderValuesCaps(req.Chart, req.Values, options, caps)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\thooks, manifestDoc, notesTxt, err := s.renderResources(req.Chart, valuesToRender, caps.APIVersions)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// Store an updated release.\n\tupdatedRelease := &release.Release{\n\t\tName: req.Name,\n\t\tNamespace: currentRelease.Namespace,\n\t\tChart: req.Chart,\n\t\tConfig: req.Values,\n\t\tInfo: &release.Info{\n\t\t\tFirstDeployed: currentRelease.Info.FirstDeployed,\n\t\t\tLastDeployed: ts,\n\t\t\tStatus: &release.Status{Code: release.Status_PENDING_UPGRADE},\n\t\t\tDescription: \"Preparing upgrade\", // This should be overwritten later.\n\t\t},\n\t\tVersion: revision,\n\t\tManifest: manifestDoc.String(),\n\t\tHooks: hooks,\n\t}\n\n\tif len(notesTxt) > 0 {\n\t\tupdatedRelease.Info.Status.Notes = notesTxt\n\t}\n\terr = validateManifest(s.env.KubeClient, currentRelease.Namespace, manifestDoc.Bytes())\n\treturn currentRelease, updatedRelease, err\n}", "func JournalEdit(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tdb, err := sql.Open(\"mysql\", connectionString)\n\tcheckErr(err)\n\n\t// query\n\trows, err := db.Query(\"SELECT title, content FROM 
chengshair.journal_entry WHERE idjournal_entry=?;\", vars[\"Id\"])\n\tcheckErr(err)\n\n\trows.Next()\n\tvar title string\n\tvar content string\n\terr = rows.Scan(&title, &content)\n\tcheckErr(err)\n\n\ti, err := strconv.Atoi(vars[\"Id\"])\n\ttemplates.ExecuteTemplate(w, \"edit.html\", &Entry{\n\t\tTitle: template.HTML(title),\n\t\tBody: content,\n\t\tId: i,\n\t\tContent: template.HTML(title + content)})\n\n}", "func Edit(w http.ResponseWriter, r *http.Request) {\n\tc := flight.Context(w, r)\n\n\titem, _, err := code.ByID(c.DB, c.Param(\"id\"))\n\tif err != nil {\n\t\tc.FlashErrorGeneric(err)\n\t\tc.Redirect(uri)\n\t\treturn\n\t}\n\n\tv := c.View.New(\"code/edit\")\n\tc.Repopulate(v.Vars, \"amount\")\n\tv.Vars[\"item\"] = item\n\tv.Vars[\"setdate\"] = item.Trans_Datetime.Time.Format(\"2006-01-02\")\n\tv.Render(w, r)\n}", "func GetRelease(ctx *context.APIContext) {\n\t// swagger:operation GET /repos/{owner}/{repo}/releases/{id} repository repoGetRelease\n\t// ---\n\t// summary: Get a release\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// - name: id\n\t// in: path\n\t// description: id of the release to get\n\t// type: integer\n\t// format: int64\n\t// required: true\n\t// responses:\n\t// \"200\":\n\t// \"$ref\": \"#/responses/Release\"\n\t// \"404\":\n\t// \"$ref\": \"#/responses/notFound\"\n\n\tid := ctx.ParamsInt64(\":id\")\n\trelease, err := repo_model.GetReleaseByID(ctx, id)\n\tif err != nil && !repo_model.IsErrReleaseNotExist(err) {\n\t\tctx.Error(http.StatusInternalServerError, \"GetReleaseByID\", err)\n\t\treturn\n\t}\n\tif err != nil && repo_model.IsErrReleaseNotExist(err) ||\n\t\trelease.IsTag || release.RepoID != ctx.Repo.Repository.ID {\n\t\tctx.NotFound()\n\t\treturn\n\t}\n\n\tif err := release.LoadAttributes(ctx); err 
!= nil {\n\t\tctx.Error(http.StatusInternalServerError, \"LoadAttributes\", err)\n\t\treturn\n\t}\n\tctx.JSON(http.StatusOK, convert.ToAPIRelease(ctx, ctx.Repo.Repository, release))\n}", "func (c Client) Release(id string, params *stripe.SubscriptionScheduleReleaseParams) (*stripe.SubscriptionSchedule, error) {\n\tpath := stripe.FormatURLPath(\"/v1/subscription_schedules/%s/release\", id)\n\tsched := &stripe.SubscriptionSchedule{}\n\terr := c.B.Call(http.MethodPost, path, c.Key, params, sched)\n\n\treturn sched, err\n}", "func Release(path string, change parser.SemVerChange, ch chan Result, options ReleaseOptions) {\n\tdefer close(ch)\n\n\t// Get Git User\n\tuser, err := git.GetUser(path)\n\tif err != nil {\n\t\tch <- Result{\n\t\t\tError: fmt.Errorf(\"[Git] get user: %v\", err),\n\t\t}\n\t\treturn\n\t}\n\tch <- Result{\n\t\tPhase: PhaseGetGitUser,\n\t\tMessage: user.String(),\n\t}\n\n\t// Parse Commits\n\tcommits, err := parser.ParseCommits(path)\n\tif err != nil {\n\t\tch <- Result{\n\t\t\tError: fmt.Errorf(\"[Release] parse commits: %v\", err),\n\t\t}\n\t\treturn\n\t}\n\tch <- Result{\n\t\tPhase: PhaseParseCommits,\n\t\tMessage: strconv.Itoa(len(commits)),\n\t}\n\n\t// Read version from last bump commit if exist\n\tvar version string\n\tif len(commits) > 0 {\n\t\tlastCommit := commits[len(commits)-1]\n\t\tif lastCommit.SemVer != \"\" {\n\t\t\tversion = lastCommit.SemVer\n\t\t\tch <- Result{\n\t\t\t\tPhase: PhaseLastVersionFromCommit,\n\t\t\t\tMessage: version,\n\t\t\t}\n\t\t}\n\t}\n\n\t// Read version from npm (package.json) if exist\n\tvar npmVersion string\n\tisNpm := npm.HasPackage(path)\n\tif isNpm {\n\t\tpkg, err := npm.ParsePackage(path)\n\t\tif err != nil {\n\t\t\tch <- Result{\n\t\t\t\tError: fmt.Errorf(\n\t\t\t\t\t\"[Release] parse npm package: %v\",\n\t\t\t\t\terr,\n\t\t\t\t),\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tnpmVersion = pkg.Version\n\t\tch <- Result{\n\t\t\tPhase: PhaseLastVersionFromPackage,\n\t\t\tMessage: npmVersion,\n\t\t}\n\t}\n\n\t// 
Inconsistency between commit history and package.json version\n\tif npmVersion != \"\" && npmVersion != version {\n\t\tch <- Result{\n\t\t\tPhase: PhaseLastVersionInconsistency,\n\t\t\tMessage: fmt.Sprintf(\n\t\t\t\t\"package.json: %s, git: %s\",\n\t\t\t\tnpmVersion,\n\t\t\t\tversion,\n\t\t\t),\n\t\t}\n\t\tversion = npmVersion\n\t}\n\n\t// Find Change\n\tif change == \"\" {\n\t\tchange = semver.GetChange(commits)\n\t\tch <- Result{\n\t\t\tPhase: PhaseChangeFound,\n\t\t\tMessage: string(change),\n\t\t}\n\t}\n\n\t// Calculate new version\n\tnewVersion, err := semver.GetVersion(version, change)\n\tif err != nil {\n\t\tch <- Result{\n\t\t\tError: fmt.Errorf(\n\t\t\t\t\"[Release] get semver version: %v\",\n\t\t\t\terr,\n\t\t\t),\n\t\t}\n\t\treturn\n\t}\n\tch <- Result{\n\t\tPhase: PhaseNextVersion,\n\t\tMessage: newVersion,\n\t}\n\n\t// Generate changelog\n\tcf, _, err := changelog.Save(path, newVersion, version, change, commits, user)\n\tif err != nil {\n\t\tch <- Result{\n\t\t\tError: fmt.Errorf(\"[Release] save changelog: %v\", err),\n\t\t}\n\t\treturn\n\t}\n\tch <- Result{\n\t\tPhase: PhaseChangelogUpdated,\n\t\tMessage: cf,\n\t}\n\n\t// Version: npm\n\tif isNpm {\n\t\t_, err = npm.Version(path, newVersion, string(change))\n\t\tif err != nil {\n\t\t\tch <- Result{\n\t\t\t\tError: fmt.Errorf(\"[npm] version: %v\", err),\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tch <- Result{\n\t\t\tPhase: PhasePackageVersion,\n\t\t}\n\t}\n\n\t// Release: Git\n\terr = git.Release(path, newVersion, user, options.SuppressPush)\n\tif err != nil {\n\t\tch <- Result{\n\t\t\tError: fmt.Errorf(\"[Release] git: %v\", err),\n\t\t}\n\t\treturn\n\t}\n\tch <- Result{\n\t\tPhase: PhaseGitRelease,\n\t\tMessage: newVersion,\n\t}\n\n\t// Publish: npm\n\tif isNpm {\n\t\t_, err = npm.Publish(path)\n\t\tif err != nil {\n\t\t\tch <- Result{\n\t\t\t\tError: fmt.Errorf(\"[npm] publish: %v\", err),\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tch <- Result{\n\t\t\tPhase: PhasePackagePublish,\n\t\t}\n\t}\n}", "func (a *Agent) 
UpgradeRelease(\n\tctx context.Context,\n\tconf *UpgradeReleaseConfig,\n\tvalues string,\n\tdoAuth *oauth2.Config,\n\tdisablePullSecretsInjection bool,\n\tignoreDependencies bool,\n) (*release.Release, error) {\n\tctx, span := telemetry.NewSpan(ctx, \"helm-upgrade-release\")\n\tdefer span.End()\n\n\ttelemetry.WithAttributes(span,\n\t\ttelemetry.AttributeKV{Key: \"project-id\", Value: conf.Cluster.ProjectID},\n\t\ttelemetry.AttributeKV{Key: \"cluster-id\", Value: conf.Cluster.ID},\n\t\ttelemetry.AttributeKV{Key: \"name\", Value: conf.Name},\n\t\ttelemetry.AttributeKV{Key: \"stack-name\", Value: conf.StackName},\n\t\ttelemetry.AttributeKV{Key: \"stack-revision\", Value: conf.StackRevision},\n\t)\n\n\tvaluesYaml, err := chartutil.ReadValues([]byte(values))\n\tif err != nil {\n\t\treturn nil, telemetry.Error(ctx, span, err, \"Values could not be parsed\")\n\t}\n\n\tconf.Values = valuesYaml\n\n\treturn a.UpgradeReleaseByValues(ctx, conf, doAuth, disablePullSecretsInjection, ignoreDependencies)\n}", "func (r *SoftwareImageResource) Edit(id string, item SoftwareImageConfig) error {\n\tif err := r.c.ModQuery(\"PUT\", BasePath+SoftwareImageEndpoint+\"/\"+id, item); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (gau *GithubAssetUpdate) SetRelease(g *GithubRelease) *GithubAssetUpdate {\n\treturn gau.SetReleaseID(g.ID)\n}", "func (r *PoolNAPTRResource) Edit(id string, item Pool) error {\n\tif err := r.c.ModQuery(\"PUT\", BasePath+PoolNAPTREndpoint+\"/\"+id, item); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func EditIssueAttachment(ctx *context.APIContext) {\n\t// swagger:operation PATCH /repos/{owner}/{repo}/issues/{index}/assets/{attachment_id} issue issueEditIssueAttachment\n\t// ---\n\t// summary: Edit an issue attachment\n\t// produces:\n\t// - application/json\n\t// consumes:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: 
repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// - name: index\n\t// in: path\n\t// description: index of the issue\n\t// type: integer\n\t// format: int64\n\t// required: true\n\t// - name: attachment_id\n\t// in: path\n\t// description: id of the attachment to edit\n\t// type: integer\n\t// format: int64\n\t// required: true\n\t// - name: body\n\t// in: body\n\t// schema:\n\t// \"$ref\": \"#/definitions/EditAttachmentOptions\"\n\t// responses:\n\t// \"201\":\n\t// \"$ref\": \"#/responses/Attachment\"\n\t// \"404\":\n\t// \"$ref\": \"#/responses/error\"\n\n\tattachment := getIssueAttachmentSafeWrite(ctx)\n\tif attachment == nil {\n\t\treturn\n\t}\n\n\t// do changes to attachment. only meaningful change is name.\n\tform := web.GetForm(ctx).(*api.EditAttachmentOptions)\n\tif form.Name != \"\" {\n\t\tattachment.Name = form.Name\n\t}\n\n\tif err := repo_model.UpdateAttachment(ctx, attachment); err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"UpdateAttachment\", err)\n\t}\n\n\tctx.JSON(http.StatusCreated, convert.ToAPIAttachment(ctx.Repo.Repository, attachment))\n}", "func (c *FakeClient) UpdateReleaseFromChart(rlsName string, chart *chart.Chart, opts ...UpdateOption) (*release.Release, error) {\n\t// Check to see if the release already exists.\n\treturn c.ReleaseContent(rlsName, 0)\n}", "func (s *Service) Edit(id alarmquote.ArticleID, a alarmquote.Article) error {\n\tif err := validate(a); err != nil {\n\t\treturn err\n\t}\n\n\tif id != a.ID {\n\t\treturn alarmquote.ErrChangeIDForbidden\n\t}\n\n\tif _, err := s.Article(id); err != nil {\n\t\treturn errors.Wrap(err, \"error retrieving when editing an article\")\n\t}\n\n\tif err := s.repo.Modify(id, a); err != nil {\n\t\treturn errors.Wrap(err, \"error editing an article\")\n\t}\n\n\treturn nil\n}", "func (f *File) Release(ctx context.Context, req *fuse.ReleaseRequest) error {\n\tf.fs.logger.Debugf(\"File Release request\")\n\n\tf.offset = 0\n\tif f.body 
!= nil {\n\t\terr := f.body.Close()\n\t\tif err != nil {\n\t\t\tf.fs.logger.Printf(\"File release failed: %v\\n\", err)\n\t\t}\n\t}\n\treturn nil\n}", "func (c *CalVer) Release() string {\n\tc.major, c.minor, c.micro, c.increment = c.next(false)\n\n\tc.pre = false\n\n\treturn c.String()\n}", "func (p *GetAnnotationsParams) ByRelease(name, revision string) {\n\tp.Tags = append(p.Tags,\n\t\t\"heritage=chronologist\",\n\t\t\"release_name=\"+name,\n\t\t\"release_revision=\"+revision,\n\t)\n}", "func release(name string, year uint32, lead string) {\n\tevents.EmitEvent(MovieRelease, name, year, lead)\n}", "func (d Document) Release() string {\n\tvalue, _ := d.labels[releaseLabel].(string)\n\n\treturn value\n}", "func (r *ReleaseModuleServiceServer) UpgradeRelease(ctx context.Context, in *rudderAPI.UpgradeReleaseRequest) (*rudderAPI.UpgradeReleaseResponse, error) {\n\tgrpclog.Print(\"upgrade\")\n\tc := bytes.NewBufferString(in.Current.Manifest)\n\tt := bytes.NewBufferString(in.Target.Manifest)\n\terr := kubeClient.Update(in.Target.Namespace, c, t, in.Force, in.Recreate, in.Timeout, in.Wait)\n\t// upgrade response object should be changed to include status\n\treturn &rudderAPI.UpgradeReleaseResponse{}, err\n}", "func (gauo *GithubAssetUpdateOne) SetReleaseID(id int) *GithubAssetUpdateOne {\n\tgauo.mutation.SetReleaseID(id)\n\treturn gauo\n}", "func editImage(w http.ResponseWriter, r *http.Request) {\r\n\t//\tvars := mux.Vars(r)\r\n\t//\tid, _ := strconv.ParseInt(vars[\"id\"], 10, 64)\r\n\tvar ci CRImage\r\n\tif err := json.NewDecoder(r.Body).Decode(&ci); err != nil {\r\n\t\tlogger.Warnf(\"error decoding image: %s\", err)\r\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\r\n\t\treturn\r\n\t}\r\n\tif err := ci.UpdateImage(); err != nil {\r\n\t\tlogger.Warnf(\"error updating image: %s\", err)\r\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\r\n\t\treturn\r\n\t}\r\n\tw.Header().Set(\"content-type\", 
\"application/json\")\r\n\tw.WriteHeader(http.StatusCreated)\r\n}", "func (gau *GithubAssetUpdate) SetReleaseID(id int) *GithubAssetUpdate {\n\tgau.mutation.SetReleaseID(id)\n\treturn gau\n}", "func (r *runners) releaseLint(cmd *cobra.Command, args []string) error {\n\tvar isBuildersRelease bool\n\tvar lintReleaseData []byte\n\tvar contentType string\n\tif r.args.lintReleaseYamlDir != \"\" {\n\t\tdata, err := tarYAMLDir(r.args.lintReleaseYamlDir)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to read yaml dir\")\n\t\t}\n\t\tlintReleaseData = data\n\t\t// TODO: all specfiles are charts => isBuildersRelease\n\t\tisBuildersRelease = false\n\t\tcontentType = \"application/tar\"\n\t} else if r.args.lintReleaseChart != \"\" {\n\t\tdata, err := ioutil.ReadFile(r.args.lintReleaseChart)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to read chart file\")\n\t\t}\n\t\tlintReleaseData = data\n\t\tisBuildersRelease = true\n\t\tcontentType = \"application/gzip\"\n\t} else {\n\t\treturn errors.Errorf(\"a yaml directory or a chart file is required\")\n\t}\n\n\tif _, ok := validFailOnValues[r.args.lintReleaseFailOn]; !ok {\n\t\treturn errors.Errorf(\"fail-on value %q not supported, supported values are [info, warn, error, none]\", r.args.lintReleaseFailOn)\n\t}\n\n\tlintResult, err := r.api.LintRelease(r.appType, lintReleaseData, isBuildersRelease, contentType)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to lint release\")\n\t}\n\n\tif err := print.LintErrors(r.w, lintResult); err != nil {\n\t\treturn errors.Wrap(err, \"failed to print lint errors\")\n\t}\n\n\tif hasError := shouldFail(lintResult, r.args.lintReleaseFailOn); hasError {\n\t\treturn errors.Errorf(\"One or more errors of severity %q or higher were found\", r.args.lintReleaseFailOn)\n\t}\n\n\treturn nil\n}", "func (wh *Webhook) Edit(webhookId string, accountId string, data map[string]interface{}, extraHeaders map[string]string) (map[string]interface{}, error) {\n\tif accountId != 
\"\" {\n\t\turl := fmt.Sprintf(\"/%s%s/%s%s/%s\", constants.VERSION_V2, constants.ACCOUNT_URL, url.PathEscape(accountId), constants.WEBHOOK, url.PathEscape(webhookId))\n\t\treturn wh.Request.Patch(url, data, extraHeaders)\n\t}\n\turl := fmt.Sprintf(\"/%s%s/%s\", constants.VERSION_V1, constants.WEBHOOK, url.PathEscape(accountId))\n\treturn wh.Request.Put(url, data, extraHeaders)\n}", "func (as *apiServer) DownloadRelease(w http.ResponseWriter, r *http.Request) {\n\treqLogger := as.logger.New(\"method\", r.Method, \"url\", r.RequestURI)\n\treqLogger.Info(\"fetching release URL\")\n\n\tvars := mux.Vars(r)\n\tctx, cancel := context.WithTimeout(r.Context(), requestTimeout)\n\tdefer cancel()\n\n\turl, err := as.githubClient.FetchReleaseURL(ctx, vars[\"owner\"], vars[\"repo\"], vars[\"tag\"], vars[\"assetName\"])\n\tif ctx.Err() != nil {\n\t\treqLogger.Error(\"error retrieving release URL\", \"err\", err, \"ctx error\", ctx.Err())\n\t\twriteHTTPError(w, reqLogger, http.StatusBadGateway, \"Bad Gateway\")\n\t\treturn\n\t}\n\tif err != nil {\n\t\tswitch t := err.(type) {\n\t\tcase GitHubError:\n\t\t\tif t.Type == TypeNotFound {\n\t\t\t\treqLogger.Info(\"data not found\", \"err\", t.WrappedError, \"vars\", vars)\n\t\t\t\twriteHTTPError(w, reqLogger, http.StatusNotFound, t.WrappedError.Error())\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\treqLogger.Error(\"unhandled github error\", \"err\", t.WrappedError, \"vars\", vars)\n\t\t\t\twriteHTTPError(w, reqLogger, http.StatusInternalServerError, \"Internal Server Error\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\treqLogger.Error(\"error retrieving release URL\", \"err\", err, \"vars\", vars)\n\t\twriteHTTPError(w, reqLogger, http.StatusInternalServerError, err.Error())\n\t\treturn\n\t}\n\n\treqLogger.Info(\"found release URL\", \"url\", url)\n\n\tw.Header().Set(\"Location\", url)\n\tw.WriteHeader(http.StatusMovedPermanently)\n}", "func (h *Handler) EditBook(w http.ResponseWriter, r *http.Request, param httprouter.Params) {\n\t// TODO: 
Implement this. Query = UPDATE books SET title = '<title>', author = '<author>', isbn = '<isbn>', stock = <stock> WHERE id = <id>\n\t// read json body\n\tbody, err := ioutil.ReadAll(r.Body)\n\tdefer r.Body.Close()\n\tif err != nil {\n\t\trenderJSON(w, []byte(`\n\t\t\tmessage: \"Fail to read body\"\n\t\t\t`), http.StatusBadRequest)\n\t\treturn\n\t}\n\t// parse json body\n\tvar book Book\n\terr = json.Unmarshal(body, &book)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\t// executing insert query\n\tquery := fmt.Sprintf(\"UPDATE books SET title='%s', author='%s', isbn='%s',stock=%d WHERE id = '%s'\", book.Title, book.Author, book.ISBN, book.Stock, param.ByName(\"bookID\"))\n\t_, err = h.DB.Query(query)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\trenderJSON(w, []byte(`\n\t{\n\t\tstatus:\"success\",\n\t\tmessage:\"Book Updated Successfully\",\n\t}\n\t`), http.StatusOK)\n}", "func EditTeam(data Team) {\n\tquery := \"UPDATE teams SET name=:name, description=:description, picture_url=:picture_url WHERE id=:id\"\n\n\t_, err := DB.NamedExec(query, data)\n\tif err != nil {\n\t\tLogger.Fatal(err.Error())\n\t}\n}", "func (f *File) Release(ctx context.Context, req *fuse.ReleaseRequest) error {\n\tlog.Infoln(\"Release requested on file\", f.Metadata.PathDisplay)\n\tif f.NeedsUpload {\n\t\t// Entirely reckless\n\t\tgo func() {\n\t\t\tlog.Infoln(\"Uploading file to Dropbox\", f.Metadata.PathDisplay)\n\t\t\tretryNotice := func(err error, duration time.Duration) {\n\t\t\t\tlog.Errorf(\"Retrying %s in %s due to %s\\n\", f.Metadata.PathDisplay, err, duration)\n\t\t\t}\n\t\t\terr := backoff.RetryNotify(func() error {\n\t\t\t\t_, err := f.Client.Upload(f.Metadata.PathDisplay, f.getData())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}, backoff.NewExponentialBackOff(), retryNotice)\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Panicln(\"Unable to upload file\", f.Metadata.PathDisplay, err)\n\t\t\t}\n\t\t\tf.NeedsUpload = 
false\n\t\t}()\n\t}\n\n\treturn nil\n}", "func NewRelease(name, namespace, chart, chartVersion string, chartSpec ChartSpec, currentReleaseVersion int32, values Values, usedSubstitute Substitute, overrides Overrides) Release {\n\treturn Release{Name: name, Namespace: namespace, Chart: chart, ChartVersion: chartVersion, ChartSpec: chartSpec, CurrentReleaseVersion: currentReleaseVersion, Values: values, usedSubstitute: usedSubstitute, overrides: overrides}\n}", "func (c *FakeClient) RollbackRelease(rlsName string, opts ...RollbackOption) (*release.Release, error) {\n\treturn nil, nil\n}", "func (r *FPGAInfoResource) Edit(id string, item FPGAInfoConfig) error {\n\tif err := r.c.ModQuery(\"PUT\", BasePath+FPGAInfoEndpoint+\"/\"+id, item); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (controller *WidgetController) Put(context *qhttp.Context) {\n\treq := &WidgetRequest{}\n\tif err := context.ReadObject(req); nil != err {\n\t\tcontext.SetError(&qerror.RestError{Code: qerror.ValidationError, Message: err.Error()}, http.StatusNotAcceptable)\n\t\treturn\n\t}\n\tif !req.Valid() {\n\t\tcontext.SetError(&qerror.RestError{Code: qerror.ValidationError, Message: req.Error()}, http.StatusNotAcceptable)\n\t\treturn\n\t}\n\twidget := &Widget{\n\t\tID: context.URIParameters[\"id\"],\n\t\tDescription: req.Description,\n\t\tSerialNumber: req.SerialNumber,\n\t}\n\tif err := controller.storage.Update(widget); nil != err {\n\t\tcontext.SetError(\n\t\t\tqerror.NewRestError(qerror.NotFound, fmt.Sprintf(\"No widget for ID %s found\", context.URIParameters[\"id\"]), nil),\n\t\t\thttp.StatusNotFound)\n\t}\n\n\tcontext.SetResponse(widget, http.StatusOK)\n}", "func (m *MockClient) EditPullRequest(org, repo string, number int, pr *github.PullRequest) (*github.PullRequest, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"EditPullRequest\", org, repo, number, pr)\n\tret0, _ := ret[0].(*github.PullRequest)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func 
newTestRelease() *Release {\n\tv, _ := version.NewVersion(\"1.0.0\")\n\tt, _ := time.Parse(time.RFC1123Z, \"Fri, 13 May 2016 12:00:00 +0200\")\n\n\treturn &Release{\n\t\tversion: v,\n\t\tbuild: \"1000\",\n\t\ttitle: \"Test\",\n\t\tdescription: \"Test\",\n\t\tpublishedDateTime: NewPublishedDateTime(&t),\n\t\treleaseNotesLink: \"https://example.com/changelogs/1.0.0.html\",\n\t\tminimumSystemVersion: \"10.9\",\n\t\tdownloads: []Download{\n\t\t\t*NewDownload(\"https://example.com/1.0.0/one.dmg\", \"application/octet-stream\", 100000),\n\t\t\t*NewDownload(\"https://example.com/1.0.0/two.dmg\", \"application/octet-stream\", 100000),\n\t\t},\n\t\tisPreRelease: false,\n\t}\n}", "func (repo BoshDirectorRepository) DeleteRelease(name string, version string) (apiResponse net.ApiResponse) {\n\tpath := fmt.Sprintf(\"/releases/%s?force=true&version=%s\", name, version)\n\tapiResponse = repo.gateway.DeleteResource(repo.config.TargetURL+path, repo.config.Username, repo.config.Password)\n\tif apiResponse.IsNotSuccessful() {\n\t\treturn\n\t}\n\tif !apiResponse.IsRedirection() {\n\t\treturn\n\t}\n\n\tvar taskStatus models.TaskStatus\n\ttaskURL, err := url.Parse(apiResponse.RedirectLocation)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tapiResponse = repo.gateway.GetResource(repo.config.TargetURL+taskURL.Path, repo.config.Username, repo.config.Password, &taskStatus)\n\tif apiResponse.IsNotSuccessful() {\n\t\treturn\n\t}\n\n\t/* Progression should be: queued, progressing, done */\n\t/* TODO task might fail; end states: done, error, cancelled */\n\tfor taskStatus.State != \"done\" {\n\t\ttime.Sleep(1)\n\t\ttaskStatus, apiResponse = repo.GetTaskStatus(taskStatus.ID)\n\t\tif apiResponse.IsNotSuccessful() {\n\t\t\treturn\n\t\t}\n\t}\n\n\treturn\n}", "func (s *ReleaseServer) RollbackRelease(c ctx.Context, req *services.RollbackReleaseRequest) (*services.RollbackReleaseResponse, error) {\n\ts.Log(\"preparing rollback of %s\", req.Name)\n\tcurrentRelease, targetRelease, err := 
s.prepareRollback(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !req.DryRun {\n\t\ts.Log(\"creating rolled back release for %s\", req.Name)\n\t\tif err := s.env.Releases.Create(targetRelease); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\ts.Log(\"performing rollback of %s\", req.Name)\n\tres, err := s.performRollback(currentRelease, targetRelease, req)\n\tif err != nil {\n\t\treturn res, err\n\t}\n\n\tif !req.DryRun {\n\t\ts.Log(\"updating status for rolled back release for %s\", req.Name)\n\t\tif err := s.env.Releases.Update(targetRelease); err != nil {\n\t\t\treturn res, err\n\t\t}\n\t}\n\n\treturn res, nil\n}", "func (r *FeatureModuleResource) Edit(id string, item FeatureModuleConfig) error {\n\tif err := r.c.ModQuery(\"PUT\", BasePath+FeatureModuleEndpoint+\"/\"+id, item); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (ar *arangorepository) EditStrain(\n\tus *stock.StrainUpdate,\n) (*model.StockDoc, error) {\n\tm := &model.StockDoc{}\n\tpropKey, err := ar.checkStock(us.Data.Id)\n\tif err != nil {\n\t\treturn m, err\n\t}\n\tbindVars := getUpdatableStrainBindParams(us.Data.Attributes)\n\tbindStVars := getUpdatableStrainPropBindParams(us.Data.Attributes)\n\tcmBindVars := mergeBindParams(\n\t\tmap[string]interface{}{\n\t\t\t\"@stock_properties_collection\": ar.stockc.stockProp.Name(),\n\t\t\t\"@stock_collection\": ar.stockc.stock.Name(),\n\t\t\t\"key\": us.Data.Id,\n\t\t\t\"propkey\": propKey,\n\t\t},\n\t\tbindVars, bindStVars,\n\t)\n\tparent := us.Data.Attributes.Parent\n\tstmt := statement.StrainUpd\n\tif len(parent) > 0 { // in case parent is present\n\t\tpVars, pStmt, err := ar.handleEditStrainWithParent(parent, us.Data.Id)\n\t\tif err != nil {\n\t\t\treturn m, err\n\t\t}\n\t\tstmt = pStmt\n\t\tcmBindVars = mergeBindParams(cmBindVars, pVars)\n\t\tm.StrainProperties = &model.StrainProperties{Parent: parent}\n\t}\n\trupd, err := 
ar.database.DoRun(\n\t\tfmt.Sprintf(\n\t\t\tstmt,\n\t\t\tgenAQLDocExpression(bindVars),\n\t\t\tgenAQLDocExpression(bindStVars),\n\t\t),\n\t\tcmBindVars,\n\t)\n\tif err != nil {\n\t\treturn m, errors.Errorf(\n\t\t\t\"error in editing strain %s %s\",\n\t\t\tus.Data.Id, err,\n\t\t)\n\t}\n\terr = rupd.Read(m)\n\treturn m, err\n}", "func handleRepo(ctx context.Context, client *github.Client, repo *github.Repository) (*release, error) {\n\tif !in(orgs, repo.GetOwner().GetLogin()) {\n\t\t// return early\n\t\treturn nil, nil\n\t}\n\topt := &github.ListOptions{\n\t\tPage: 1,\n\t\tPerPage: 100,\n\t}\n\n\treleases, resp, err := client.Repositories.ListReleases(ctx, repo.GetOwner().GetLogin(), repo.GetName(), opt)\n\tif resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusForbidden || err != nil {\n\t\tif _, ok := err.(*github.RateLimitError); ok {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Skip it because there is no release.\n\t\treturn nil, nil\n\t}\n\tif err != nil || len(releases) < 1 {\n\t\treturn nil, err\n\t}\n\n\trl := release{\n\t\tRepository: repo,\n\t}\n\t// Get information about the binary assets for linux-amd64\n\tarch := \"linux-amd64\"\n\tfor i := 0; i < len(releases); i++ {\n\t\tr := releases[i]\n\t\tif rl.Release == nil && !r.GetDraft() {\n\t\t\t// If this is the latest release and it's not a draft make it the one\n\t\t\t// to return\n\t\t\trl.Release = r\n\t\t\tfor _, asset := range r.Assets {\n\t\t\t\trl.BinaryDownloadCount += asset.GetDownloadCount()\n\t\t\t\tif strings.HasSuffix(asset.GetName(), arch) {\n\t\t\t\t\trl.BinaryURL = asset.GetBrowserDownloadURL()\n\t\t\t\t\trl.BinaryName = asset.GetName()\n\t\t\t\t\trl.BinarySince = units.HumanDuration(time.Since(asset.GetCreatedAt().Time))\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif strings.HasSuffix(asset.GetName(), arch+\".sha256\") {\n\t\t\t\t\tc, err := getURLContent(asset.GetBrowserDownloadURL())\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, 
err\n\t\t\t\t\t}\n\t\t\t\t\trl.BinarySHA256 = c\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif strings.HasSuffix(asset.GetName(), arch+\".md5\") {\n\t\t\t\t\tc, err := getURLContent(asset.GetBrowserDownloadURL())\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t\trl.BinaryMD5 = c\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tfor _, asset := range r.Assets {\n\t\t\t\trl.BinaryDownloadCount += asset.GetDownloadCount()\n\t\t\t}\n\t\t}\n\t}\n\n\treturn &rl, nil\n}", "func EditMilestone(ctx *context.APIContext) {\n\t// swagger:operation PATCH /repos/{owner}/{repo}/milestones/{id} issue issueEditMilestone\n\t// ---\n\t// summary: Update a milestone\n\t// consumes:\n\t// - application/json\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// - name: id\n\t// in: path\n\t// description: the milestone to edit, identified by ID and if not available by name\n\t// type: string\n\t// required: true\n\t// - name: body\n\t// in: body\n\t// schema:\n\t// \"$ref\": \"#/definitions/EditMilestoneOption\"\n\t// responses:\n\t// \"200\":\n\t// \"$ref\": \"#/responses/Milestone\"\n\tform := web.GetForm(ctx).(*api.EditMilestoneOption)\n\tmilestone := getMilestoneByIDOrName(ctx)\n\tif ctx.Written() {\n\t\treturn\n\t}\n\n\tif len(form.Title) > 0 {\n\t\tmilestone.Name = form.Title\n\t}\n\tif form.Description != nil {\n\t\tmilestone.Content = *form.Description\n\t}\n\tif form.Deadline != nil && !form.Deadline.IsZero() {\n\t\tmilestone.DeadlineUnix = timeutil.TimeStamp(form.Deadline.Unix())\n\t}\n\n\toldIsClosed := milestone.IsClosed\n\tif form.State != nil {\n\t\tmilestone.IsClosed = *form.State == string(api.StateClosed)\n\t}\n\n\tif err := issues_model.UpdateMilestone(milestone, oldIsClosed); err != nil 
{\n\t\tctx.Error(http.StatusInternalServerError, \"UpdateMilestone\", err)\n\t\treturn\n\t}\n\tctx.JSON(http.StatusOK, convert.ToAPIMilestone(milestone))\n}", "func (r *RollbackReleaseV1Action) Handle(ctx context.Context,\n\treq *helmmanager.RollbackReleaseV1Req, resp *helmmanager.RollbackReleaseV1Resp) error {\n\tr.ctx = ctx\n\tr.req = req\n\tr.resp = resp\n\n\tif err := r.req.Validate(); err != nil {\n\t\tblog.Errorf(\"rollback release failed, invalid request, %s, param: %v\", err.Error(), r.req)\n\t\tr.setResp(common.ErrHelmManagerRequestParamInvalid, err.Error())\n\t\treturn nil\n\t}\n\n\tif err := r.rollback(); err != nil {\n\t\tblog.Errorf(\"rollback release %s failed, clusterID: %s, namespace: %s, targetRevision: %d, error: %s\",\n\t\t\tr.req.GetName(), r.req.GetClusterID(), r.req.GetNamespace(), r.req.GetRevision(), err.Error())\n\t\tr.setResp(common.ErrHelmManagerRollbackActionFailed, err.Error())\n\t\treturn nil\n\t}\n\n\tblog.Infof(\"dispatch release successfully, projectCode: %s, clusterID: %s, namespace: %s, name: %s, operator: %s\",\n\t\tr.req.GetProjectCode(), r.req.GetClusterID(), r.req.GetNamespace(), r.req.GetName(), auth.GetUserFromCtx(r.ctx))\n\tr.setResp(common.ErrHelmManagerSuccess, \"ok\")\n\treturn nil\n}", "func (p Project) EditRow() error {\n\t_, err := DB.Exec(\n\t\t`UPDATE project SET \n\t\t group_id = ?,\n\t\t name = ?,\n\t\t url = ?,\n\t\t path = ?,\n\t\t environment = ?,\n\t\t branch = ?,\n\t\t after_pull_script = ?,\n\t\t after_deploy_script = ?,\n\t\t rsync_option = ?\n\t\tWHERE\n\t\t id = ?`,\n\t\tp.GroupID,\n\t\tp.Name,\n\t\tp.URL,\n\t\tp.Path,\n\t\tp.Environment,\n\t\tp.Branch,\n\t\tp.AfterPullScript,\n\t\tp.AfterDeployScript,\n\t\tp.RsyncOption,\n\t\tp.ID,\n\t)\n\treturn err\n}", "func ReleaseID(v int64) predicate.GithubRelease {\n\treturn predicate.GithubRelease(sql.FieldEQ(FieldReleaseID, v))\n}", "func (h *Handler) EditVariant(w http.ResponseWriter, r *http.Request) {\n\treq, err := request.NewEditVariant(r)\n\tif err != nil 
{\n\t\tif err == request.ErrInvalidVariantID {\n\t\t\tlog.Printf(err.Error())\n\t\t\tapi.Fail(w, http.StatusBadRequest, http.StatusBadRequest, request.ErrInvalidVariantID.Error())\n\t\t\treturn\n\t\t}\n\t\tif err == request.ErrInvalidJSON {\n\t\t\tlog.Printf(err.Error())\n\t\t\tapi.Fail(w, http.StatusBadRequest, http.StatusBadRequest, request.ErrInvalidJSON.Error())\n\t\t\treturn\n\t\t}\n\t\tif err == request.ErrInvalidProductID {\n\t\t\tlog.Printf(err.Error())\n\t\t\tapi.Fail(w, http.StatusBadRequest, http.StatusBadRequest, request.ErrInvalidProductID.Error())\n\t\t\treturn\n\t\t}\n\t\tif err == request.ErrInvalidMRP {\n\t\t\tlog.Printf(err.Error())\n\t\t\tapi.Fail(w, http.StatusBadRequest, http.StatusBadRequest, request.ErrInvalidMRP.Error())\n\t\t\treturn\n\t\t}\n\t\tlog.Printf(err.Error())\n\t\tapi.Fail(w, http.StatusBadRequest, http.StatusBadRequest, \"Request body is invalid\")\n\t\treturn\n\t}\n\terr = h.Variant.EditVariant(r.Context(), req)\n\tif err != nil {\n\t\tif err == service.ErrVariantNotFound {\n\t\t\tlog.Printf(err.Error())\n\t\t\tapi.Fail(w, http.StatusNotFound, http.StatusNotFound, service.ErrVariantNotFound.Error())\n\t\t}\n\t\tlog.Printf(err.Error())\n\t\tapi.Fail(w, http.StatusInternalServerError, api.ErrCodeInternalServiceError)\n\t\treturn\n\t}\n\n\tapi.Send(w, http.StatusOK, nil)\n}", "func (s *Services) ReleaseType(ctx context.Context, request *proto.ReleaseTypeRequest) (*proto.ReleaseTypeResponse, error) {\n\tvar result models.ReleaseType\n\tquery := s.DB\n\n\tif request.Id != 0 {\n\t\tquery = query.Where(\"id = ?\", request.Id)\n\t}\n\n\tif err := query.First(&result).Error; err != nil {\n\n\t\t// If nothing was found\n\t\tif gorm.IsRecordNotFoundError(err) {\n\t\t\treturn &proto.ReleaseTypeResponse{ReleaseType: nil}, nil\n\t\t}\n\n\t\tlog.Println(err)\n\t\treturn nil, err\n\t}\n\n\treturn &proto.ReleaseTypeResponse{ReleaseType: result.ToProto()}, nil\n}", "func (o *ShowPackageParams) SetRelease(release string) {\n\to.Release = 
release\n}", "func (t TestDescription) Release() TestDescription {\n\treturn t.newLabel(\"RELEASE\")\n}", "func EditBook(c *fiber.Ctx) {\n\tdb := database.DBConn\n\tbook := new(Book)\n\n\tif err := c.BodyParser(book); err != nil {\n\t\tc.Status(400).SendString(err.Error())\n\t}\n\n\tisupdated := db.Model(book).Updates(Book{Title: book.Title, Author: book.Author, Rating: book.Rating})\n\n\tif isupdated == nil {\n\t\tres := Response{Status: 400, Message: \"Error Updating book\"}\n\t\tc.Status(200).JSON(res)\n\t}\n\tres := Response{Status: 200, Message: \"Book Edited Successfully\"}\n\tc.Status(200).JSON(res)\n\n}", "func Edit(w http.ResponseWriter, r *http.Request) {\n\tc := flight.Context(w, r)\n\n\titem, _, err := summary.ByID(c.DB, c.Param(\"id\"))\n\tif err != nil {\n\t\tc.FlashErrorGeneric(err)\n\t\tc.Redirect(uri)\n\t\treturn\n\t}\n\n\tv := c.View.New(\"summary/edit\")\n\tc.Repopulate(v.Vars, \"name\")\n\tv.Vars[\"item\"] = item\n\tv.Render(w, r)\n}", "func SwitchRelease(binToSlice string, helmBinPath string, helmVersionPath string) error {\n\n\t// Delete actual symlink\n\trmLn := &BashCmd{\n\t\tCmd: \"find\",\n\t\tArgs: []string{\"-L\", \".\", \"-xtype\", \"l\", \"-delete\"},\n\t\tExecPath: helmBinPath,\n\t}\n\t_, err := ExecBashCmd(rmLn)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Create symlink to helm new version\n\tln := &BashCmd{\n\t\tCmd: \"ln\",\n\t\tArgs: []string{\"-s\", fmt.Sprintf(\"%s/helm-%s\", helmVersionPath, binToSlice), fmt.Sprintf(\"%s/helm\", helmBinPath)},\n\t}\n\t_, err = ExecBashCmd(ln)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (gc *githubClient) GetLatestVersionFromRelease(ctx context.Context, owner string, repo string) (string, error) {\n\tvar err error\n\tversion := \"\"\n\n\terr = retryWhenRateLimited(func() error {\n\t\tversion, err = gc.getLatestReleaseVersion(ctx, owner, repo)\n\t\treturn err\n\t})\n\n\treturn version, err\n}", "func (r *VCMPResource) Edit(id string, item VCMPConfig) error {\n\tif err := 
r.c.ModQuery(\"PUT\", BasePath+VCMPEndpoint+\"/\"+id, item); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (env *Env) Edit(res http.ResponseWriter, req *http.Request, title string) {\n\tenv.Log.V(1, \"beginning handling of Edit.\")\n\tenv.Log.V(1, \"loading requested page from cache.\")\n\tp, err := env.Cache.LoadPageFromCache(title)\n\tif err != nil {\n\t\tenv.Log.V(1, \"if file from cache not found, then retrieve requested page from db.\")\n\t\tp, _ = env.DB.LoadPage(title)\n\t}\n\tif p.Title == \"\" {\n\t\tenv.Log.V(1, \"if page title is blank, then try again.\")\n\t\tp, _ = env.DB.LoadPage(title)\n\t}\n\tif p == nil {\n\t\tenv.Log.V(1, \"notifying client that the request page was not found.\")\n\t\thttp.NotFound(res, req)\n\t\treturn\n\t}\n\tif strings.Contains(p.Title, \"_\") {\n\t\tp.Title = strings.Replace(p.Title, \"_\", \" \", -1)\n\t}\n\tenv.Log.V(1, \"requested page found, rendering the edit template.\")\n\tenv.Render(res, \"edit\", p)\n}", "func (m *MockPullRequestClient) EditPullRequest(org, repo string, number int, pr *github.PullRequest) (*github.PullRequest, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"EditPullRequest\", org, repo, number, pr)\n\tret0, _ := ret[0].(*github.PullRequest)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func handleEdit(w http.ResponseWriter, r *http.Request, title string) {\n\tp, err := page.Load(title)\n\tif err != nil {\n\t\tp = &page.Page{Title: title}\n\t}\n\trender(w, \"edit\", p)\n\tlogInfo(p.Title, \"file opened in edit mode\")\n}" ]
[ "0.797417", "0.63441503", "0.6101559", "0.60008246", "0.5971021", "0.5868578", "0.583974", "0.57195973", "0.57152265", "0.5610921", "0.5536156", "0.55010283", "0.5471195", "0.5419356", "0.54037344", "0.5372626", "0.5371148", "0.5351311", "0.5314843", "0.5248477", "0.52337307", "0.52221", "0.5213594", "0.5209035", "0.5204636", "0.519609", "0.5194813", "0.51657975", "0.51654", "0.5162213", "0.5153898", "0.5130442", "0.51190066", "0.5118752", "0.51053834", "0.509498", "0.50885046", "0.50758964", "0.506997", "0.50665337", "0.5064072", "0.5060775", "0.5057429", "0.5048337", "0.5047358", "0.5043619", "0.50312185", "0.50302094", "0.50290155", "0.50068194", "0.5005914", "0.49965754", "0.4995655", "0.49938825", "0.49809322", "0.49804032", "0.49760008", "0.49705917", "0.49534106", "0.49475533", "0.49456832", "0.49401852", "0.49291986", "0.4882843", "0.48801702", "0.48722175", "0.48366165", "0.48208198", "0.48180205", "0.48149204", "0.48098615", "0.47848198", "0.47790462", "0.47785556", "0.47759828", "0.47636455", "0.4758243", "0.47506475", "0.47459164", "0.47438133", "0.47378525", "0.4735448", "0.47307396", "0.4726933", "0.4725947", "0.4715883", "0.47138384", "0.47133854", "0.4711417", "0.47084823", "0.47057417", "0.47052267", "0.46947342", "0.46898565", "0.4681278", "0.46791962", "0.46769723", "0.46759352", "0.46497336", "0.46477914" ]
0.758511
1
ListReleases lists Releases given a repository
func (c *Client) ListReleases(ctx context.Context) ([]*github.RepositoryRelease, error) { result := []*github.RepositoryRelease{} page := 1 for { assets, res, err := c.Repositories.ListReleases(context.TODO(), c.Owner, c.Repo, &github.ListOptions{Page: page}) if err != nil { return nil, errors.Wrap(err, "failed to list releases") } if res.StatusCode != http.StatusOK { return nil, errors.Errorf("list repository releases: invalid status code: %s", res.Status) } result = append(result, assets...) if res.NextPage <= page { break } page = res.NextPage } return result, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func ListReleases(ctx *context.APIContext) {\n\t// swagger:operation GET /repos/{owner}/{repo}/releases repository repoListReleases\n\t// ---\n\t// summary: List a repo's releases\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// - name: draft\n\t// in: query\n\t// description: filter (exclude / include) drafts, if you dont have repo write access none will show\n\t// type: boolean\n\t// - name: pre-release\n\t// in: query\n\t// description: filter (exclude / include) pre-releases\n\t// type: boolean\n\t// - name: per_page\n\t// in: query\n\t// description: page size of results, deprecated - use limit\n\t// type: integer\n\t// deprecated: true\n\t// - name: page\n\t// in: query\n\t// description: page number of results to return (1-based)\n\t// type: integer\n\t// - name: limit\n\t// in: query\n\t// description: page size of results\n\t// type: integer\n\t// responses:\n\t// \"200\":\n\t// \"$ref\": \"#/responses/ReleaseList\"\n\tlistOptions := utils.GetListOptions(ctx)\n\tif listOptions.PageSize == 0 && ctx.FormInt(\"per_page\") != 0 {\n\t\tlistOptions.PageSize = ctx.FormInt(\"per_page\")\n\t}\n\n\topts := repo_model.FindReleasesOptions{\n\t\tListOptions: listOptions,\n\t\tIncludeDrafts: ctx.Repo.AccessMode >= perm.AccessModeWrite || ctx.Repo.UnitAccessMode(unit.TypeReleases) >= perm.AccessModeWrite,\n\t\tIncludeTags: false,\n\t\tIsDraft: ctx.FormOptionalBool(\"draft\"),\n\t\tIsPreRelease: ctx.FormOptionalBool(\"pre-release\"),\n\t}\n\n\treleases, err := repo_model.GetReleasesByRepoID(ctx, ctx.Repo.Repository.ID, opts)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetReleasesByRepoID\", err)\n\t\treturn\n\t}\n\trels := make([]*api.Release, len(releases))\n\tfor i, release := range releases {\n\t\tif err := 
release.LoadAttributes(ctx); err != nil {\n\t\t\tctx.Error(http.StatusInternalServerError, \"LoadAttributes\", err)\n\t\t\treturn\n\t\t}\n\t\trels[i] = convert.ToAPIRelease(ctx, ctx.Repo.Repository, release)\n\t}\n\n\tfilteredCount, err := repo_model.CountReleasesByRepoID(ctx.Repo.Repository.ID, opts)\n\tif err != nil {\n\t\tctx.InternalServerError(err)\n\t\treturn\n\t}\n\n\tctx.SetLinkHeader(int(filteredCount), listOptions.PageSize)\n\tctx.SetTotalCountHeader(filteredCount)\n\tctx.JSON(http.StatusOK, rels)\n}", "func (r *GitLabRelease) ListReleases(ctx context.Context) ([]string, error) {\n\tversions := []string{}\n\topt := &gitlab.ListReleasesOptions{\n\t\tPerPage: 100, // max\n\t}\n\n\tfor {\n\t\treleases, resp, err := r.api.ProjectListReleases(ctx, r.owner, r.project, opt)\n\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to list releases for %s/%s: %s\", r.owner, r.project, err)\n\t\t}\n\n\t\tfor _, release := range releases {\n\t\t\tv := tagNameToVersion(release.TagName)\n\t\t\tversions = append(versions, v)\n\t\t}\n\t\tif resp.NextPage == 0 {\n\t\t\tbreak\n\t\t}\n\t\topt.Page = resp.NextPage\n\t}\n\n\treturn versions, nil\n}", "func cmdListReleases(ccmd *cobra.Command, args []string) {\n\taplSvc := apl.NewClient()\n\toutput := runListCommand(&releaseParams, aplSvc.Releases.List)\n\tif output != nil {\n\t\tfields := []string{\"ID\", \"StackID\", \"Version\", \"CreatedTime\"}\n\t\tprintTableResultsCustom(output.([]apl.Release), fields)\n\t}\n}", "func (s *GiteaSource) ListReleases(owner, repo string) ([]SourceRelease, error) {\n\terr := checkOwnerRepoParameters(owner, repo)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trels, res, err := s.api.ListReleases(owner, repo, gitea.ListReleasesOptions{})\n\tif err != nil {\n\t\tlog.Printf(\"API returned an error response: %s\", err)\n\t\tif res != nil && res.StatusCode == 404 {\n\t\t\t// 404 means repository not found or release not found. It's not an error here.\n\t\t\tlog.Print(\"API returned 404. 
Repository or release not found\")\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, err\n\t}\n\treleases := make([]SourceRelease, len(rels))\n\tfor i, rel := range rels {\n\t\treleases[i] = NewGiteaRelease(rel)\n\t}\n\treturn releases, nil\n}", "func (c *Client) List(p ListParameters) ([]Release, error) {\n\tresponse, err := c.client.ListReleases(p.Options()...) // TODO Paging.\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\tvar releases []Release\n\tif response != nil && response.Releases != nil {\n\t\tfor _, item := range response.Releases {\n\t\t\treleases = append(releases, *(fromHelm(item)))\n\t\t}\n\t}\n\treturn releases, nil\n}", "func (g GithubClient) ListAllReleases(owner, repo string) ([]*github.RepositoryRelease, error) {\n\tlo := &github.ListOptions{\n\t\tPage: 1,\n\t\tPerPage: 100,\n\t}\n\n\treleases, resp, err := g.client.Repositories.ListReleases(context.Background(), owner, repo, lo)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlo.Page++\n\n\tfor lo.Page <= resp.LastPage {\n\t\tre, _, err := g.client.Repositories.ListReleases(context.Background(), owner, repo, lo)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor _, r := range re {\n\t\t\treleases = append(releases, r)\n\t\t}\n\t\tlo.Page++\n\t}\n\treturn releases, nil\n}", "func (hc *Actions) ListReleases() ([]api.Stack, error) {\n\tactList := action.NewList(hc.Config)\n\treleases, err := actList.Run()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresult := []api.Stack{}\n\tfor _, rel := range releases {\n\t\tresult = append(result, api.Stack{\n\t\t\tID: rel.Name,\n\t\t\tName: rel.Name,\n\t\t\tStatus: string(rel.Info.Status),\n\t\t})\n\t}\n\treturn result, nil\n}", "func (a *Agent) ListReleases(\n\tctx context.Context,\n\tnamespace string,\n\tfilter *types.ReleaseListFilter,\n) ([]*release.Release, error) {\n\tctx, span := telemetry.NewSpan(ctx, \"helm-list-releases\")\n\tdefer span.End()\n\n\ttelemetry.WithAttributes(span,\n\t\ttelemetry.AttributeKV{Key: \"namespace\", 
Value: namespace},\n\t)\n\n\tlsel := fmt.Sprintf(\"owner=helm,status in (%s)\", strings.Join(filter.StatusFilter, \",\"))\n\n\t// list secrets\n\tsecretList, err := a.K8sAgent.Clientset.CoreV1().Secrets(namespace).List(\n\t\tcontext.Background(),\n\t\tv1.ListOptions{\n\t\t\tLabelSelector: lsel,\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn nil, telemetry.Error(ctx, span, err, \"error getting secret list\")\n\t}\n\n\t// before decoding to helm release, only keep the latest releases for each chart\n\tlatestMap := make(map[string]corev1.Secret)\n\n\tfor _, secret := range secretList.Items {\n\t\trelName, relNameExists := secret.Labels[\"name\"]\n\n\t\tif !relNameExists {\n\t\t\tcontinue\n\t\t}\n\n\t\tid := fmt.Sprintf(\"%s/%s\", secret.Namespace, relName)\n\n\t\tif currLatest, exists := latestMap[id]; exists {\n\t\t\t// get version\n\t\t\tcurrVersionStr, currVersionExists := currLatest.Labels[\"version\"]\n\t\t\tversionStr, versionExists := secret.Labels[\"version\"]\n\n\t\t\tif versionExists && currVersionExists {\n\t\t\t\tcurrVersion, currErr := strconv.Atoi(currVersionStr)\n\t\t\t\tversion, err := strconv.Atoi(versionStr)\n\t\t\t\tif currErr == nil && err == nil && currVersion < version {\n\t\t\t\t\tlatestMap[id] = secret\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tlatestMap[id] = secret\n\t\t}\n\t}\n\n\tchartList := []string{}\n\tres := make([]*release.Release, 0)\n\n\tfor _, secret := range latestMap {\n\t\trel, isErr, err := kubernetes.ParseSecretToHelmRelease(secret, chartList)\n\n\t\tif !isErr && err == nil {\n\t\t\tres = append(res, rel)\n\t\t}\n\t}\n\n\treturn res, nil\n}", "func (repo BoshDirectorRepository) GetReleases() (releases models.Releases, apiResponse net.ApiResponse) {\n\tresponse := []releaseResponse{}\n\n\tpath := \"/releases\"\n\tapiResponse = repo.gateway.GetResource(repo.config.TargetURL+path, repo.config.Username, repo.config.Password, &response)\n\tif apiResponse.IsNotSuccessful() {\n\t\treturn\n\t}\n\n\tlist := []*models.Release{}\n\tfor _, 
resource := range response {\n\t\tlist = append(list, resource.ToModel())\n\t}\n\treleases = models.Releases(list)\n\n\treturn\n}", "func (s *ReleaseTagService) List(ctx context.Context, releaseID int64) ([]*ReleaseTagResponse, error) {\n\tquery := \"%24filter=release/id+eq+%27\" + strconv.FormatInt(releaseID, 10) + \"%27\"\n\treturn s.GetWithQuery(ctx, query)\n}", "func releases(ctx context.Context, c *github.Client, org string, project string) ([]*release, error) {\n\tvar result []*release\n\n\topts := &github.ListOptions{PerPage: 100}\n\n\tklog.Infof(\"Downloading releases for %s/%s ...\", org, project)\n\n\tfor page := 1; page != 0; {\n\t\topts.Page = page\n\t\trs, resp, err := c.Repositories.ListReleases(ctx, org, project, opts)\n\t\tif err != nil {\n\t\t\treturn result, err\n\t\t}\n\n\t\tpage = resp.NextPage\n\t\tuntil := time.Now()\n\n\t\tfor _, r := range rs {\n\t\t\tname := r.GetName()\n\t\t\tif name == \"\" {\n\t\t\t\tname = r.GetTagName()\n\t\t\t}\n\n\t\t\trel := &release{\n\t\t\t\tName: name,\n\t\t\t\tDraft: r.GetDraft(),\n\t\t\t\tPrerelease: r.GetPrerelease(),\n\t\t\t\tPublishedAt: r.GetPublishedAt().Time,\n\t\t\t\tActiveUntil: until,\n\t\t\t\tDownloads: map[string]int{},\n\t\t\t\tDownloadRatios: map[string]float64{},\n\t\t\t}\n\n\t\t\tfor _, a := range r.Assets {\n\t\t\t\tif ignoreAssetRe.MatchString(a.GetName()) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\trel.Downloads[a.GetName()] = a.GetDownloadCount()\n\t\t\t\trel.DownloadsTotal += int64(a.GetDownloadCount())\n\t\t\t}\n\n\t\t\tif !rel.Draft && !rel.Prerelease {\n\t\t\t\tuntil = rel.PublishedAt\n\t\t\t}\n\n\t\t\tresult = append(result, rel)\n\t\t}\n\t}\n\n\tfor _, r := range result {\n\t\tr.DaysActive = r.ActiveUntil.Sub(r.PublishedAt).Hours() / 24\n\t\tr.DownloadsPerDay = float64(r.DownloadsTotal) / r.DaysActive\n\n\t\tfor k, v := range r.Downloads {\n\t\t\tr.DownloadRatios[k] = float64(v) / float64(r.DownloadsTotal)\n\t\t}\n\t}\n\n\treturn result, nil\n}", "func (c Provider) Releases(params 
[]string) (rs *results.ResultSet, err error) {\n\t// Query the API\n\tid := strings.Join(strings.Split(params[1], \"/\"), \"%2f\")\n\turl := fmt.Sprintf(TagsEndpoint, params[0], id)\n\tvar tags Tags\n\tif err = util.FetchJSON(url, \"releases\", &tags); err != nil {\n\t\treturn\n\t}\n\trs = tags.Convert(params[0], params[1])\n\treturn\n}", "func GetAllReleases(ctx context.Context, client models.Client, opts models.ListReleasesOptions) (Releases, error) {\n\tvar (\n\t\tvariables = map[string]interface{}{\n\t\t\t\"cursor\": (*githubv4.String)(nil),\n\t\t\t\"owner\": githubv4.String(opts.Owner),\n\t\t\t\"name\": githubv4.String(opts.Repository),\n\t\t}\n\n\t\treleases = []Release{}\n\t)\n\n\tfor {\n\t\tq := &QueryListReleases{}\n\t\tif err := client.Query(ctx, q, variables); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treleases = append(releases, q.Repository.Releases.Nodes...)\n\t\tif !q.Repository.Releases.PageInfo.HasNextPage {\n\t\t\tbreak\n\t\t}\n\t\tvariables[\"cursor\"] = q.Repository.Releases.PageInfo.EndCursor\n\t}\n\n\treturn releases, nil\n}", "func cmdGetReleases(ccmd *cobra.Command, args []string) {\n\taplSvc := apl.NewClient()\n\toutput := runGetCommand(args, aplSvc.Releases.Get)\n\tif output != nil {\n\t\tfields := []string{\"ID\", \"StackID\", \"Version\", \"CreatedTime\"}\n\t\tprintTableResultsCustom(output.(apl.Release), fields)\n\t}\n}", "func (c *GitLabClient) ProjectListReleases(ctx context.Context, owner, project string, opt *gitlab.ListReleasesOptions) ([]*gitlab.Release, *gitlab.Response, error) {\n\treturn c.client.Releases.ListReleases(owner+\"/\"+project, opt, gitlab.WithContext(ctx))\n}", "func (s *ReleaseService) GetReleases(page, perPage uint) ([]*Release, error) {\n\tp := PaginateParams{}\n\tp.Limit, p.Offset = calculateLimitOffset(page, perPage)\n\treturn s.SearchReleases(\"\", \"\", p)\n}", "func (c Provider) Releases(params []string) (rs *results.ResultSet, err error) {\n\tname := params[0]\n\t// Query the API\n\turl := 
fmt.Sprintf(SeriesAPI, name)\n\tvar seriesList SeriesList\n\tif err = util.FetchJSON(url, \"series\", &seriesList); err != nil {\n\t\treturn\n\t}\n\t// Proccess Releases\n\tvar lrs Releases\n\tfor _, s := range seriesList.Entries {\n\t\t// Only Active Series\n\t\tif !s.Active {\n\t\t\tcontinue\n\t\t}\n\t\t// Only stable or supported\n\t\tswitch s.Status {\n\t\tcase \"Active Development\":\n\t\tcase \"Current Stable Release\":\n\t\tcase \"Supported\":\n\t\tdefault:\n\t\t\tcontinue\n\t\t}\n\t\turl := fmt.Sprintf(ReleasesAPI, name, s.Name)\n\t\tvar vl VersionList\n\t\tif err = util.FetchJSON(url, \"releases\", &vl); err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tfor i := len(vl.Versions) - 1; i >= 0; i-- {\n\t\t\tr := vl.Versions[i]\n\t\t\turl := fmt.Sprintf(FilesAPI, name, s.Name, r.Number)\n\t\t\tvar fl FileList\n\t\t\tif err = util.FetchJSON(url, \"files\", &fl); err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tvar lr Release\n\t\t\tfor _, f := range fl.Files {\n\t\t\t\tif f.Type != \"Code Release Tarball\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tlr.name = name\n\t\t\t\tlr.series = s.Name\n\t\t\t\tlr.release = r.Number\n\t\t\t\tlr.uploaded = f.Uploaded\n\t\t\t}\n\t\t\tlrs = append(lrs, lr)\n\t\t}\n\t}\n\tif len(lrs) == 0 {\n\t\terr = results.NotFound\n\t\treturn\n\t}\n\trs = lrs.Convert(name)\n\terr = nil\n\treturn\n}", "func (p *Project) Releases() []Release {\n\treturn p.releases\n}", "func (s *ReleaseService) SearchReleases(pattern string, by SortReleasesBy, params PaginateParams) ([]*Release, error) {\n\tvar (\n\t\tmethod = http.MethodGet\n\t\tpath = fmt.Sprintf(\"/releases\")\n\t)\n\n\tqueries := url.Values{}\n\t// use params only if not default values\n\tif params.Limit != 0 || params.Offset != 0 {\n\t\tqueries.Set(\"limit\", strconv.FormatUint(uint64(params.Limit), 10))\n\t\tqueries.Set(\"offset\", strconv.FormatUint(uint64(params.Offset), 10))\n\t}\n\tif pattern != \"\" {\n\t\tqueries.Set(\"pattern\", url.QueryEscape(pattern))\n\t}\n\tif by != \"\" {\n\t\tvar 
qString string\n\t\tif params.SortOrder != \"\" {\n\t\t\tqString = fmt.Sprintf(\"%s_%s\", by, params.SortOrder)\n\t\t} else {\n\t\t\tqString = string(by)\n\t\t}\n\t\tqueries.Set(\"sort\", qString)\n\t}\n\n\treq := s.client.newRequest(path, method)\n\treq.URL.RawQuery = queries.Encode()\n\n\tjs, statusCode, err := s.client.do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tswitch js.Status {\n\tcase \"success\":\n\t\tbreak\n\tcase \"fail\":\n\t\tjF, ok := js.Data.(*jSendFailData)\n\t\tif !ok {\n\t\t\treturn nil, ErrRESTServerError\n\t\t}\n\t\ts.client.Logger.Printf(\"%+v\", jF)\n\t\tswitch statusCode {\n\t\tcase http.StatusBadRequest:\n\t\t\tswitch jF.ErrorReason {\n\t\t\tcase \"limit\":\n\t\t\t\tfallthrough\n\t\t\tcase \"offset\":\n\t\t\t\tfallthrough\n\t\t\tdefault:\n\t\t\t}\n\t\t\tfallthrough\n\t\tdefault:\n\t\t\treturn nil, ErrRESTServerError\n\t\t}\n\tcase \"error\":\n\t\treturn nil, ErrRESTServerError\n\tdefault:\n\t\tswitch statusCode {\n\t\tcase http.StatusUnauthorized:\n\t\t\treturn nil, ErrAccessDenied\n\t\tcase http.StatusInternalServerError:\n\t\t\tfallthrough\n\t\tdefault:\n\t\t\treturn nil, ErrRESTServerError\n\t\t}\n\t}\n\n\treleases := make([]*Release, 0)\n\tdata, ok := js.Data.(*json.RawMessage)\n\tif !ok {\n\t\treturn nil, ErrRESTServerError\n\t}\n\terr = json.Unmarshal(*data, &releases)\n\tif err != nil {\n\t\treturn nil, ErrRESTServerError\n\t}\n\treturn releases, nil\n}", "func (c *Client) List() ([]*release.Release, error) {\n\tlist := action.NewList(c.actionConfig)\n\treturn list.Run()\n}", "func (operator *AccessOperator) ListReleaseByApp(cxt context.Context, appName, cfgsetName string) ([]*common.Release, error) {\n\t//query business and app first\n\tbusiness, app, err := getBusinessAndApp(operator, operator.Business, appName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconfigSet, err := operator.innergetConfigSet(cxt, business.Bid, app.Appid, cfgsetName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif configSet == nil 
{\n\t\treturn nil, nil\n\t}\n\n\trequest := &accessserver.QueryHistoryReleasesReq{\n\t\tSeq: pkgcommon.Sequence(),\n\t\tBid: business.Bid,\n\t\tCfgsetid: configSet.Cfgsetid,\n\t\t//fix: list all release\n\t\t//Operator: operator.User,\n\t\tIndex: operator.index,\n\t\tLimit: operator.limit,\n\t}\n\tgrpcOptions := []grpc.CallOption{\n\t\tgrpc.WaitForReady(true),\n\t}\n\tresponse, err := operator.Client.QueryHistoryReleases(cxt, request, grpcOptions...)\n\tif err != nil {\n\t\tlogger.V(3).Infof(\"ListReleaseByApp failed, %s\", err.Error())\n\t\treturn nil, err\n\t}\n\tif response.ErrCode != common.ErrCode_E_OK {\n\t\tlogger.V(3).Infof(\"ListReleaseByApp all successfully, but response Err, %s\", response.ErrMsg)\n\t\treturn nil, fmt.Errorf(\"%s\", response.ErrMsg)\n\t}\n\treturn response.Releases, nil\n}", "func (c Provider) Releases(params []string) (rs *results.ResultSet, err error) {\n\tname := params[0]\n\tclient, err := ftp.Dial(MirrorsFTP)\n\tif err != nil {\n\t\tlog.Debugf(\"Failed to connect to FTP server: %s\\n\", err)\n\t\terr = results.Unavailable\n\t\treturn\n\t}\n\tif err = client.Login(\"anonymous\", \"anonymous\"); err != nil {\n\t\tlog.Debugf(\"Failed to login to FTP server: %s\\n\", err)\n\t\terr = results.Unavailable\n\t\treturn\n\t}\n\tentries, err := client.List(\"gnu\" + \"/\" + name)\n\tif err != nil {\n\t\tlog.Debugf(\"FTP Error: %s\\n\", err.Error())\n\t\terr = results.NotFound\n\t\treturn\n\t}\n\trs = results.NewResultSet(name)\n\tfor _, entry := range entries {\n\t\tif entry.Type != ftp.EntryTypeFile {\n\t\t\tcontinue\n\t\t}\n\t\tif sm := TarballRegex.FindStringSubmatch(entry.Name); len(sm) > 2 {\n\t\t\tr := results.NewResult(sm[1], sm[2], fmt.Sprintf(GNUFormat, name, entry.Name), entry.Time)\n\t\t\trs.AddResult(r)\n\t\t}\n\t}\n\tif rs.Len() == 0 {\n\t\terr = results.NotFound\n\t}\n\tsort.Sort(rs)\n\treturn\n}", "func (m *MockGithubAssetClient) ListReleases(ctx context.Context, opt *github.ListOptions) ([]*github.RepositoryRelease, error) 
{\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ListReleases\", ctx, opt)\n\tret0, _ := ret[0].([]*github.RepositoryRelease)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (c Provider) Releases(name string) (rs *results.ResultSet, s results.Status) {\n\trs, s = c.GetReleases(name, 100)\n\treturn\n}", "func (f *Factory) findReleases(ctx context.Context, u *url.URL) ([]*claircore.Distribution, error) {\n\tdir, err := u.Parse(\"dists/\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"debian: unable to construct URL: %w\", err)\n\t}\n\treq, err := http.NewRequestWithContext(ctx, http.MethodGet, dir.String(), nil)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"debian: unable to construct request: %w\", err)\n\t}\n\tres, err := f.c.Do(req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"debian: unable to do request: %w\", err)\n\t}\n\tdefer res.Body.Close()\n\tswitch res.StatusCode {\n\tcase http.StatusOK:\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"debian: unexpected status fetching %q: %s\", dir.String(), res.Status)\n\t}\n\tvar buf bytes.Buffer\n\tif _, err := buf.ReadFrom(res.Body); err != nil {\n\t\treturn nil, fmt.Errorf(\"debian: unable to read dists listing: %w\", err)\n\t}\n\tms := linkRegexp.FindAllStringSubmatch(buf.String(), -1)\n\n\tvar todos []*claircore.Distribution\nListing:\n\tfor _, m := range ms {\n\t\tdist := m[1]\n\t\tswitch {\n\t\tcase dist == \"\":\n\t\t\tcontinue\n\t\tcase dist[0] == '/', dist[0] == '?':\n\t\t\tcontinue\n\t\t}\n\t\tfor _, s := range skipList {\n\t\t\tif strings.Contains(dist, s) {\n\t\t\t\tcontinue Listing\n\t\t\t}\n\t\t}\n\t\tdist = strings.Trim(dist, \"/\")\n\t\trf, err := dir.Parse(path.Join(dist, `Release`))\n\t\tif err != nil {\n\t\t\tzlog.Info(ctx).\n\t\t\t\tErr(err).\n\t\t\t\tStringer(\"context\", dir).\n\t\t\t\tStr(\"target\", path.Join(dist, `Release`)).\n\t\t\t\tMsg(\"unable to construct URL\")\n\t\t\tcontinue\n\t\t}\n\t\treq, err := http.NewRequestWithContext(ctx, http.MethodGet, rf.String(), 
nil)\n\t\tif err != nil {\n\t\t\tzlog.Info(ctx).\n\t\t\t\tErr(err).\n\t\t\t\tStringer(\"url\", rf).\n\t\t\t\tMsg(\"unable to construct request\")\n\t\t\tcontinue\n\t\t}\n\t\treq.Header.Set(\"range\", \"bytes=0-512\")\n\t\tres, err := f.c.Do(req)\n\t\tif err != nil {\n\t\t\tzlog.Info(ctx).\n\t\t\t\tErr(err).\n\t\t\t\tStringer(\"url\", rf).\n\t\t\t\tMsg(\"unable to do request\")\n\t\t\tcontinue\n\t\t}\n\t\tbuf.Reset()\n\t\tbuf.ReadFrom(res.Body)\n\t\tres.Body.Close()\n\t\tswitch res.StatusCode {\n\t\tcase http.StatusPartialContent, http.StatusOK:\n\t\tcase http.StatusNotFound: // Probably extremely old, it's fine.\n\t\t\tcontinue\n\t\tdefault:\n\t\t\tzlog.Info(ctx).\n\t\t\t\tStr(\"status\", res.Status).\n\t\t\t\tStringer(\"url\", rf).\n\t\t\t\tMsg(\"unexpected response\")\n\t\t\tcontinue\n\t\t}\n\t\ttp := textproto.NewReader(bufio.NewReader(io.MultiReader(&buf, bytes.NewReader([]byte(\"\\r\\n\\r\\n\")))))\n\t\th, err := tp.ReadMIMEHeader()\n\t\tif err != nil {\n\t\t\tzlog.Info(ctx).Err(err).Msg(\"unable to read MIME-ish headers\")\n\t\t\tcontinue\n\t\t}\n\t\tsv := h.Get(\"Version\")\n\t\tif sv == \"\" {\n\t\t\tzlog.Debug(ctx).Str(\"dist\", dist).Msg(\"no version assigned, skipping\")\n\t\t\tcontinue\n\t\t}\n\t\tvs := strings.Split(sv, \".\")\n\t\tif len(vs) == 1 {\n\t\t\tzlog.Debug(ctx).Str(\"dist\", dist).Msg(\"no version assigned, skipping\")\n\t\t\tcontinue\n\t\t}\n\t\tver, err := strconv.ParseInt(vs[0], 10, 32)\n\t\tif err != nil {\n\t\t\tzlog.Info(ctx).Err(err).Msg(\"unable to parse version\")\n\t\t\tcontinue\n\t\t}\n\n\t\ttodos = append(todos, mkDist(dist, int(ver)))\n\t}\n\n\treturn todos, nil\n}", "func GetReleases(sandboxName string) ([]Release, error) {\n\treturn getReleases(sandboxName)\n}", "func BeeReleasesInfo() (repos []Releases) {\n\tvar url = \"https://api.github.com/repos/beego/bee/releases\"\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\tbeeLogger.Log.Warnf(\"Get bee releases from github error: %s\", err)\n\t\treturn\n\t}\n\tdefer 
resp.Body.Close()\n\tbodyContent, _ := ioutil.ReadAll(resp.Body)\n\tif err = json.Unmarshal(bodyContent, &repos); err != nil {\n\t\tbeeLogger.Log.Warnf(\"Unmarshal releases body error: %s\", err)\n\t\treturn\n\t}\n\treturn\n}", "func (h *Helm3Client) ListReleasesNames(labelSelector map[string]string) ([]string, error) {\n\tlabelsSet := make(kblabels.Set)\n\tfor k, v := range labelSelector {\n\t\tlabelsSet[k] = v\n\t}\n\tlabelsSet[\"owner\"] = \"helm\"\n\n\tlist, err := h.KubeClient.CoreV1().\n\t\tSecrets(h.Namespace).\n\t\tList(context.TODO(), metav1.ListOptions{LabelSelector: labelsSet.AsSelector().String()})\n\tif err != nil {\n\t\th.LogEntry.Debugf(\"helm: list of releases ConfigMaps failed: %s\", err)\n\t\treturn nil, err\n\t}\n\n\tuniqNamesMap := make(map[string]struct{})\n\tfor _, secret := range list.Items {\n\t\treleaseName, hasKey := secret.Labels[\"name\"]\n\t\tif hasKey && releaseName != \"\" {\n\t\t\tuniqNamesMap[releaseName] = struct{}{}\n\t\t}\n\t}\n\n\t// Do not return ignored release.\n\tdelete(uniqNamesMap, app.HelmIgnoreRelease)\n\n\tuniqNames := make([]string, 0)\n\tfor name := range uniqNamesMap {\n\t\tuniqNames = append(uniqNames, name)\n\t}\n\n\tsort.Strings(uniqNames)\n\treturn uniqNames, nil\n}", "func (c *Client) AllReleases() ([]db.Release, error) {\n\tnames, err := c.names()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tuiprogress.Start()\n\tbar := uiprogress.AddBar(len(names))\n\tbar.PrependFunc(func(b *uiprogress.Bar) string {\n\t\trate := float64(b.Current()) / b.TimeElapsed().Seconds()\n\t\tremainingCount := b.Total - b.Current()\n\t\tremainingTime := time.Duration(float64(remainingCount)/rate) * time.Second\n\n\t\treturn fmt.Sprintf(\n\t\t\t\"%v left (%.f/s)\",\n\t\t\tremainingTime,\n\t\t\trate,\n\t\t)\n\t})\n\treleases := make(chan db.Release)\n\tc.addReleases(names, releases, bar)\n\tclose(releases)\n\treturn releaseChanToSlice(releases), nil\n}", "func (s *ReleaseTagService) ListByCommit(ctx context.Context, commit string) 
([]*ReleaseTagResponse, error) {\n\tquery := \"%24filter=release/commit+eq+%27\" + commit + \"%27\"\n\treturn s.GetWithQuery(ctx, query)\n}", "func (m *MockRepoClient) ListReleases() ([]clients.Release, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ListReleases\")\n\tret0, _ := ret[0].([]clients.Release)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (s *Services) Releases(ctx context.Context, request *proto.ReleasesRequest) (*proto.ReleasesResponse, error) {\n\tvar result []models.Release\n\tvar resultCount int64\n\tquery := s.DB\n\n\tif request != nil && request.Query != nil {\n\t\tif request.Query.AnimeId != 0 {\n\t\t\tquery = query.Where(\"anime_id = ?\", request.Query.AnimeId)\n\t\t}\n\n\t\tif request.Query.Title != \"\" {\n\t\t\tquery = models.WhereFieldLikeString(\n\t\t\t\tquery,\n\t\t\t\tfmt.Sprintf(`\"%s\".title`, models.Release.TableName(models.Release{})),\n\t\t\t\trequest.Query.Title,\n\t\t\t)\n\t\t}\n\n\t\tif len(request.Query.Genres) > 0 {\n\t\t\t// This JOIN method is use of Correlated Subqueries when the foreign key is indexed, (good explanation on the link below)\n\t\t\t// https://www.periscopedata.com/blog/4-ways-to-join-only-the-first-row-in-sql\n\n\t\t\t// SELECT \"Releases\".* FROM \"Releases\" INNER JOIN (\n\t\t\t// \tSELECT * FROM \"Releases\" AS \"Release\" WHERE (\n\t\t\t// \t\tSELECT \"release_id\" FROM public.\"ReleaseGenres\" WHERE (\n\t\t\t// \t\t\tpublic.\"ReleaseGenres\".genre_id IN (1,4)) AND \"Release\".id = public.\"ReleaseGenres\".release_id LIMIT 1\n\t\t\t// ) IS NOT NULL) AS \"Release\" ON public.\"Releases\" .id = \"Release\".id\n\n\t\t\tquery = query.Joins(\n\t\t\t\tfmt.Sprintf(`INNER JOIN ( \n\t\t\t\t\tSELECT * FROM \"%s\" AS \"Release\" WHERE (\n\t\t\t\t\t\tSELECT \"release_id\" FROM public.\"ReleaseGenres\" WHERE (\n\t\t\t\t\t\t\tpublic.\"ReleaseGenres\".genre_id IN (?)) AND \"Release\".id = public.\"ReleaseGenres\".release_id LIMIT 1\n\t\t\t\t\t\t) IS NOT NULL) AS \"Release\" ON public.\"%s\" .id = 
\"Release\".id`,\n\t\t\t\t\tmodels.Release.TableName(models.Release{}),\n\t\t\t\t\tmodels.Release.TableName(models.Release{}),\n\t\t\t\t),\n\t\t\t\trequest.Query.Genres,\n\t\t\t)\n\t\t}\n\n\t\tif request.Query.Limit != 0 {\n\t\t\tquery = query.Limit(request.Query.Limit)\n\t\t}\n\n\t\tif request.Query.Offset != 0 {\n\t\t\tquery = query.Offset(request.Query.Offset)\n\t\t}\n\t}\n\n\tif err := query.Find(&result).Limit(nil).Offset(nil).Count(&resultCount).Error; err != nil {\n\t\tlog.Println(err)\n\t\treturn nil, err\n\t}\n\n\tfinalRes := []*proto.Release{}\n\n\tfor i := range result {\n\t\tfinalRes = append(finalRes, result[i].ToProto())\n\t}\n\n\treturn &proto.ReleasesResponse{Releases: finalRes, Count: resultCount}, nil\n}", "func repoList(w http.ResponseWriter, r *http.Request) {}", "func GetReleases(dbOwner, dbFolder, dbName string) (releases map[string]ReleaseEntry, err error) {\n\tdbQuery := `\n\t\tSELECT release_list\n\t\tFROM sqlite_databases\n\t\tWHERE user_id = (\n\t\t\t\tSELECT user_id\n\t\t\t\tFROM users\n\t\t\t\tWHERE lower(user_name) = lower($1)\n\t\t\t)\n\t\t\tAND folder = $2\n\t\t\tAND db_name = $3`\n\terr = pdb.QueryRow(dbQuery, dbOwner, dbFolder, dbName).Scan(&releases)\n\tif err != nil {\n\t\tlog.Printf(\"Error when retrieving releases for database '%s%s%s': %v\\n\", dbOwner, dbFolder, dbName, err)\n\t\treturn nil, err\n\t}\n\tif releases == nil {\n\t\t// If there aren't any releases yet, return an empty set instead of nil\n\t\treleases = make(map[string]ReleaseEntry)\n\t}\n\treturn releases, nil\n}", "func ListDeployments(filter *string, kubeConfig []byte) (*rls.ListReleasesResponse, error) {\n\tdefer tearDown()\n\thClient, err := GetHelmClient(kubeConfig)\n\t// TODO doc the options here\n\tvar sortBy = int32(2)\n\tvar sortOrd = int32(1)\n\tops := 
[]helm.ReleaseListOption{\n\t\thelm.ReleaseListSort(sortBy),\n\t\thelm.ReleaseListOrder(sortOrd),\n\t\t//helm.ReleaseListLimit(limit),\n\t\t//helm.ReleaseListFilter(filter),\n\t\t//helm.ReleaseListStatuses(codes),\n\t\t//helm.ReleaseListNamespace(\"\"),\n\t}\n\tif filter != nil {\n\t\tops = append(ops, helm.ReleaseListFilter(*filter))\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresp, err := hClient.ListReleases(ops...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp, nil\n}", "func (c Provider) Releases(name string) (rs *results.ResultSet, s results.Status) {\n\tr, s := c.Latest(name)\n\tif s != results.OK {\n\t\treturn\n\t}\n\trs = results.NewResultSet(name)\n\trs.AddResult(r)\n\treturn\n}", "func (c Provider) Releases(name string) (rs *results.ResultSet, s results.Status) {\n\tclient, err := ftp.Dial(MirrorsFTP)\n\tif err != nil {\n\t\ts = results.Unavailable\n\t\treturn\n\t}\n\terr = client.Login(\"anonymous\", \"anonymous\")\n\tif err != nil {\n\t\ts = results.Unavailable\n\t\treturn\n\t}\n\tentries, err := client.List(\"gnu\" + \"/\" + name)\n\tif err != nil {\n\t\tfmt.Printf(\"FTP Error: %s\\n\", err.Error())\n\t\ts = results.NotFound\n\t\treturn\n\t}\n\trs = results.NewResultSet(name)\n\tfor _, entry := range entries {\n\t\tif entry.Type != ftp.EntryTypeFile {\n\t\t\tcontinue\n\t\t}\n\t\tsm := TarballRegex.FindStringSubmatch(entry.Name)\n\t\tif len(sm) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tr := results.NewResult(sm[1], sm[2], fmt.Sprintf(GNUFormat, name, entry.Name), entry.Time)\n\t\trs.AddResult(r)\n\t\ts = results.OK\n\t}\n\treturn\n}", "func (c *FakeReleaseHistories) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ReleaseHistoryList, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewListAction(releasehistoriesResource, releasehistoriesKind, c.ns, opts), &v1alpha1.ReleaseHistoryList{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\n\tlabel, _, _ := testing.ExtractFromListOptions(opts)\n\tif label == nil 
{\n\t\tlabel = labels.Everything()\n\t}\n\tlist := &v1alpha1.ReleaseHistoryList{ListMeta: obj.(*v1alpha1.ReleaseHistoryList).ListMeta}\n\tfor _, item := range obj.(*v1alpha1.ReleaseHistoryList).Items {\n\t\tif label.Matches(labels.Set(item.Labels)) {\n\t\t\tlist.Items = append(list.Items, item)\n\t\t}\n\t}\n\treturn list, err\n}", "func compListReleases(toComplete string, ignoredReleaseNames []string, cfg *action.Configuration) ([]string, cobra.ShellCompDirective) {\n\tcobra.CompDebugln(fmt.Sprintf(\"compListReleases with toComplete %s\", toComplete), settings.Debug)\n\n\tclient := action.NewList(cfg)\n\tclient.All = true\n\tclient.Limit = 0\n\t// Do not filter so as to get the entire list of releases.\n\t// This will allow zsh and fish to match completion choices\n\t// on other criteria then prefix. For example:\n\t// helm status ingress<TAB>\n\t// can match\n\t// helm status nginx-ingress\n\t//\n\t// client.Filter = fmt.Sprintf(\"^%s\", toComplete)\n\n\tclient.SetStateMask()\n\treleases, err := client.Run()\n\tif err != nil {\n\t\treturn nil, cobra.ShellCompDirectiveDefault\n\t}\n\n\tvar choices []string\n\tfilteredReleases := filterReleases(releases, ignoredReleaseNames)\n\tfor _, rel := range filteredReleases {\n\t\tchoices = append(choices,\n\t\t\tfmt.Sprintf(\"%s\\t%s-%s -> %s\", rel.Name, rel.Chart.Metadata.Name, rel.Chart.Metadata.Version, rel.Info.Status.String()))\n\t}\n\n\treturn choices, cobra.ShellCompDirectiveNoFileComp\n}", "func (c *Client) ListAssets(ctx context.Context, releaseID int64) ([]*github.ReleaseAsset, error) {\n\tresult := []*github.ReleaseAsset{}\n\tpage := 1\n\n\tfor {\n\t\tassets, res, err := c.Repositories.ListReleaseAssets(context.TODO(), c.Owner, c.Repo, releaseID, &github.ListOptions{Page: page})\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to list assets\")\n\t\t}\n\n\t\tif res.StatusCode != http.StatusOK {\n\t\t\treturn nil, errors.Errorf(\"list release assets: invalid status code: %s\", 
res.Status)\n\t\t}\n\n\t\tresult = append(result, assets...)\n\n\t\tif res.NextPage <= page {\n\t\t\tbreak\n\t\t}\n\n\t\tpage = res.NextPage\n\t}\n\n\treturn result, nil\n}", "func (g *Gatherer) ListReleaseNotes() (*ReleaseNotes, error) {\n\t// Load map providers\n\tmapProviders := []MapProvider{}\n\tfor _, initString := range g.options.MapProviderStrings {\n\t\tprovider, err := NewProviderFromInitString(initString)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"while getting release notes map providers: %w\", err)\n\t\t}\n\t\tmapProviders = append(mapProviders, provider)\n\t}\n\n\tcommits, err := g.listCommits(g.options.Branch, g.options.StartSHA, g.options.EndSHA)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"listing commits: %w\", err)\n\t}\n\n\t// Get the PRs into a temporary results set\n\tresultsTemp, err := g.gatherNotes(commits)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"gathering notes: %w\", err)\n\t}\n\n\t// Cycle the results and add the complete notes, as well as those that\n\t// have a map associated with it\n\tresults := []*Result{}\n\tlogrus.Info(\"Checking PRs for mapped data\")\n\tfor _, res := range resultsTemp {\n\t\t// If the PR has no release note, check if we have to add it\n\t\tif MatchesExcludeFilter(*res.pullRequest.Body) {\n\t\t\tfor _, provider := range mapProviders {\n\t\t\t\tnoteMaps, err := provider.GetMapsForPR(res.pullRequest.GetNumber())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\n\t\t\t\t\t\t\"checking if a map exists for PR %d: %w\", res.pullRequest.GetNumber(),\n\t\t\t\t\t\terr)\n\t\t\t\t}\n\t\t\t\tif len(noteMaps) != 0 {\n\t\t\t\t\tlogrus.Infof(\n\t\t\t\t\t\t\"Artificially adding pr #%d because a map for it was found\",\n\t\t\t\t\t\tres.pullRequest.GetNumber(),\n\t\t\t\t\t)\n\t\t\t\t\tresults = append(results, res)\n\t\t\t\t} else {\n\t\t\t\t\tlogrus.Debugf(\n\t\t\t\t\t\t\"Skipping PR #%d because it contains no release 
note\",\n\t\t\t\t\t\tres.pullRequest.GetNumber(),\n\t\t\t\t\t)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\t// Append the note as it is\n\t\t\tresults = append(results, res)\n\t\t}\n\t}\n\n\tdedupeCache := map[string]struct{}{}\n\tnotes := NewReleaseNotes()\n\tfor _, result := range results {\n\t\tif g.options.RequiredAuthor != \"\" {\n\t\t\tif result.commit.GetAuthor().GetLogin() != g.options.RequiredAuthor {\n\t\t\t\tlogrus.Infof(\n\t\t\t\t\t\"Skipping release note for PR #%d because required author %q does not match with %q\",\n\t\t\t\t\tresult.pullRequest.GetNumber(), g.options.RequiredAuthor, result.commit.GetAuthor().GetLogin(),\n\t\t\t\t)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tnote, err := g.ReleaseNoteFromCommit(result)\n\t\tif err != nil {\n\t\t\tlogrus.Errorf(\n\t\t\t\t\"Getting the release note from commit %s (PR #%d): %v\",\n\t\t\t\tresult.commit.GetSHA(),\n\t\t\t\tresult.pullRequest.GetNumber(),\n\t\t\t\terr)\n\t\t\tcontinue\n\t\t}\n\n\t\t// Query our map providers for additional data for the release note\n\t\tfor _, provider := range mapProviders {\n\t\t\tnoteMaps, err := provider.GetMapsForPR(result.pullRequest.GetNumber())\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"error while looking up note map: %w\", err)\n\t\t\t}\n\n\t\t\tfor _, noteMap := range noteMaps {\n\t\t\t\tif err := note.ApplyMap(noteMap, g.options.AddMarkdownLinks); err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"applying notemap for PR #%d: %w\", result.pullRequest.GetNumber(), err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif _, ok := dedupeCache[note.Markdown]; !ok {\n\t\t\tnotes.Set(note.PrNumber, note)\n\t\t\tdedupeCache[note.Markdown] = struct{}{}\n\t\t}\n\t}\n\treturn notes, nil\n}", "func (r *queryResolver) GithubReleases(ctx context.Context, after *entgql.Cursor[int], first *int, before *entgql.Cursor[int], last *int, orderBy *ent.GithubReleaseOrder, where *ent.GithubReleaseWhereInput) (*ent.GithubReleaseConnection, error) {\n\treturn 
ent.FromContext(ctx).GithubRelease.Query().Paginate(\n\t\tctx, after, first, before, last,\n\t\tent.WithGithubReleaseOrder(orderBy),\n\t\tent.WithGithubReleaseFilter(where.Filter),\n\t)\n}", "func (a *Agent) GetReleaseHistory(\n\tctx context.Context,\n\tname string,\n) ([]*release.Release, error) {\n\tctx, span := telemetry.NewSpan(ctx, \"helm-get-release-history\")\n\tdefer span.End()\n\n\ttelemetry.WithAttributes(span,\n\t\ttelemetry.AttributeKV{Key: \"name\", Value: name},\n\t)\n\n\tcmd := action.NewHistory(a.ActionConfig)\n\n\treturn cmd.Run(name)\n}", "func parseReleasesAPI() (releases, error) {\n\tr, err := http.Get(\"https://api.github.com/repos/eze-kiel/shaloc/releases\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer r.Body.Close()\n\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar rel releases\n\tif err = json.Unmarshal(body, &rel); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn rel, nil\n}", "func handleRepo(ctx context.Context, client *github.Client, repo *github.Repository) (*release, error) {\n\tif !in(orgs, repo.GetOwner().GetLogin()) {\n\t\t// return early\n\t\treturn nil, nil\n\t}\n\topt := &github.ListOptions{\n\t\tPage: 1,\n\t\tPerPage: 100,\n\t}\n\n\treleases, resp, err := client.Repositories.ListReleases(ctx, repo.GetOwner().GetLogin(), repo.GetName(), opt)\n\tif resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusForbidden || err != nil {\n\t\tif _, ok := err.(*github.RateLimitError); ok {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Skip it because there is no release.\n\t\treturn nil, nil\n\t}\n\tif err != nil || len(releases) < 1 {\n\t\treturn nil, err\n\t}\n\n\trl := release{\n\t\tRepository: repo,\n\t}\n\t// Get information about the binary assets for linux-amd64\n\tarch := \"linux-amd64\"\n\tfor i := 0; i < len(releases); i++ {\n\t\tr := releases[i]\n\t\tif rl.Release == nil && !r.GetDraft() {\n\t\t\t// If this is the latest release and it's not a draft make it the 
one\n\t\t\t// to return\n\t\t\trl.Release = r\n\t\t\tfor _, asset := range r.Assets {\n\t\t\t\trl.BinaryDownloadCount += asset.GetDownloadCount()\n\t\t\t\tif strings.HasSuffix(asset.GetName(), arch) {\n\t\t\t\t\trl.BinaryURL = asset.GetBrowserDownloadURL()\n\t\t\t\t\trl.BinaryName = asset.GetName()\n\t\t\t\t\trl.BinarySince = units.HumanDuration(time.Since(asset.GetCreatedAt().Time))\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif strings.HasSuffix(asset.GetName(), arch+\".sha256\") {\n\t\t\t\t\tc, err := getURLContent(asset.GetBrowserDownloadURL())\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t\trl.BinarySHA256 = c\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif strings.HasSuffix(asset.GetName(), arch+\".md5\") {\n\t\t\t\t\tc, err := getURLContent(asset.GetBrowserDownloadURL())\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t\trl.BinaryMD5 = c\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tfor _, asset := range r.Assets {\n\t\t\t\trl.BinaryDownloadCount += asset.GetDownloadCount()\n\t\t\t}\n\t\t}\n\t}\n\n\treturn &rl, nil\n}", "func NewReleasesCommand() *cobra.Command {\n\n\tcmd := createListCommand(cmdListReleases, \"releases\", \"\")\n\tcmd.Flags().StringVar(&releaseParams.Name, \"name\", \"\", \"Filter releases by name\")\n\tcmd.Flags().StringVar(&releaseParams.Version, \"version\", \"\", \"Filter releases by version\")\n\tcmd.Flags().StringVar(&releaseParams.StackID, \"stack-id\", \"\", \"Filter releases by stack_id\")\n\tcmd.Flags().StringVar(&releaseParams.StackVersionID, \"stack-version-id\", \"\", \"Filter releases by stack_version_id\")\n\tcmd.Flags().StringVar(&releaseParams.ProjectID, \"project-id\", \"\", \"Filter releases by project_id\")\n\tcmd.Flags().StringVar(&releaseParams.LocImageID, \"loc-image-id\", \"\", \"Filter releases by loc_image_id\")\n\tcmd.Flags().StringVar(&releaseParams.BuildStatus, \"build-status\", \"\", \"Filter releases by build_status\")\n\n\t// Get\n\tgetCmd := 
createGetCommand(cmdGetReleases, \"release\", \"\")\n\tcmd.AddCommand(getCmd)\n\n\t// Create\n\tcreateCmd := NewReleasesCreateCommand()\n\tcmd.AddCommand(createCmd)\n\n\t// Delete\n\tdeleteCmd := createDeleteCommand(cmdDeleteReleases, \"release\", \"\")\n\tcmd.AddCommand(deleteCmd)\n\n\treturn cmd\n}", "func printReleases(m map[string]string) string {\n\trelease := parseUrlforStu(m[\"release\"])\n\tvar output string\n\tfor _, thing := range release.Items {\n\t\toutput += strFormatOut(thing)\n\t\toutput += \"\\n\"\n\t}\n\treturn output\n}", "func (c *singleClient) ListRepository(repo string) ([]string, error) {\n\treturn c.doList(repo, func(repo string, filter ListFilter) (\n\t\ttagmodels.ListResponse, error) {\n\n\t\treturn c.ListRepositoryWithPagination(repo, filter)\n\t})\n}", "func (d *RetryDownloader) GetReleases() ([]*Release, error) {\n\tvar (\n\t\treleases []*Release\n\t\terr error\n\t)\n\n\terr = d.retry(func() error {\n\t\treleases, err = d.Downloader.GetReleases()\n\t\treturn err\n\t})\n\n\treturn releases, err\n}", "func GetRelease(cmd *cobra.Command, args []string) {\n\treq := &helmmanager.ListReleaseReq{}\n\n\tif !flagAll {\n\t\treq.Size = common.GetUint32P(uint32(flagNum))\n\t}\n\tif len(args) > 0 {\n\t\treq.Size = common.GetUint32P(1)\n\t\treq.Name = common.GetStringP(args[0])\n\t}\n\treq.ClusterID = &flagCluster\n\treq.Namespace = &flagNamespace\n\n\tc := newClientWithConfiguration()\n\tr, err := c.Release().List(cmd.Context(), req)\n\tif err != nil {\n\t\tfmt.Printf(\"get release failed, %s\\n\", err.Error())\n\t\tos.Exit(1)\n\t}\n\n\tif flagOutput == outputTypeJson {\n\t\tprinter.PrintReleaseInJson(r)\n\t\treturn\n\t}\n\n\tprinter.PrintReleaseInTable(flagOutput == outputTypeWide, r)\n}", "func (c *CompanyValuation) PressReleases(req objects.RequestPressReleases) (prList []objects.PressReleases, err error) {\n\tdata, err := c.Client.Get(\n\t\tfmt.Sprintf(urlAPICompanyValuationPressReleases, req.Symbol),\n\t\tmap[string]string{\"limit\": 
fmt.Sprint(req.Limit)},\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = json.Unmarshal(data.Body(), &prList)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn prList, nil\n}", "func NewRepoListReleasesOK() *RepoListReleasesOK {\n\treturn &RepoListReleasesOK{}\n}", "func (api *RestAPI) GetRelease(epicID string) ([]ReleaseItem, error) {\n\tresults := []ReleaseItem{}\n\tissue, err := api.getIssue(epicID)\n\tif err != nil {\n\t\treturn results, err\n\t}\n\n\tscanner := bufio.NewScanner(strings.NewReader(issue.Fields.Description.(string)))\n\tfor scanner.Scan() {\n\t\tline := strings.ToLower(scanner.Text())\n\t\tif strings.Contains(line, \"/app#/projects\") {\n\t\t\tparts := strings.Split(line, \"/\")\n\t\t\tresults = append(results, ReleaseItem{Project: parts[5], Version: parts[7]})\n\t\t}\n\t}\n\treturn results, nil\n}", "func getReleaseCommits(g *u.GithubClient, owner, repo, branch, branchRange string) ([]*github.RepositoryCommit, string, string, error) {\n\t// Get start and release tag/commit based on input branch range\n\tstartTag, releaseTag, err := determineRange(g, owner, repo, branch, branchRange)\n\tif err != nil {\n\t\treturn nil, \"\", \"\", fmt.Errorf(\"failed to determine branch range: %v\", err)\n\t}\n\n\t// Get all tags in the repository\n\ttags, err := g.ListAllTags(owner, repo)\n\tif err != nil {\n\t\treturn nil, \"\", \"\", fmt.Errorf(\"failed to fetch repo tags: %v\", err)\n\t}\n\n\t// Get commits for specified branch and range\n\ttStart, err := g.GetCommitDate(owner, repo, startTag, tags)\n\tif err != nil {\n\t\treturn nil, \"\", \"\", fmt.Errorf(\"failed to get start commit date for %s: %v\", startTag, err)\n\t}\n\ttEnd, err := g.GetCommitDate(owner, repo, releaseTag, tags)\n\tif err != nil {\n\t\treturn nil, \"\", \"\", fmt.Errorf(\"failed to get release commit date for %s: %v\", releaseTag, err)\n\t}\n\n\treleaseCommits, err := g.ListAllCommits(owner, repo, branch, tStart, tEnd)\n\tif err != nil {\n\t\treturn nil, \"\", \"\", 
fmt.Errorf(\"failed to fetch release repo commits: %v\", err)\n\t}\n\n\treturn releaseCommits, startTag, releaseTag, nil\n}", "func (s *RepositoryClient) ListRepository(opt *RepositoryQuery) ([]RepoRecord, error) {\n\tvar v []RepoRecord\n\tresp, _, errs := s.NewRequest(gorequest.GET, \"\").\n\t\tQuery(*opt).\n\t\tEndStruct(&v)\n\treturn v, CheckResponse(errs, resp, 200)\n}", "func (s *Services) ReleaseDescriptions(ctx context.Context, request *proto.ReleaseDescriptionsRequest) (*proto.ReleaseDescriptionsResponse, error) {\n\tvar result []models.ReleaseDescription\n\tquery := s.DB\n\n\tquery = query.Where(\"release_id = ?\", request.ReleaseId)\n\n\tif err := query.Find(&result).Error; err != nil {\n\t\tlog.Println(err)\n\t\treturn nil, err\n\t}\n\n\tfinalRes := []*proto.ReleaseDescription{}\n\n\tfor i := range result {\n\t\tfinalRes = append(finalRes, result[i].ToProto())\n\t}\n\n\treturn &proto.ReleaseDescriptionsResponse{ReleaseDescriptions: finalRes}, nil\n}", "func (c *FakeClient) ReleaseHistory(rlsName string, max int) ([]*release.Release, error) {\n\treturn c.Rels, nil\n}", "func (c *Config) CheckReleases(readOnly bool) ([]ReleaseList, error) {\n\tif c == nil || c.client == nil {\n\t\treturn nil, errors.New(\"uninitialized client\")\n\t}\n\n\tif err := c.loadStateFile(); err != nil {\n\t\treturn nil, errors.Wrap(err, \"cannot load state file\")\n\t}\n\n\tnewReleases := make(chan ReleaseList)\n\trepoQ := make(chan RepoConfig)\n\tctx := context.Background()\n\n\t// Launch workers\n\tfor i := 0; i < releaseWorkerCount; i++ {\n\t\tgo c.checkReleaseWorker(ctx, i+1, repoQ, newReleases)\n\t}\n\n\t// Queue jobs\n\tgo func() {\n\t\tfor _, r := range c.Repositories {\n\t\t\trepoQ <- r\n\t\t}\n\t\tclose(repoQ)\n\t}()\n\n\t// Collect results\n\tvar newReleaseList []ReleaseList\n\tfor resultCount := len(c.Repositories); resultCount > 0; {\n\t\trel := <-newReleases\n\t\tresultCount--\n\n\t\tif len(rel) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Queue the release for states 
updates\n\t\tnewReleaseList = append(newReleaseList, rel)\n\t}\n\n\t// Leave now if the result list is empty or if we don't need to save them\n\tif len(newReleaseList) == 0 || readOnly {\n\t\treturn newReleaseList, nil\n\t}\n\n\t// Update repository states\n\tfor _, s := range newReleaseList {\n\t\t// Update states\n\t\tif c.states == nil {\n\t\t\trm := make(map[string]RepoState)\n\t\t\tc.states = &States{Repositories: rm}\n\t\t}\n\t\tc.states.Repositories[s[0].Repo] = *(s[0].RepoState)\n\t}\n\n\t// Save states\n\tlogrus.Debug(\"Saving states...\")\n\tif err := c.writeStateFile(); err != nil {\n\t\treturn newReleaseList, errors.Wrap(err, \"cannot write state file\")\n\t}\n\n\treturn newReleaseList, nil\n}", "func getRepoList(projectID int64) ([]string, error) {\n\t/*\n\t\tuiUser := os.Getenv(\"UI_USR\")\n\t\tif len(uiUser) == 0 {\n\t\t\tuiUser = \"admin\"\n\t\t}\n\t\tuiPwd := os.Getenv(\"UI_PWD\")\n\t\tif len(uiPwd) == 0 {\n\t\t\tuiPwd = \"Harbor12345\"\n\t\t}\n\t*/\n\tuiURL := config.LocalUIURL()\n\tclient := &http.Client{}\n\treq, err := http.NewRequest(\"GET\", uiURL+\"/api/repositories?project_id=\"+strconv.Itoa(int(projectID)), nil)\n\tif err != nil {\n\t\tlog.Errorf(\"Error when creating request: %v\", err)\n\t\treturn nil, err\n\t}\n\t//req.SetBasicAuth(uiUser, uiPwd)\n\treq.AddCookie(&http.Cookie{Name: models.UISecretCookie, Value: config.UISecret()})\n\t//dump, err := httputil.DumpRequest(req, true)\n\t//log.Debugf(\"req: %q\", dump)\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tlog.Errorf(\"Error when calling UI api to get repositories, error: %v\", err)\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != http.StatusOK {\n\t\tlog.Errorf(\"Unexpected status code: %d\", resp.StatusCode)\n\t\tdump, _ := httputil.DumpResponse(resp, true)\n\t\tlog.Debugf(\"response: %q\", dump)\n\t\treturn nil, fmt.Errorf(\"Unexpected status code when getting repository list: %d\", resp.StatusCode)\n\t}\n\n\tbody, err := 
ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to read the response body, error: %v\", err)\n\t\treturn nil, err\n\t}\n\tvar repoList []string\n\terr = json.Unmarshal(body, &repoList)\n\treturn repoList, err\n}", "func (c *Client) ReleasesSince(t time.Time) ([]db.Release, error) {\n\tchanges := [][]interface{}{}\n\n\terr := c.client.Call(\"changelog\", []interface{}{t.Unix(), true}, &changes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treleases := []db.Release{}\n\tfor _, change := range changes {\n\t\tif change[3] == \"new release\" {\n\t\t\treleases = append(releases, db.Release{\n\t\t\t\tName: change[0].(string),\n\t\t\t\tVersion: change[1].(string),\n\t\t\t\tTime: time.Unix(change[2].(int64), 0),\n\t\t\t})\n\t\t}\n\n\t}\n\treturn releases, nil\n}", "func (client ArtifactsClient) listRepositories(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodGet, \"/repositories\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response ListRepositoriesResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\tapiReferenceLink := \"https://docs.oracle.com/iaas/api/#/en/registry/20160918/Repository/ListRepositories\"\n\t\terr = common.PostProcessServiceError(err, \"Artifacts\", \"ListRepositories\", apiReferenceLink)\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func List(params map[string]string) ([]RPMInfo, error) {\n\tlog.Printf(\"Entering repo::List(%v)\", params)\n\tdefer log.Println(\"Exiting repo::List\")\n\n\tproductVersion := params[\"productVersion\"]\n\n\tvar info []RPMInfo\n\n\tfiles, err := listRepo(params)\n\tif err != nil 
{\n\t\treturn info, err\n\t}\n\n\tinfo, err = ListRPMFilesInfo(files, productVersion)\n\tif err != nil {\n\t\treturn info, err\n\t}\n\n\toutput.Write(info)\n\n\treturn info, nil\n}", "func CheckOnlineReleases(url string) ([]string, error) {\n\n\t// Perform request to get helm releases\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif resp.StatusCode != 200 {\n\t\terr := errors.New(string(body))\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\t// Unmarshall JSON response\n\tvar versions []Version\n\tjson.Unmarshal(body, &versions)\n\n\t// Convert Struct to array and sort it\n\tgithubReleases := []string{}\n\tfor _, v := range versions {\n\t\tgithubReleases = append(githubReleases, v.Tag)\n\t}\n\treturn githubReleases, nil\n}", "func (g *GhostParser) ParseReleases() []data.Release {\n\trel := data.Release{}\n\tse := g.doc.Find(\"table\").Eq(10)\n\n\tse.Find(\"tr\").Each(func(b int, sb *goquery.Selection) {\n\t\tsb.Find(\"td\").Each(func(c int, sc *goquery.Selection) {\n\t\t\tif c == 2 {\n\t\t\t\trel = data.Release{}\n\t\t\t\tg.getUrlAndTagAndName(&rel, sc)\n\t\t\t\tif rel.Name != \"\" {\n\t\t\t\t\trel.Image = g.getImageUrl(rel.Url)\n\t\t\t\t\trel.Time = time.Now().Unix()\n\t\t\t\t\trel.EncodeName()\n\t\t\t\t\tg.checkQual(&rel)\n\t\t\t\t\tif rel.Name != \"\" {\n\t\t\t\t\t\trel.Hits = 0\n\t\t\t\t\t\tg.Rel = append(g.Rel, rel)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t})\n\n\treturn g.Rel\n}", "func ParseGoReleases(shouldLog bool) ([]Release, error) {\n\tlog.Info(\"Releases of go available for download are \")\n\treleases := make([]Release, 0)\n\n\tres, err := http.Get(TAGS_URL)\n\tdefer res.Body.Close()\n\tif err != nil {\n\t\treturn releases, err\n\t}\n\n\tdoc, err := goquery.NewDocumentFromReader(res.Body)\n\tif err != nil {\n\t\treturn releases, err\n\t}\n\tdoc.Find(fmt.Sprintf(\".%s\", \"RefList-item\")).Each(func(i int, s *goquery.Selection) {\n\t\treleaseName := 
s.Find(\"a\").Text()\n\n\t\tif utils.GOS_REGEXP.FindString(releaseName) != \"\" {\n\t\t\tif shouldLog {\n\t\t\t\tfmt.Println(\" \" + releaseName)\n\t\t\t}\n\t\t\treleases = append(releases, Release{\n\t\t\t\tName: releaseName,\n\t\t\t\tDownloadUrl: fmt.Sprintf(BASE_DOWNLOAD_URL, releaseName),\n\t\t\t})\n\t\t}\n\t})\n\n\treturn releases, nil\n}", "func (c *Client) Get(name string) (*Release, error) {\n\treleases, err := c.List(ListParameters{Filter: name})\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\tfor _, release := range releases {\n\t\tif release.Name == name {\n\t\t\treturn &release, nil\n\t\t}\n\t}\n\treturn nil, trace.NotFound(\"release %v not found\", name)\n}", "func (g Github) List(ctx context.Context) ([]string, error) {\n\tclient := githubv4.NewClient(\n\t\toauth2.NewClient(\n\t\t\tctx,\n\t\t\toauth2.StaticTokenSource(&oauth2.Token{\n\t\t\t\tAccessToken: g.Token,\n\t\t\t}),\n\t\t),\n\t)\n\n\t// TODO: pagination\n\tvar query struct {\n\t\tSearch struct {\n\t\t\tRepositoryCount githubv4.Int\n\t\t\tPageInfo struct {\n\t\t\t\tEndCursor githubv4.String\n\t\t\t\tStartCursor githubv4.String\n\t\t\t}\n\t\t\tEdges []struct {\n\t\t\t\tNode struct {\n\t\t\t\t\tRepository struct {\n\t\t\t\t\t\tSSHURL githubv4.String\n\t\t\t\t\t} `graphql:\"... 
on Repository\"`\n\t\t\t\t}\n\t\t\t}\n\t\t} `graphql:\"search(query: $query, type: REPOSITORY, first: 100)\"`\n\t}\n\n\terr := client.Query(ctx, &query, map[string]interface{}{\"query\": githubv4.String(\"user:\" + g.Username)})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"List: %w\", err)\n\t}\n\n\tvar res []string\n\tfor _, edge := range query.Search.Edges {\n\t\tres = append(res, string(edge.Node.Repository.SSHURL))\n\t}\n\n\treturn res, nil\n}", "func (t *TagsService) List(ctx context.Context, repository string, options *ListTagsOptions) (*TagList, *http.Response, error) {\n\tu := fmt.Sprintf(\"repository/%v/tag\", repository)\n\tu, err := addOptions(u, options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treq, err := t.client.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\ttags := new(TagList)\n\tresp, err := t.client.Do(ctx, req, tags)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn tags, resp, err\n}", "func (v Repository) Revlist(args ...string) ([]string, error) {\n\tresult := []string{}\n\tcmdArgs := []string{\n\t\t\"git\",\n\t\t\"rev-list\",\n\t}\n\n\tcmdArgs = append(cmdArgs, args...)\n\n\tcmd := exec.Command(cmdArgs[0], cmdArgs[1:]...)\n\tcmd.Dir = v.RepoDir()\n\tcmd.Stdin = nil\n\tcmd.Stderr = nil\n\tout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err = cmd.Start(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tr := bufio.NewReader(out)\n\tfor {\n\t\tline, err := r.ReadString('\\n')\n\t\tline = strings.TrimSpace(line)\n\t\tif len(line) > 0 {\n\t\t\tresult = append(result, line)\n\t\t}\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif err = cmd.Wait(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn result, nil\n}", "func listReleaseChannelsHandler(c echo.Context) error {\n\tchannels, err := upgrade_client.ListReleaseChannels()\n\tif err != nil {\n\t\treturn handlers.HttpError(err)\n\t}\n\t// Return a deterministic ordering of 
channels\n\tsort.Strings(channels)\n\treturn c.JSON(http.StatusOK, channels)\n}", "func (h Client) ListNamespaceReleasesYAML(namespace string) ([]byte, error) {\n\tout, err := h.list(namespace, \"--output\", \"yaml\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn []byte(out), nil\n}", "func List(repo name.Repository, options ...Option) ([]string, error) {\n\to, err := makeOptions(options...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn newPuller(o).List(o.context, repo)\n}", "func (repo BoshDirectorRepository) DeleteReleases(name string) (apiResponse net.ApiResponse) {\n\tpath := fmt.Sprintf(\"/releases/%s?force=true\", name)\n\tapiResponse = repo.gateway.DeleteResource(repo.config.TargetURL+path, repo.config.Username, repo.config.Password)\n\tif apiResponse.IsNotSuccessful() {\n\t\treturn\n\t}\n\tif !apiResponse.IsRedirection() {\n\t\treturn\n\t}\n\n\tvar taskStatus models.TaskStatus\n\ttaskURL, err := url.Parse(apiResponse.RedirectLocation)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tapiResponse = repo.gateway.GetResource(repo.config.TargetURL+taskURL.Path, repo.config.Username, repo.config.Password, &taskStatus)\n\tif apiResponse.IsNotSuccessful() {\n\t\treturn\n\t}\n\n\t/* Progression should be: queued, progressing, done */\n\t/* TODO task might fail; end states: done, error, cancelled */\n\tfor taskStatus.State != \"done\" {\n\t\ttime.Sleep(1)\n\t\ttaskStatus, apiResponse = repo.GetTaskStatus(taskStatus.ID)\n\t\tif apiResponse.IsNotSuccessful() {\n\t\t\treturn\n\t\t}\n\t}\n\n\treturn\n}", "func (r *Repository) GetReleasesURL() string {\n\tif r == nil || r.ReleasesURL == nil {\n\t\treturn \"\"\n\t}\n\treturn *r.ReleasesURL\n}", "func (ghp *GithubProvider) getChangesSinceRelease(release *github.RepositoryRelease, owner string, repo string) ([]changes, error) {\n\n\tresult := []changes{}\n\n\tprOpts := github.PullRequestListOptions{\n\t\tState: \"all\",\n\t\tBase: \"\",\n\t}\n\tprs, _, prErr := ghp.client.PullRequests.List(ghp.ctx, owner, repo, 
&prOpts)\n\tif prErr != nil {\n\t\treturn nil, prErr\n\t}\n\n\tfor _, pr := range prs {\n\t\tif pr.MergedAt != nil && pr.MergedAt.After(release.CreatedAt.Time) {\n\t\t\tPrNumTmp := strings.Split(pr.GetURL(), \"/\")\n\t\t\tPrNum, _ := strconv.Atoi(PrNumTmp[len(PrNumTmp)-1])\n\t\t\tresult = append(result, changes{\n\t\t\t\tpr.GetTitle(),\n\t\t\t\tpr.GetURL(),\n\t\t\t\tPrNum,\n\t\t\t})\n\t\t}\n\t}\n\n\treturn result, nil\n\n}", "func (s *RepositoryClient) ListRepositoryTags(repoName string) ([]TagResp, error) {\n\tvar v []TagResp\n\tresp, _, errs := s.NewRequest(gorequest.GET, fmt.Sprintf(\"/%s/tags\", repoName)).\n\t\tEndStruct(&v)\n\treturn v, CheckResponse(errs, resp, 200)\n}", "func Releases(db DB) ReleaseQuery {\n\tq := ReleaseQuery{\n\t\tdb: db,\n\t\tbuilder: ReleaseSelect(),\n\t}\n\treturn q\n}", "func (g Gitlab) List(ctx context.Context) ([]string, error) {\n\tclient, err := gitlab.NewClient(\n\t\tg.Token,\n\t\tgitlab.WithBaseURL(g.URL),\n\t)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"List: %w\", err)\n\t}\n\n\t// TODO: pagination\n\trepos, resp, err := client.Projects.ListProjects(\n\t\t&gitlab.ListProjectsOptions{\n\t\t\tVisibility: gitlab.Visibility(gitlab.PrivateVisibility),\n\t\t},\n\t\tgitlab.WithContext(ctx),\n\t)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"List: %w\", err)\n\t}\n\tdefer resp.Body.Close()\n\n\tvar res []string\n\tfor _, r := range repos {\n\t\tres = append(res, r.SSHURLToRepo)\n\t}\n\n\treturn res, nil\n}", "func (mr *MockGithubAssetClientMockRecorder) ListReleases(ctx, opt interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ListReleases\", reflect.TypeOf((*MockGithubAssetClient)(nil).ListReleases), ctx, opt)\n}", "func (mr *MockRepoClientMockRecorder) ListReleases() *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ListReleases\", reflect.TypeOf((*MockRepoClient)(nil).ListReleases))\n}", "func (server 
*RepositoriesService) ListRepositories(ctx context.Context, project string, opt *ListOpts) (*Repositories, *http.Response, error) {\n\tvar u string\n\tif len(project) == 0 {\n\t\tu = \"rest/api/1.0/repos\"\n\t} else {\n\t\tu = fmt.Sprintf(\"rest/api/1.0/projects/%s/repos\", project)\n\t}\n\n\treq, err := server.v1Client.NewRequest(http.MethodGet, u, nil, opt)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tvar repos Repositories\n\tresp, err := server.v1Client.Do(req, &repos)\n\treturn &repos, resp, err\n}", "func (server *RepositoriesService) ListTags(ctx context.Context, project string, repo string, opt *ListOpts) (*Tags, *http.Response, error) {\n\tu := fmt.Sprintf(\"rest/api/1.0/projects/%s/repos/%s/tags\", project, repo)\n\treq, err := server.v1Client.NewRequest(http.MethodGet, u, nil, opt)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tvar tags Tags\n\tresp, err := server.v1Client.Do(req, &tags)\n\treturn &tags, resp, err\n}", "func GatherReleaseNotes(opts *options.Options) (*ReleaseNotes, error) {\n\tlogrus.Info(\"Gathering release notes\")\n\tgatherer, err := NewGatherer(context.Background(), opts)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"retrieving notes gatherer: %w\", err)\n\t}\n\n\tvar releaseNotes *ReleaseNotes\n\tstartTime := time.Now()\n\tif gatherer.options.ListReleaseNotesV2 {\n\t\tlogrus.Warn(\"EXPERIMENTAL IMPLEMENTATION ListReleaseNotesV2 ENABLED\")\n\t\treleaseNotes, err = gatherer.ListReleaseNotesV2()\n\t} else {\n\t\treleaseNotes, err = gatherer.ListReleaseNotes()\n\t}\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"listing release notes: %w\", err)\n\t}\n\tlogrus.Infof(\"finished gathering release notes in %v\", time.Since(startTime))\n\n\treturn releaseNotes, nil\n}", "func (in *IstioClient) GetNamespacePodsByRelease(namespace string, release string) (*v1.PodList, error) {\n\tfmt.Println(\"Called method GetNamespacePodsByRelease\")\n\tpodList, err := in.k8s.CoreV1().Pods(namespace).List(meta_v1.ListOptions{LabelSelector: 
\"release=\" + release})\n\tpods := podList.Items\n\tif err == nil {\n\t\tfor _, pod := range pods {\n\t\t\tfmt.Println(\"Pod found: \", pod.Name, \", release: \", pod.Labels[\"release\"])\n\t\t}\n\t}\n\n\treturn podList, err\n}", "func (g GithubClient) LastReleases(owner, repo string) (map[string]string, error) {\n\tlastRelease := make(map[string]string)\n\n\tr, err := g.ListAllReleases(owner, repo)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, release := range r {\n\t\t// Skip draft releases\n\t\tif *release.Draft {\n\t\t\tcontinue\n\t\t}\n\t\t// Alpha release goes only on master branch\n\t\tif strings.Contains(*release.TagName, \"-alpha\") && lastRelease[\"master\"] == \"\" {\n\t\t\tlastRelease[\"master\"] = *release.TagName\n\t\t} else {\n\t\t\tre, _ := regexp.Compile(\"v([0-9]+\\\\.[0-9]+)\\\\.([0-9]+(-.+)?)\")\n\t\t\tversion := re.FindStringSubmatch(*release.TagName)\n\n\t\t\tif version != nil {\n\t\t\t\t// Lastest vx.y.0 release goes on both master and release-vx.y branch\n\t\t\t\tif version[2] == \"0\" && lastRelease[\"master\"] == \"\" {\n\t\t\t\t\tlastRelease[\"master\"] = *release.TagName\n\t\t\t\t}\n\n\t\t\t\tbranchName := \"release-\" + version[1]\n\t\t\t\tif lastRelease[branchName] == \"\" {\n\t\t\t\t\tlastRelease[branchName] = *release.TagName\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn lastRelease, nil\n}", "func GetRelease(ctx *context.APIContext) {\n\t// swagger:operation GET /repos/{owner}/{repo}/releases/{id} repository repoGetRelease\n\t// ---\n\t// summary: Get a release\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// - name: id\n\t// in: path\n\t// description: id of the release to get\n\t// type: integer\n\t// format: int64\n\t// required: true\n\t// responses:\n\t// \"200\":\n\t// \"$ref\": 
\"#/responses/Release\"\n\t// \"404\":\n\t// \"$ref\": \"#/responses/notFound\"\n\n\tid := ctx.ParamsInt64(\":id\")\n\trelease, err := repo_model.GetReleaseByID(ctx, id)\n\tif err != nil && !repo_model.IsErrReleaseNotExist(err) {\n\t\tctx.Error(http.StatusInternalServerError, \"GetReleaseByID\", err)\n\t\treturn\n\t}\n\tif err != nil && repo_model.IsErrReleaseNotExist(err) ||\n\t\trelease.IsTag || release.RepoID != ctx.Repo.Repository.ID {\n\t\tctx.NotFound()\n\t\treturn\n\t}\n\n\tif err := release.LoadAttributes(ctx); err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"LoadAttributes\", err)\n\t\treturn\n\t}\n\tctx.JSON(http.StatusOK, convert.ToAPIRelease(ctx, ctx.Repo.Repository, release))\n}", "func (c *client) ListPRCommits(org, repo string, number int) ([]RepositoryCommit, error) {\n\tdurationLogger := c.log(\"ListPRCommits\", org, repo, number)\n\tdefer durationLogger()\n\n\tif c.fake {\n\t\treturn nil, nil\n\t}\n\tvar commits []RepositoryCommit\n\terr := c.readPaginatedResults(\n\t\tfmt.Sprintf(\"/repos/%v/%v/pulls/%d/commits\", org, repo, number),\n\t\tacceptNone,\n\t\torg,\n\t\tfunc() interface{} { // newObj returns a pointer to the type of object to create\n\t\t\treturn &[]RepositoryCommit{}\n\t\t},\n\t\tfunc(obj interface{}) { // accumulate is the accumulation function for paginated results\n\t\t\tcommits = append(commits, *(obj.(*[]RepositoryCommit))...)\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn commits, nil\n}", "func (p Provider) List(ctx context.Context) ([]repository.Handle, error) {\n\trepoList, err := os.Open(p.filename)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"could not open repository list at %s\", p.filename)\n\t}\n\tdefer repoList.Close()\n\n\tlines := bufio.NewScanner(repoList)\n\tvar res []repository.Handle\n\tfor lines.Scan() {\n\t\towner, repo, err := githuburl.Parse(lines.Text())\n\t\tif err != nil {\n\t\t\tlogrus.Infof(\"Skipping repository plugin: %s\", 
err)\n\t\t\tcontinue\n\t\t}\n\t\t// todo(corneliusweig): the static provider does not handle PluginName\n\t\tres = append(res, repository.Handle{Owner: owner, Repo: repo})\n\t}\n\n\tif err := lines.Err(); err != nil {\n\t\treturn nil, errors.Wrapf(err, \"could not scan repository list\")\n\t}\n\n\treturn res, nil\n}", "func (h *handler) Release(ctx context.Context, evt *github.ReleaseEvent) error {\n\tif evt.GetAction() != \"released\" {\n\t\tlogrus.WithField(\"action\", evt.GetAction()).Info(\"ignoring release event\")\n\t\treturn nil\n\t}\n\tnotifyRepos := h.cfg.ReleaseDispatchRepos()\n\tlogrus.WithField(\"repos\", len(notifyRepos)).Info(\"notifying repositories of release\")\n\tif len(notifyRepos) == 0 {\n\t\treturn nil\n\t}\n\n\tgh := repo.NewGitHubClient(h.cfg.GitHubToken)\n\tfeedbackIssue, err := releaseFeedbackIssue(ctx, gh, evt, notifyRepos)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlogrus.WithField(\"issue_number\", feedbackIssue.Number).Debug(\"created feedback issue\")\n\n\tdispatchOpts, err := h.releaseDispatchOptions(evt, feedbackIssue)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, notifyRepo := range notifyRepos {\n\t\tnotifyRepoParts := strings.SplitN(notifyRepo, \"/\", 2)\n\t\towner := notifyRepoParts[0]\n\t\tname := notifyRepoParts[1]\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"owner\": owner,\n\t\t\t\"name\": name,\n\t\t}).Debug(\"dispatching release to repository\")\n\t\tif _, _, err := gh.Repositories.Dispatch(ctx, owner, name, dispatchOpts); err != nil {\n\t\t\tlogrus.WithError(err).Warn(\"error dispatching update\")\n\t\t}\n\t}\n\n\treturn nil\n}", "func (r *RefsService) ListTags(owner, repoSlug string, opts ...interface{}) (*Refs, *Response, error) {\n\tresult := new(Refs)\n\turlStr := r.client.requestURL(\"/repositories/%s/%s/refs/tags\", owner, repoSlug)\n\turlStr, addOptErr := addQueryParams(urlStr, opts...)\n\tif addOptErr != nil {\n\t\treturn nil, nil, addOptErr\n\t}\n\n\tresponse, err := r.client.execute(\"GET\", urlStr, 
result, nil)\n\n\treturn result, response, err\n}", "func list_versions(w rest.ResponseWriter, r *rest.Request) {\n\tarch := r.PathParam(\"arch\")\n\tversions := []string{\"nightly\", \"beta\", \"stable\"}\n\t// get the numbered versions available\n\tdb_directories := get_directories(cache_instance, db, arch)\n\tfor _, dir := range db_directories {\n\t\tversion_path := strings.Split(dir.Path, \"/\")\n\t\tversion := version_path[len(version_path)-1]\n\t\tif version != \"snapshots\" {\n\t\t\tversions = append(versions, version)\n\t\t}\n\t}\n\t// Filter things folders we don't want in the versions out\n\n\tw.WriteJson(versions)\n}", "func (responses Responses) GetReleaseNames() []string {\n\tvar releaseNames []string\n\tfor _, response := range responses {\n\t\treleaseNames = append(releaseNames, response.ReleaseName)\n\t}\n\treturn releaseNames\n}", "func ListPRs(ctx context.Context, cfg *v1.Config) ([]int, error) {\n\tct := newClient(ctx, cfg.Github)\n\treturn ct.listOpenPRs(cfg.Github)\n}", "func ReleaseRepository() error {\n\tdefer logging.Logger.Sync()\n\tlogging.Logger.Info(\"Open local repository...\")\n\trepository, err := git.PlainOpen(config.Core.Workdir)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlogging.Logger.Info(\"Do release work...\")\n\tversion, err := GetBundleVersion()\n\tif err != nil {\n\t\treturn err\n\t}\n\tref, err := repository.Head()\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = repository.CreateTag(fmt.Sprintf(\"v%s\", version), ref.Hash(), &git.CreateTagOptions{\n\t\tTagger: &object.Signature{\n\t\t\tName: config.Git.Name,\n\t\t\tEmail: config.Git.Email,\n\t\t\tWhen: time.Now(),\n\t\t},\n\t\tMessage: fmt.Sprintf(\"release v%s\", version),\n\t})\n\treturn err\n}", "func GetReleasesInRange(ctx context.Context, client models.Client, opts models.ListReleasesOptions, from time.Time, to time.Time) (Releases, error) {\n\treleases, err := GetAllReleases(ctx, client, opts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfiltered := 
[]Release{}\n\n\tfor i, v := range releases {\n\t\tif v.PublishedAt.After(from) && v.PublishedAt.Before(to) {\n\t\t\tfiltered = append(filtered, releases[i])\n\t\t}\n\t}\n\n\treturn filtered, nil\n}", "func (a *Manager) getRefs(item GenericReleaseConfig) ([]GithubRef, error) {\n\tres := []GithubRef{}\n\trelease := item.HasType(\"release\")\n\tpreRelease := item.HasType(\"pre_release\")\n\tdraftRelease := item.HasType(\"draft_release\")\n\treleases, err := a.listReleases(item)\n\tif err != nil {\n\t\treturn res, errors.Wrapf(err, \"unable to fetch releases from %s/%s\", item.Owner, item.Repo)\n\t}\n\tfor _, r := range releases {\n\t\tif (r.GetPrerelease() == preRelease) ||\n\t\t\t(r.GetDraft() == draftRelease) ||\n\t\t\t(!r.GetPrerelease() && !r.GetDraft() && release) {\n\t\t\tif item.Format.DoesMatch(r.GetTagName()) {\n\t\t\t\tres = append(res, GithubRef{\n\t\t\t\t\tr.GetTagName(),\n\t\t\t\t\tr.GetCreatedAt().Unix(),\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\n\tif item.HasType(\"tag\") {\n\t\ttags, _, err := a.client.Repositories.ListTags(a.ctx, item.Owner, item.Repo, nil)\n\t\tif err != nil {\n\t\t\treturn res, errors.Wrapf(err, \"unable to fetch tags from %s/%s\", item.Owner, item.Repo)\n\t\t}\n\t\tfor _, t := range tags {\n\t\t\t// 1.\n\t\t\tsha1 := t.GetCommit().GetSHA()\n\t\t\tcommit, _, _ := a.client.Repositories.GetCommit(a.ctx, item.Owner, item.Repo, sha1)\n\t\t\tres = append(res, GithubRef{\n\t\t\t\tt.GetName(),\n\t\t\t\tcommit.GetCommit().GetCommitter().GetDate().Unix(),\n\t\t\t})\n\t\t}\n\t}\n\n\tsort.Slice(res[:], func(i, j int) bool {\n\t\tvi := item.Format.Format(res[i].Ref)\n\t\tvj := item.Format.Format(res[j].Ref)\n\t\tsemvi, erri := semver.NewVersion(vi)\n\t\tsemvj, errj := semver.NewVersion(vj)\n\t\t// release time is used when could not extract semver\n\t\tif erri != nil || errj != nil {\n\t\t\treturn res[i].Time > res[j].Time\n\t\t}\n\t\treturn semvi.Compare(semvj) > 0\n\t})\n\n\treturn res, nil\n}", "func (c APIClient) ListRepo() ([]*pfs.RepoInfo, 
error) {\n\trequest := &pfs.ListRepoRequest{}\n\trepoInfos, err := c.PfsAPIClient.ListRepo(\n\t\tc.Ctx(),\n\t\trequest,\n\t)\n\tif err != nil {\n\t\treturn nil, grpcutil.ScrubGRPC(err)\n\t}\n\treturn repoInfos.RepoInfo, nil\n}", "func (a *RepoAPI) ls(params interface{}) (resp *rpc.Response) {\n\tm := objx.New(cast.ToStringMap(params))\n\tvar revision []string\n\tif rev := m.Get(\"revision\").Str(); rev != \"\" {\n\t\trevision = []string{rev}\n\t}\n\treturn rpc.Success(util.Map{\n\t\t\"entries\": a.mods.Repo.ListPath(m.Get(\"name\").Str(), m.Get(\"path\").Str(), revision...),\n\t})\n}", "func (s *Services) AiringReleases(ctx context.Context, request *empty.Empty) (*proto.ReleasesListResponse, error) {\n\tquery := s.DB\n\n\tvar result []models.Release\n\n\tquery = query.Where(\"started_airing IS NOT NULL AND stopped_airing IS NULL\").Where(\"release_type_id = ?\", 1).Or(\"release_type_id = ?\", 4)\n\tif err := query.Find(&result).Error; err != nil {\n\t\tlog.Println(err)\n\t\treturn nil, err\n\t}\n\n\tfinalRes := []*proto.Release{}\n\n\tfor i := range result {\n\t\tfinalRes = append(finalRes, result[i].ToProto())\n\t}\n\n\treturn &proto.ReleasesListResponse{Releases: finalRes}, nil\n}", "func (g *GH) ListRepos() {\n\tctx := context.Background()\n\trepos, rsp, err := g.c.Repositories.ListByOrg(ctx, g.org, nil)\n\tif err != nil {\n\t\tlog.Println(\"Unable to List Repos in Org...\", rsp, err)\n\t}\n\tlog.Println(\"Listing Repos..\")\n\tg.repos = repos\n}" ]
[ "0.84172493", "0.7788706", "0.7698476", "0.75473577", "0.74319696", "0.7400406", "0.73194414", "0.7206904", "0.71547836", "0.7135346", "0.70355684", "0.6940326", "0.6883375", "0.6841564", "0.6699606", "0.6596501", "0.65950054", "0.6545528", "0.6530167", "0.652993", "0.6440224", "0.6398807", "0.6363387", "0.63192743", "0.63037896", "0.62957597", "0.6245373", "0.6226502", "0.6220558", "0.6219924", "0.6176138", "0.6168772", "0.6145816", "0.60779333", "0.60320866", "0.602968", "0.60042715", "0.5991518", "0.596445", "0.5955281", "0.59480464", "0.593374", "0.59130627", "0.58848035", "0.588255", "0.5865053", "0.58268374", "0.58225185", "0.578749", "0.57637024", "0.57587767", "0.5696268", "0.568337", "0.5678804", "0.56474644", "0.56434214", "0.5638556", "0.5634638", "0.5585682", "0.55855453", "0.5574915", "0.556868", "0.5550091", "0.5509975", "0.5509673", "0.5490857", "0.54871845", "0.5486485", "0.5467576", "0.5464232", "0.5455767", "0.54509276", "0.54498947", "0.54474103", "0.5408394", "0.54037225", "0.53989786", "0.53989553", "0.5365347", "0.5361977", "0.53566605", "0.5349041", "0.53389156", "0.53379834", "0.53252554", "0.5318812", "0.5314842", "0.52589893", "0.5246939", "0.5241265", "0.5239126", "0.52320105", "0.5219249", "0.5218251", "0.5213763", "0.52029467", "0.51931417", "0.51922745", "0.5186272", "0.5181472" ]
0.77434057
2
UploadAsset uploads specified assets to a given release object
func (c *Client) UploadAsset(ctx context.Context, releaseID int64, filename string) (*github.ReleaseAsset, error) { filename, err := filepath.Abs(filename) if err != nil { return nil, errors.Wrap(err, "failed to get abs path") } f, err := os.Open(filename) if err != nil { return nil, errors.Wrap(err, "failed to open file") } opts := &github.UploadOptions{ // Use base name by default Name: filepath.Base(filename), } var asset *github.ReleaseAsset err = retry.Retry(3, 3*time.Second, func() error { var ( res *github.Response err error ) asset, res, err = c.Repositories.UploadReleaseAsset(context.TODO(), c.Owner, c.Repo, releaseID, opts, f) if err != nil { return errors.Wrapf(err, "failed to upload release asset: %s", filename) } switch res.StatusCode { case http.StatusCreated: return nil case 422: return errors.Errorf( "upload release asset: invalid status code: %s", "422 (this is probably because the asset already uploaded)") default: return errors.Errorf( "upload release asset: invalid status code: %s", res.Status) } }) return asset, err }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r *Release) UploadAsset(path string) error {\n\tfile, err := os.OpenFile(path, os.O_RDONLY, 0755)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer file.Close()\n\tasset, _, err := r.client.Repositories.UploadReleaseAsset(context.Background(), r.owner, r.repository, r.ID, &gogithub.UploadOptions{\n\t\tName: filepath.Base(file.Name()),\n\t}, file)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Println(\"uploaded asset \", *asset.Name)\n\treturn nil\n}", "func (g *GHR) UploadAssets(ctx context.Context, releaseID int64, localAssets []string, parallel int) error {\n\tstart := time.Now()\n\tdefer func() {\n\t\tDebugf(\"UploadAssets: time: %d ms\", int(time.Since(start).Seconds()*1000))\n\t}()\n\n\teg, ctx := errgroup.WithContext(ctx)\n\tsemaphore := make(chan struct{}, parallel)\n\tfor _, localAsset := range localAssets {\n\t\tlocalAsset := localAsset\n\t\teg.Go(func() error {\n\t\t\tsemaphore <- struct{}{}\n\t\t\tdefer func() {\n\t\t\t\t<-semaphore\n\t\t\t}()\n\n\t\t\tfmt.Fprintf(g.outStream, \"--> Uploading: %15s\\n\", filepath.Base(localAsset))\n\t\t\t_, err := g.GitHub.UploadAsset(ctx, releaseID, localAsset)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to upload asset: %s %w\", localAsset, err)\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t}\n\n\tif err := eg.Wait(); err != nil {\n\t\treturn fmt.Errorf(\"one of the goroutines failed: %w\", err)\n\t}\n\n\treturn nil\n}", "func (c *gitlabClient) Upload(\n\tctx *context.Context,\n\treleaseID string,\n\tname string,\n\tfile *os.File,\n) error {\n\tprojectID := ctx.Config.Release.GitLab.Owner + \"/\" + ctx.Config.Release.GitLab.Name\n\n\tlog.WithField(\"file\", file.Name()).Debug(\"uploading file\")\n\tprojectFile, _, err := c.client.Projects.UploadFile(\n\t\tprojectID,\n\t\tfile.Name(),\n\t\tnil,\n\t)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.WithFields(log.Fields{\n\t\t\"file\": file.Name(),\n\t\t\"url\": projectFile.URL,\n\t}).Debug(\"uploaded file\")\n\n\tgitlabBaseURL := 
ctx.Config.GitLabURLs.Download\n\t// projectFile from upload: /uploads/<sha>/filename.txt\n\trelativeUploadURL := projectFile.URL\n\tlinkURL := gitlabBaseURL + \"/\" + projectID + relativeUploadURL\n\treleaseLink, _, err := c.client.ReleaseLinks.CreateReleaseLink(\n\t\tprojectID,\n\t\treleaseID,\n\t\t&gitlab.CreateReleaseLinkOptions{\n\t\t\tName: &name,\n\t\t\tURL: &linkURL,\n\t\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.WithFields(log.Fields{\n\t\t\"id\": releaseLink.ID,\n\t\t\"url\": releaseLink.URL,\n\t}).Debug(\"created release link\")\n\n\treturn err\n}", "func PutAssets(registry sources.Registry, graphUpdater *knowledge.GraphUpdater, sem *semaphore.Weighted) http.HandlerFunc {\n\treturn handleUpdate(registry, func(ctx context.Context, source string, body io.Reader) error {\n\t\trequestBody := client.PutGraphAssetRequestBody{}\n\t\tif err := json.NewDecoder(body).Decode(&requestBody); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// TODO(c.michaud): verify compatibility of the schema with graph updates\n\t\terr := graphUpdater.InsertAssets(ctx, source, requestBody.Assets)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Unable to insert assets: %v\", err)\n\t\t}\n\t\tlabels := prometheus.Labels{\"source\": source}\n\t\tmetrics.GraphUpdateAssetsInsertedCounter.\n\t\t\tWith(labels).\n\t\t\tAdd(float64(len(requestBody.Assets)))\n\n\t\treturn nil\n\t}, sem, \"insert_assets\")\n}", "func (u *UploadsService) UploadAsset(asset io.ReadCloser, contentType string, contentLength int64) (result *Result) {\n\treturn u.client.upload(u.URL, asset, contentType, contentLength)\n}", "func (b *Bucket) Upload(_ context.Context, name string, r io.Reader) error {\n\tbody, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\tb.objects[name] = body\n\treturn nil\n}", "func (h Hosting) UploadHostingAssets(realmClient realm.Client, groupID, appID string, hostingDiffs HostingDiffs, errHandler func(err error)) error {\n\tvar wg sync.WaitGroup\n\n\tjobCh := 
make(chan func())\n\terrCh := make(chan error)\n\tdoneCh := make(chan struct{})\n\n\tvar errs []error\n\tgo func() {\n\t\tfor err := range errCh {\n\t\t\terrHandler(err)\n\t\t\terrs = append(errs, err)\n\t\t}\n\t\tdoneCh <- struct{}{}\n\t}()\n\n\tfor n := 0; n < numHostingWorkers; n++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tfor job := range jobCh {\n\t\t\t\tjob()\n\t\t\t}\n\t\t}()\n\t}\n\n\tassetsDir := filepath.Join(h.RootDir, NameFiles)\n\n\tfor _, added := range hostingDiffs.Added {\n\t\tasset := added // the closure otherwise sees the same value for `added` each iteration\n\t\tjobCh <- func() {\n\t\t\tif err := realmClient.HostingAssetUpload(groupID, appID, assetsDir, asset); err != nil {\n\t\t\t\terrCh <- fmt.Errorf(\"failed to add %s: %w\", asset.FilePath, err)\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, deleted := range hostingDiffs.Deleted {\n\t\tasset := deleted // the closure otherwise sees the same value for `added` each iteration\n\t\tjobCh <- func() {\n\t\t\tif err := realmClient.HostingAssetRemove(groupID, appID, asset.FilePath); err != nil {\n\t\t\t\terrCh <- fmt.Errorf(\"failed to remove %s: %w\", asset.FilePath, err)\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, modified := range hostingDiffs.Modified {\n\t\tasset := modified // the closure otherwise sees the same value for `added` each iteration\n\t\tjobCh <- func() {\n\t\t\tif asset.AttrsModified && !asset.BodyModified {\n\t\t\t\tif err := realmClient.HostingAssetAttributesUpdate(groupID, appID, asset.FilePath, asset.Attrs...); err != nil {\n\t\t\t\t\terrCh <- fmt.Errorf(\"failed to update attributes for %s: %w\", asset.FilePath, err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif err := realmClient.HostingAssetUpload(groupID, appID, assetsDir, asset.HostingAsset); err != nil {\n\t\t\t\t\terrCh <- fmt.Errorf(\"failed to update %s: %w\", asset.FilePath, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tclose(jobCh)\n\twg.Wait()\n\n\tclose(errCh)\n\t<-doneCh\n\n\tif len(errs) > 0 {\n\t\treturn fmt.Errorf(\"%d 
error(s) occurred while importing hosting assets\", len(errs))\n\t}\n\treturn nil\n}", "func (s *Mortgageplatform) PostAsset(APIstub shim.ChaincodeStubInterface, args []string) sc.Response {\n\tres, err := postAsset(APIstub, args)\n\tif err != nil { return shim.Error(\"PostAsset, \" + err.Error())}\n\n\treturn shim.Success(res)\n }", "func uploadRemoteBoshAssets(dm *enaml.DeploymentManifest, boshClient *enamlbosh.Client, poll bool) (err error) {\n\tvar errStemcells error\n\tvar errReleases error\n\tvar remoteStemcells []enaml.Stemcell\n\tdefer UIPrint(\"remote asset check complete.\")\n\tUIPrint(\"Checking product deployment for remote assets...\")\n\n\tif remoteStemcells, err = stemcellsToUpload(dm.Stemcells, boshClient); err == nil {\n\t\tif errStemcells = uploadRemoteStemcells(remoteStemcells, boshClient, poll); errStemcells != nil {\n\t\t\tlo.G.Info(\"issues processing stemcell: \", errStemcells)\n\t\t}\n\t}\n\n\tif errReleases = uploadRemoteReleases(dm.Releases, boshClient, poll); errReleases != nil {\n\t\tlo.G.Info(\"issues processing release: \", errReleases)\n\t}\n\n\tif errReleases != nil || errStemcells != nil {\n\t\terr = fmt.Errorf(\"stemcell err: %v release err: %v\", errStemcells, errReleases)\n\t}\n\treturn\n}", "func (f *File) Release(ctx context.Context, req *fuse.ReleaseRequest) error {\n\tlog.Infoln(\"Release requested on file\", f.Metadata.PathDisplay)\n\tif f.NeedsUpload {\n\t\t// Entirely reckless\n\t\tgo func() {\n\t\t\tlog.Infoln(\"Uploading file to Dropbox\", f.Metadata.PathDisplay)\n\t\t\tretryNotice := func(err error, duration time.Duration) {\n\t\t\t\tlog.Errorf(\"Retrying %s in %s due to %s\\n\", f.Metadata.PathDisplay, err, duration)\n\t\t\t}\n\t\t\terr := backoff.RetryNotify(func() error {\n\t\t\t\t_, err := f.Client.Upload(f.Metadata.PathDisplay, f.getData())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}, backoff.NewExponentialBackOff(), retryNotice)\n\n\t\t\tif err != nil 
{\n\t\t\t\tlog.Panicln(\"Unable to upload file\", f.Metadata.PathDisplay, err)\n\t\t\t}\n\t\t\tf.NeedsUpload = false\n\t\t}()\n\t}\n\n\treturn nil\n}", "func PrepareReleaseBundle(awsc aws.AwsClients, release *deployer.Release, zip_file_path *string) error {\n\tif err := PrepareRelease(release, zip_file_path); err != nil {\n\t\treturn err\n\t}\n\n\terr := s3.PutFile(\n\t\tawsc.S3Client(nil, nil, nil),\n\t\tzip_file_path,\n\t\trelease.Bucket,\n\t\trelease.LambdaZipPath(),\n\t)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// reset CreateAt because it can take a while to upload the lambda\n\trelease.CreatedAt = to.Timep(time.Now())\n\n\t// Uploading the Release to S3 to match SHAs\n\tif err := s3.PutStruct(awsc.S3Client(nil, nil, nil), release.Bucket, release.ReleasePath(), release); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (b *Bucket) Upload(ctx context.Context, name string, r io.Reader) error {\n\tw := b.bkt.Object(name).NewWriter(ctx)\n\n\tif _, err := io.Copy(w, r); err != nil {\n\t\treturn err\n\t}\n\treturn w.Close()\n}", "func (g *GitHub) SetAsset(name string) {\n\tg.releaseAsset = name\n}", "func (c *GitHub) CreateReleaseAsset(ctx context.Context, a git.ReleaseAsset) error {\n\tc.Logger.Debugf(\"Creating a release asset %+v\", a)\n\tf, err := os.Open(a.RealPath)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not open the file: %w\", err)\n\t}\n\tdefer f.Close()\n\t_, _, err = c.Client.UploadReleaseAsset(ctx, a.Release.Repository.Owner, a.Release.Repository.Name, a.Release.InternalID, &github.UploadOptions{\n\t\tName: a.Name,\n\t}, f)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"GitHub API error: %w\", err)\n\t}\n\treturn nil\n}", "func Upload(apiClient *api.Client, game *models.Game, gamePackage *models.GamePackage, releaseSemver *semver.Version, browserBuild bool, filepath string, filesize int64, checksum string, startByte, chunkSize int64) error {\n\t// Create a new progress bar that starts from the given start byte\n\tbar := 
pb.New64(filesize).SetMaxWidth(80).Set(pb.Bytes, true).SetTemplateString(\"\")\n\tbar.Add64(startByte)\n\n\t// The bar will be set to visible by the apiClient as soon as it knows it wouldn't print any errors right off the bat\n\tbar.Start()\n\tdefer bar.Finish()\n\n\tfor {\n\t\tresult, err := uploadChunk(apiClient, game, gamePackage, releaseSemver, browserBuild, filepath, filesize, checksum, startByte, chunkSize, bar)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif result.Status == \"complete\" {\n\t\t\treturn nil\n\t\t}\n\n\t\t// Get next chunk\n\t\tstartByte = result.Start\n\t}\n}", "func (c *Client) Upload(relPath, fieldname, filename string, resource interface{}) error {\n\treq, err := c.NewfileUploadRequest(relPath, fieldname, filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif _, err:=c.doGetHeaders(req, resource, true);err!=nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (p Plugin) Exec() error {\n\n\tclient := gitlab.NewClient(nil, p.Config.Token)\n\n\tif err := client.SetBaseURL(parserBaseUrl(p.Commit.Remote, p.Repo.FullName)); err != nil {\n\t\tpanic(err)\n\t}\n\n\tlog.Print(\"url: \" + client.BaseURL().String())\n\n\tlog.Println(\"Uploading assets...\")\n\t//todo: to support many assets\n\tprojectFile, _, err := client.Projects.UploadFile(p.Repo.FullName, normalizePath(p.Config.Asset))\n\n\tlog.Print(\"successful\")\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif p.Build.Event != tagEvent {\n\t\t//todo: accept others events\n\t\treturn errors.New(\"event shoud be equals to tag\")\n\t}\n\n\trel, _, _ := client.Releases.GetRelease(p.Repo.FullName, p.Build.Tag)\n\n\tif rel != nil && rel.TagName != \"\" {\n\t\t//update release\n\t\tupOpts := gitlab.UpdateReleaseOptions{\n\t\t\tDescription: &projectFile.Markdown,\n\t\t\tName: getReleaseName(p),\n\t\t}\n\n\t\t_, _, err = client.Releases.UpdateRelease(p.Repo.FullName, p.Build.Tag, &upOpts)\n\t} else {\n\t\t//create release\n\t\topts := &gitlab.CreateReleaseOptions{\n\t\t\tDescription: 
&projectFile.Markdown,\n\t\t\tTagName: &p.Build.Tag,\n\t\t\tName: getReleaseName(p),\n\t\t}\n\n\t\t_, _, err = client.Releases.CreateRelease(p.Repo.FullName, opts)\n\n\t}\n\n\treturn err\n}", "func (gc gcsClient) Upload(ctx context.Context, path Path, buf []byte, worldReadable bool, cacheControl string) (*storage.ObjectAttrs, error) {\n\tclient := gc.clientFromPath(path)\n\treturn client.Upload(ctx, path, buf, worldReadable, cacheControl)\n}", "func putObject(ctx *context, onDisk string, uploadKey string) error {\n\t// Setup\n\tsess := session.Must(session.NewSession())\n\t// Ex: $HOME/temp/blast/db/README\n\tlog.Print(\"File upload. Source: \" + onDisk)\n\tlocal, err := ctx.os.Open(onDisk)\n\tif err != nil {\n\t\treturn handle(\"Error in opening file on disk.\", err)\n\t}\n\tdefer func() {\n\t\tif err = local.Close(); err != nil {\n\t\t\terrOut(\"Error in closing local file\", err)\n\t\t}\n\t}()\n\n\t// Upload to S3\n\tuploader := s3manager.NewUploader(sess)\n\toutput, err := uploader.Upload(&s3manager.UploadInput{\n\t\tBody: local,\n\t\tBucket: aws.String(ctx.bucket),\n\t\tKey: aws.String(uploadKey),\n\t})\n\tawsOutput(fmt.Sprintf(\"%#v\", output))\n\tif err != nil && !strings.Contains(err.Error(),\n\t\t\"IllegalLocationConstraintException\") {\n\t\treturn handle(fmt.Sprintf(\"Error in file upload of %s to S3.\", onDisk), err)\n\t}\n\n\t// Remove file locally after upload finished\n\tif err = ctx.os.Remove(onDisk); err != nil {\n\t\treturn handle(\"Error in deleting temporary file on local disk.\", err)\n\t}\n\treturn err\n}", "func (s *storageClient) upload(ctx context.Context, r io.Reader, objectName string) error {\n\tw := s.client.Bucket(s.bucketName).Object(objectName).NewWriter(ctx)\n\tif _, err := io.Copy(w, r); err != nil {\n\t\treturn fmt.Errorf(\"error while uploading to GCS object %q: %w\", objectName, err)\n\t}\n\t// The actual upload might happen after Close is called so we need to capture any errors.\n\tif err := w.Close(); err != nil {\n\t\treturn 
fmt.Errorf(\"error finishing upload to GCS object %q: %w\", objectName, err)\n\t}\n\treturn nil\n}", "func (cs *CloudStorage) Upload(ctx context.Context, user *domain.User,\n\tbucket string, attrs *domain.ImgAttrs, r io.Reader, prefix string) error {\n\twc := cs.Bucket(bucket).Object(prefix).NewWriter(ctx)\n\tif _, err := io.Copy(wc, r); err != nil {\n\t\treturn err\n\t}\n\tif err := wc.Close(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func uploadTestObject(bucketName, objName string, n []byte) error {\n\t// Create non-wrapped client to create test object.\n\tctx := context.Background()\n\tc, err := NewClient(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"storage.NewClient: %v\", err)\n\t}\n\tobj := c.Bucket(bucketName).Object(objName)\n\tw := obj.NewWriter(ctx)\n\tif _, err := w.Write(n); err != nil {\n\t\treturn fmt.Errorf(\"writing test object: %v\", err)\n\t}\n\tif err := w.Close(); err != nil {\n\t\treturn fmt.Errorf(\"closing object: %v\", err)\n\t}\n\treturn nil\n}", "func (cache *SiaCacheLayer) PutObject(bucket string, objectName string, size int64, purgeAfter int64, srcFile string) *SiaServiceError {\n\tcache.debugmsg(\"SiaCacheLayer.PutObject\")\n\n\t// Before inserting to DB, there is a very rare chance that the object already exists in DB\n\t// from a failed upload and Minio crashed or was killed before DB updated to reflect. So just in case\n\t// we will check if the object exists and has a not uploaded status. If so, we will delete that\n\t// record and then continue as normal.\n\tobjInfo, e := cache.GetObjectInfo(bucket, objectName)\n\tif e == nil {\n\t\t// Object does exist. If uploaded, return error. 
If not uploaded, delete it and continue.\n\t\tif objInfo.Uploaded.Unix() > 0 {\n\t\t\treturn siaErrorObjectAlreadyExists\n\t\t}\n\t\te = cache.dbDeleteObject(bucket, objectName)\n\t\tif e != nil {\n\t\t\treturn e\n\t\t}\n\t}\n\n\terr := cache.dbInsertObject(bucket, objectName, size, time.Now().Unix(), 0, purgeAfter, srcFile, 1)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Tell Sia daemon to upload the object\n\tsiaObj := cache.getSiaObjectName(bucket, objectName)\n\tderr := post(cache.SiadAddress, \"/renter/upload/\"+siaObj, \"source=\"+srcFile)\n\tif derr != nil {\n\t\tcache.dbDeleteObject(bucket, objectName)\n\t\treturn &SiaServiceError{Code: \"SiaErrorDaemon\", Message: derr.Error()}\n\t}\n\n\t// Need to wait for upload to complete unless background uploading is enabled\n\tif (!cache.BackgroundUpload) {\n\t\terr = cache.waitTillSiaUploadCompletes(siaObj)\n\t\tif err != nil {\n\t\t\tcache.dbDeleteObject(bucket, objectName)\n\t\t\treturn err\n\t\t}\n\n\t\t// Mark object as uploaded\n\t\terr = cache.dbUpdateObjectUploadedStatus(bucket, objectName, 1)\n\t\tif err != nil {\n\t\t\tcache.dbDeleteObject(bucket, objectName)\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (u *User)BuyAsset(APIstub shim.ChaincodeStubInterface, a Asset, args []string) error {\n\tif len(args) != 2 { return errors.New(\"BuyAsset - incorrect number of arguments. 
Expecting 2\")}\n\n\tpaymentid := args[0]\n\tpaypercent := args[1]\n\tpercentage, parseErr := strconv.ParseFloat(paypercent, 32)\n\tif parseErr != nil { return errors.New(\"BuyAsset - \" + parseErr.Error()) }\n\t\n\tif a.Owned == u.Username {\n\t\treturn errors.New(\"BuyAsset, user can NOT buy its own asset --- \")\n\t}\n\t// add buyer info into Asset\n\tremaining := 1 - a.Status\n\tif percentage > remaining { \n\t\treturn errors.New(\"BuyAsset - No enough share for this asset '\" + a.Key + \"'\")\n\t}\n\ta.Status = remaining - percentage // update asset remaining share\n\ta.Buyerspercent[u.Username] = a.Buyerspercent[u.Username] + percentage\n\t// calculate payment plan\n\ttotalPayWithInterest := percentage * math.Pow((1+a.Interestrate), float64(a.Period)) * a.Worth\n\tpayPer := totalPayWithInterest / float64(a.Period)\n\n\t// add asset to user's paymentmethod's Payssets list\n\tpaymentmethod := u.Info.Paymentmethodlist[paymentid]\n\tif paymentmethod.Accountnumber == \"\" {\n\t\treturn errors.New(\"BuyAsset - payment not exists\")\n\t}\n\n\tvar record Payassetrecord\n\trecord = paymentmethod.Payassets[a.Key];\n\tif (Payassetrecord{}) != record {\n\t\treturn errors.New(\"BuyAsset - you have bought this asset already.\")\n\t} \n\t//p, _ := strconv.ParseInt(a.Period, 10, 32)\n\trecord = Payassetrecord{Period: int(a.Period), Amountper:payPer}\n\tpaymentmethod.Payassets[a.Key] = record\n\n\t// Put asset and user back to ledger\n\tassetAsbytes, _ := json.Marshal(a)\n\tuserAsbytes, _ := json.Marshal(u)\n\n\tif err := APIstub.PutState(a.Key, assetAsbytes); err != nil {\n\t\treturn errors.New(\"BuyAsset - \" + err.Error())\n\t}\n\tif err := APIstub.PutState(u.Username, userAsbytes); err != nil {\n\t\treturn errors.New(\"BuyAsset - \" + err.Error())\n\t}\n\n\treturn nil\n}", "func (g *gcs) Upload(ctx context.Context, localPath string, remotePath string) (err error) {\n\tgcsObject := g.bucket.Object(remotePath)\n\tgcsWriter := gcsObject.NewWriter(g.context)\n\n\tsourceFile, 
err := os.Open(localPath)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer sourceFile.Close()\n\n\tif _, err = io.Copy(gcsWriter, sourceFile); err != nil {\n\t\treturn\n\t}\n\n\tif err = gcsWriter.Close(); err != nil {\n\t\tgcsObject.Delete(g.context)\n\t\treturn\n\t}\n\n\tif err = gcsObject.ACL().Set(g.context, storage.AllUsers, storage.RoleReader); err != nil {\n\t\tgcsObject.Delete(g.context)\n\t\treturn\n\t}\n\n\treturn\n}", "func UploadMod(url string, path string, mod config.Mod) error {\n\tmodname := strings.ToLower(mod.Name)\n\treleaseVer := strings.ToLower(mod.ReleaseVersion)\n\n\tmodfilepath := fmt.Sprintf(\"releases/%s.%s.so\", modname, releaseVer)\n\tif len(path) != 0 {\n\t\tmodfilepath = fmt.Sprintf(\"%s/%s\", path, modfilepath)\n\t}\n\n\tfmt.Println(modfilepath)\n\n\tfile, errFile := os.Open(modfilepath)\n\tif errFile != nil {\n\t\treturn errors.New(\"open-file \" + errFile.Error())\n\t}\n\tdefer file.Close()\n\n\tbody := &bytes.Buffer{}\n\twriter := multipart.NewWriter(body)\n\tpart, errCreateFormFile := writer.CreateFormFile(\"file\", filepath.Base(modfilepath))\n\tif errCreateFormFile != nil {\n\t\treturn errors.New(\"create-form-file \" + errCreateFormFile.Error())\n\t}\n\n\t_, errCopyFile := io.Copy(part, file)\n\twriter.Close()\n\tif errCopyFile != nil {\n\t\treturn errors.New(\"copy-file \" + errCopyFile.Error())\n\t}\n\n\treq, errNewRequest := http.NewRequest(http.MethodPost, url, body)\n\tif errNewRequest != nil {\n\t\treturn errors.New(\"create-new-request-for-upload \" + errNewRequest.Error())\n\t}\n\n\tauthKey := fmt.Sprintf(\"Bearer %s\", mod.SecretKey)\n\treq.Header.Set(\"Content-Type\", writer.FormDataContentType())\n\treq.Header.Set(config.XFastSecretKeyHeader, authKey)\n\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn errors.New(\"call-api-for-upload \" + err.Error())\n\t}\n\tdefer resp.Body.Close()\n\n\tparser := util.NewParser()\n\tparser.SetResponseBody(resp.Body)\n\n\tstatus, err := 
parser.Status()\n\tif err != nil {\n\t\treturn errors.New(\"parser-resp-for-upload \" + err.Error())\n\t}\n\n\tif strings.ToUpper(status) == \"ERROR\" || strings.ToUpper(status) == \"FAILED\" {\n\t\treturn errors.New(\"failed to upload module\")\n\t}\n\n\treturn nil\n}", "func (s *Service) doUpload(ctx *context.Context, fileBytes []byte, url string) *types.Error {\n\tbefore := func(asFunc func(interface{}) bool) error {\n\t\treq := &s3manager.UploadInput{}\n\t\tok := asFunc(&req)\n\t\tif !ok {\n\t\t\treturn errors.New(\"invalid s3 type\")\n\t\t}\n\t\treq.ACL = aws.String(\"public-read\")\n\t\treturn nil\n\t}\n\tbw, err := s.bucket.NewWriter(*ctx, url, &blob.WriterOptions{\n\t\tBeforeWrite: before,\n\t})\n\tif err != nil {\n\t\treturn &types.Error{\n\t\t\tPath: \".UploaderService->doUpload()\",\n\t\t\tMessage: err.Error(),\n\t\t\tError: err,\n\t\t\tType: \"golang-error\",\n\t\t}\n\t}\n\n\t_, err = bw.Write(fileBytes)\n\tif err != nil {\n\t\treturn &types.Error{\n\t\t\tPath: \".UploaderService->doUpload()\",\n\t\t\tMessage: err.Error(),\n\t\t\tError: err,\n\t\t\tType: \"golang-error\",\n\t\t}\n\t}\n\n\tif err = bw.Close(); err != nil {\n\t\treturn &types.Error{\n\t\t\tPath: \".UploaderService->doUpload()\",\n\t\t\tMessage: err.Error(),\n\t\t\tError: err,\n\t\t\tType: \"golang-error\",\n\t\t}\n\t}\n\n\treturn nil\n}", "func (u *Uploader) Upload(reader io.Reader, input s3.PutObjectInput) error {\n\treturn errors.New(\"unimplemented\")\n}", "func (b *OSSBackend) Upload(blobID string, blobPath string) error {\n\tblobID = b.objectPrefix + blobID\n\tif exist, err := b.bucket.IsObjectExist(blobID); err != nil {\n\t\treturn err\n\t} else if exist {\n\t\treturn nil\n\t}\n\n\tvar stat os.FileInfo\n\tstat, err := os.Stat(blobPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tblobSize := stat.Size()\n\n\tvar needMultiparts bool = false\n\t// Blob size bigger than 100MB, apply multiparts upload.\n\tif blobSize >= multipartsUploadThreshold {\n\t\tneedMultiparts = 
true\n\t}\n\n\tstart := time.Now()\n\n\tif needMultiparts {\n\t\tlogrus.Debugf(\"Upload %s using multiparts method\", blobID)\n\t\tchunks, err := oss.SplitFileByPartNum(blobPath, splitPartsCount)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\timur, err := b.bucket.InitiateMultipartUpload(blobID)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tvar parts []oss.UploadPart\n\n\t\tg := new(errgroup.Group)\n\t\tfor _, chunk := range chunks {\n\t\t\tck := chunk\n\t\t\tg.Go(func() error {\n\t\t\t\tp, err := b.bucket.UploadPartFromFile(imur, blobPath, ck.Offset, ck.Size, ck.Number)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\t// TODO: We don't verify data part MD5 from ETag right now.\n\t\t\t\t// But we can do it if we have to.\n\t\t\t\tparts = append(parts, p)\n\t\t\t\treturn nil\n\t\t\t})\n\t\t}\n\n\t\tif err := g.Wait(); err != nil {\n\t\t\treturn errors.Wrap(err, \"Uploading parts failed\")\n\t\t}\n\n\t\t_, err = b.bucket.CompleteMultipartUpload(imur, parts)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\treader, err := os.Open(blobPath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer reader.Close()\n\t\terr = b.bucket.PutObject(blobID, reader)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tend := time.Now()\n\telapsed := end.Sub(start)\n\tlogrus.Debugf(\"Uploading blob %s costs %s\", blobID, elapsed)\n\n\treturn err\n}", "func (u *LocalUploader) Upload(path string, object []byte) error {\n\tf, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0600)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\tif _, err := fmt.Fprintln(f, string(object)); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func AddAsset(c router.Context) (interface{}, error) {\n\t// get the data from the request and parse it as structure\n\tdata := c.Param(`data`).(Asset)\n\n\t// set the default values for the fields\n\tdata.DocType = utils.DocTypeAsset\n\n\t// Validate the inputed data\n\terr := data.Validate()\n\tif 
err != nil {\n\t\tif _, ok := err.(validation.InternalError); ok {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, status.ErrStatusUnprocessableEntity.WithValidationError(err.(validation.Errors))\n\t}\n\n\tstub := c.Stub()\n\ttxID := stub.GetTxID()\n\tuserAsBytes, _ := stub.GetState(data.UserID)\n\tuser := User{}\n\n\terr = json.Unmarshal(userAsBytes, &user)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif user.WalletBalance < utils.AddAssetFee {\n\t\treturn nil, status.ErrInternal.WithMessage(fmt.Sprintf(\"You don't have enough coins to purchase this asset.\"))\n\t}\n\n\t// check asset code already exists\n\tqueryString := fmt.Sprintf(\"{\\\"selector\\\":{\\\"code\\\":\\\"%s\\\",\\\"doc_type\\\":\\\"%s\\\"}}\", data.Code, utils.DocTypeAsset)\n\tasset, _, err := utils.Get(c, queryString, \"\")\n\tif asset != nil {\n\t\treturn nil, status.ErrBadRequest.WithMessage(fmt.Sprintf(\"Symbol %s already exists!\", data.Code))\n\t}\n\n\t// check asset label already exists\n\tqueryString1 := fmt.Sprintf(\"{\\\"selector\\\":{\\\"label\\\":\\\"%s\\\",\\\"doc_type\\\":\\\"%s\\\"}}\", data.Label, utils.DocTypeAsset)\n\tassetLabel, _, err := utils.Get(c, queryString1, \"\")\n\tif assetLabel != nil {\n\t\treturn nil, status.ErrBadRequest.WithMessage(fmt.Sprintf(\"Name %s already exists!\", data.Label))\n\t}\n\n\terr = c.State().Put(txID, data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tuser.WalletBalance = user.WalletBalance - utils.AddAssetFee\n\n\tcreatedAt := time.Now().Format(time.RFC3339)\n\t// add asset transaction\n\tvar addAssetTransaction = Transaction{UserID: data.UserID, Type: utils.Send, Code: utils.WalletCoinSymbol, AssetLabel: data.Label, Quantity: utils.AddAssetFee, DocType: utils.DocTypeTransaction, CreatedAt: createdAt, AddressValue: \"\", LabelValue: \"\", AddressBookLabel: \"Original\", TxnType: utils.AssetCreatedTxn}\n\terr = c.State().Put(txID+strconv.Itoa(1), addAssetTransaction)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponseBody := 
ResponseAddAsset{ID: txID, Balance: user.WalletBalance, Symbol: user.Symbol}\n\n\t// Save the data and return the response\n\treturn responseBody, c.State().Put(data.UserID, user)\n}", "func (group *UploadGroup) Upload(contents string, bucket string, filename string, month time.Time) {\n\tgroup.wg.Add(1)\n\tuploadResult := UploadResult{\n\t\tmake(chan *s3manager.UploadOutput, 1),\n\t\tmake(chan error, 1),\n\t}\n\n\tgo func(outc chan *s3manager.UploadOutput, ec chan error) {\n\t\tdefer group.wg.Done()\n\t\toutput, err := Upload(contents, bucket, filename, month)\n\t\tec <- err\n\t\toutc <- output\n\t}(uploadResult.S3Output, uploadResult.Err)\n\n\tgroup.Outputs = append(group.Outputs, uploadResult)\n}", "func (fake fakeFileUpload) UploadFilePutObject(ctx context.Context, request UploadFileRequest) (response UploadResponse, err error) {\n\tresponse = UploadResponse{\n\t\tType: SinglepartUpload,\n\t}\n\n\treturn\n}", "func (a *API) UploadFile(filepath, bucket, path string, force bool) error {\n\tbucketClient, err := a.oss.Bucket(bucket)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"getting bucket %q: %v\", bucket, err)\n\t}\n\n\tif !force {\n\t\t// TODO: Switch to head object whenever the library actually adds the call :(\n\t\tobjects, err := bucketClient.ListObjects()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"listing objects in bucket: %v\", err)\n\t\t}\n\n\t\tfor _, object := range objects.Objects {\n\t\t\t// Already exists, inform & re-use\n\t\t\tif object.Key == path {\n\t\t\t\tplog.Infof(\"object already exists and force is false\")\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n\n\t// Use 1000K part size with 10 coroutines to speed up the upload\n\tplog.Infof(\"uploading oss://%v/%v\", bucket, path)\n\treturn bucketClient.UploadFile(path, filepath, 1000*1024, oss.Routines(10))\n}", "func (s *SmartContract) TransferAsset(ctx contractapi.TransactionContextInterface, id string, newOwner string) error {\n\n\tasset, err := s.ReadAsset(ctx, id)\n\tif err != nil 
{\n\t\treturn err\n\t}\n\n\tclientID, err := s.GetSubmittingClientIdentity(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif clientID != asset.Owner {\n\t\treturn fmt.Errorf(\"submitting client not authorized to update asset, does not own asset\")\n\t}\n\n\tasset.Owner = newOwner\n\tassetJSON, err := json.Marshal(asset)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn ctx.GetStub().PutState(id, assetJSON)\n}", "func (r *MediaService) Upload(resourceName string, media *Media) *MediaUploadCall {\n\tc := &MediaUploadCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.resourceName = resourceName\n\tc.media = media\n\treturn c\n}", "func (r *MediaService) Upload(resourceName string, media *Media) *MediaUploadCall {\n\tc := &MediaUploadCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.resourceName = resourceName\n\tc.media = media\n\treturn c\n}", "func (n *Repository) UploadArtifact(ar *Artifact, hashs ...string) error {\n\tpomURL := n.generateURL(ar, suffixPom)\n\tif err := n.upload(pomURL, bytes.NewReader(ar.Pom), \"\"); err != nil {\n\t\treturn err\n\t}\n\n\tfOpen, err := os.Open(ar.File)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfileURL := n.generateURL(ar, ar.extension())\n\tif err := n.upload(fileURL, fOpen, ar.ContentType); err != nil {\n\t\treturn err\n\t}\n\n\tfor _, h := range hashs {\n\t\tif iGetIt := n.hash[h]; iGetIt != nil {\n\t\t\tn.uploadHash(ar, iGetIt)\n\t\t} else {\n\t\t\turlIssue := generateURLIssue(h)\n\t\t\tlog.Logger.Printf(\"%q is not managed by the client %q\", h, urlIssue)\n\t\t}\n\t}\n\n\treturn nil\n}", "func (gcs *GSCClient) Upload(b []byte, bucket, fileName string) (string, error) {\n\tr := bytes.NewReader(b)\n\tctx, cancel := context.WithTimeout(context.Background(), timeout)\n\tdefer cancel()\n\twc := gcs.client.Bucket(bucket).Object(fileName).NewWriter(ctx)\n\tif _, err := io.Copy(wc, r); err != nil {\n\t\treturn \"\", fmt.Errorf(\"falha ao copiar conteúdo de arquivo local para o bucket no GCS (%s/%s), erro %q\", 
bucket, fileName, err)\n\t}\n\tif err := wc.Close(); err != nil {\n\t\treturn \"\", fmt.Errorf(\"falha ao fechar storate.Writter object (%s/%s), erro %q\", bucket, fileName, err)\n\t}\n\treturn fmt.Sprintf(\"%s/%s\", bucket, fileName), nil\n}", "func PutObject(item io.Reader, headers map[string]*string, urlPath *string) error {\n\treq, err := http.NewRequest(\"PUT\", tea.StringValue(urlPath), item)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor k, v := range headers {\n\t\treq.Header.Add(k, tea.StringValue(v))\n\t}\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn errors.New(\"Upload file failed.\")\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode >= 400 && resp.StatusCode < 600 {\n\t\tbodyStr, err := util.ReadAsString(resp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\trespMap := ossutil.GetErrMessage(bodyStr)\n\t\tif respMap[\"Code\"] != nil && respMap[\"Code\"].(string) == \"CallbackFailed\" {\n\t\t\treturn nil\n\t\t}\n\t\treturn tea.NewSDKError(map[string]interface{}{\n\t\t\t\"code\": respMap[\"Code\"],\n\t\t\t\"message\": respMap[\"Message\"],\n\t\t\t\"data\": map[string]interface{}{\n\t\t\t\t\"httpCode\": resp.StatusCode,\n\t\t\t\t\"requestId\": respMap[\"RequestId\"],\n\t\t\t\t\"hostId\": respMap[\"HostId\"],\n\t\t\t},\n\t\t})\n\t}\n\n\treturn nil\n}", "func (rc *Controller) enqueueRelease(obj interface{}) {\n\tkey, err := rc.keyForObj(obj)\n\tif err != nil {\n\t\tglog.Errorf(\"Can't get obj key: %v\", err)\n\t\treturn\n\t}\n\n\tglog.V(4).Infof(\"Enqueue: %s\", key)\n\t// key must be a string\n\trc.queue.Add(key)\n}", "func commitFileUpload(\n\tctx context.Context,\n\tfs *fsMutable,\n\tchans commitChans,\n\tbundleUploadWaitGroup *sync.WaitGroup,\n\tcaFs cafs.Fs,\n\tuploadTask commitUploadTask) {\n\tdefer bundleUploadWaitGroup.Done()\n\tfile, err := fs.localCache.OpenFile(getPathToBackingFile(uploadTask.inodeID),\n\t\tos.O_RDONLY|os.O_SYNC, fileDefaultMode)\n\tif err != nil {\n\t\tselect {\n\t\tcase chans.error <- 
err:\n\t\t\tfs.l.Error(\"Commit: backing fs open() error on file upload\",\n\t\t\t\tzap.Error(err),\n\t\t\t\tzap.String(\"filename\", uploadTask.name))\n\t\tcase <-chans.done:\n\t\t}\n\t\treturn\n\t}\n\t// written, key, keys, duplicate, err =\n\tputRes, err := caFs.Put(ctx, file)\n\tif err != nil {\n\t\tselect {\n\t\tcase chans.error <- err:\n\t\t\tfs.l.Error(\"Commit: cafs Put() error on file upload\",\n\t\t\t\tzap.Error(err),\n\t\t\t\tzap.String(\"filename\", uploadTask.name))\n\t\tcase <-chans.done:\n\t\t}\n\t\treturn\n\t}\n\tbe := model.BundleEntry{\n\t\tHash: putRes.Key.String(),\n\t\tNameWithPath: uploadTask.name,\n\t\tFileMode: 0, // #TODO: #35 file mode support\n\t\tSize: uint64(putRes.Written),\n\t}\n\tselect {\n\tcase chans.bundleEntry <- be:\n\tcase <-chans.done:\n\t}\n\n}", "func commitFileUpload(\n\tctx context.Context,\n\tfs *fsMutable,\n\tchans commitChans,\n\tbundleUploadWaitGroup *sync.WaitGroup,\n\tcaFs cafs.Fs,\n\tuploadTask commitUploadTask) {\n\tdefer bundleUploadWaitGroup.Done()\n\tfile, err := fs.localCache.OpenFile(getPathToBackingFile(uploadTask.inodeID),\n\t\tos.O_RDONLY|os.O_SYNC, fileDefaultMode)\n\tif err != nil {\n\t\tselect {\n\t\tcase chans.error <- err:\n\t\t\tfs.l.Error(\"Commit: backing fs open() error on file upload\",\n\t\t\t\tzap.Error(err),\n\t\t\t\tzap.String(\"filename\", uploadTask.name))\n\t\tcase <-chans.done:\n\t\t}\n\t\treturn\n\t}\n\t// written, key, keys, duplicate, err =\n\tputRes, err := caFs.Put(ctx, file)\n\tif err != nil {\n\t\tselect {\n\t\tcase chans.error <- err:\n\t\t\tfs.l.Error(\"Commit: cafs Put() error on file upload\",\n\t\t\t\tzap.Error(err),\n\t\t\t\tzap.String(\"filename\", uploadTask.name))\n\t\tcase <-chans.done:\n\t\t}\n\t\treturn\n\t}\n\tbe := model.BundleEntry{\n\t\tHash: putRes.Key.String(),\n\t\tNameWithPath: uploadTask.name,\n\t\tFileMode: 0, // #TODO: #35 file mode support\n\t\tSize: uint64(putRes.Written),\n\t}\n\tselect {\n\tcase chans.bundleEntry <- be:\n\tcase <-chans.done:\n\t}\n\n}", 
"func (c *Client) Upload(src []byte, dest string) error {\n\n\tdestUrl, err := url.Parse(fmt.Sprintf(\"files/%s/%s\", c.Username, dest))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, _, err = c.sendRequest(\"PUT\", c.Url.ResolveReference(destUrl).String(), src, nil, nil)\n\n\treturn err\n}", "func setAssetTag(requestHandler common.RequestHandler) endpointHandler {\n\treturn func(httpWriter http.ResponseWriter, httpRequest *http.Request) error {\n\t\tlog.Trace(\"resource/asset_tag:setAssetTag() Entering\")\n\t\tdefer log.Trace(\"resource/asset_tag:setAssetTag() Leaving\")\n\n\t\tlog.Debugf(\"resource/asset_tag:setAssetTag() Request: %s\", httpRequest.URL.Path)\n\n\t\tvar tagWriteRequest taModel.TagWriteRequest\n\n\t\tcontentType := httpRequest.Header.Get(\"Content-Type\")\n\t\tif contentType != \"application/json\" {\n\t\t\tlog.Errorf(\"resource/asset_tag:setAssetTag( %s - Invalid content-type '%s'\", message.InvalidInputBadParam, contentType)\n\t\t\treturn &common.EndpointError{Message: \"Invalid content-type\", StatusCode: http.StatusBadRequest}\n\t\t}\n\n\t\tdata, err := ioutil.ReadAll(httpRequest.Body)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Errorf(\"resource/asset_tag:setAssetTag() %s - Error reading request body for request: %s\", message.AppRuntimeErr, httpRequest.URL.Path)\n\t\t\treturn &common.EndpointError{Message: \"Error parsing request\", StatusCode: http.StatusBadRequest}\n\t\t}\n\n\t\tdec := json.NewDecoder(bytes.NewReader(data))\n\t\tdec.DisallowUnknownFields()\n\t\terr = dec.Decode(&tagWriteRequest)\n\t\tif err != nil {\n\t\t\tsecLog.WithError(err).Errorf(\"resource/asset_tag:setAssetTag() %s - Error marshaling json data: %s for request: %s\", message.InvalidInputBadParam, string(data), httpRequest.URL.Path)\n\t\t\treturn &common.EndpointError{Message: \"Error processing request\", StatusCode: http.StatusBadRequest}\n\t\t}\n\n\t\terr = requestHandler.DeployAssetTag(&tagWriteRequest)\n\t\tif err != nil 
{\n\t\t\tlog.WithError(err).Errorf(\"resource/asset_tag:setAssetTag() %s - Error while deploying asset tag\", message.AppRuntimeErr)\n\t\t\treturn err\n\t\t}\n\t\thttpWriter.WriteHeader(http.StatusOK)\n\t\treturn nil\n\t}\n}", "func (a *API) UploadObject(r io.Reader, bucket, path string, force bool) error {\n\ts3uploader := s3manager.NewUploaderWithClient(a.s3)\n\n\tif !force {\n\t\t_, err := a.s3.HeadObject(&s3.HeadObjectInput{\n\t\t\tBucket: &bucket,\n\t\t\tKey: &path,\n\t\t})\n\t\tif err != nil {\n\t\t\tif !s3IsNotFound(err) {\n\t\t\t\treturn fmt.Errorf(\"unable to head object %v/%v: %v\", bucket, path, err)\n\t\t\t}\n\t\t} else {\n\t\t\tplog.Infof(\"skipping upload since force was not set: s3://%v/%v\", bucket, path)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t_, err := s3uploader.Upload(&s3manager.UploadInput{\n\t\tBody: r,\n\t\tBucket: aws.String(bucket),\n\t\tKey: aws.String(path),\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error uploading s3://%v/%v: %v\", bucket, path, err)\n\t}\n\treturn err\n}", "func (g *GHR) DeleteAssets(ctx context.Context, releaseID int64, localAssets []string, parallel int) error {\n\tstart := time.Now()\n\tdefer func() {\n\t\tDebugf(\"DeleteAssets: time: %d ms\", int(time.Since(start).Seconds()*1000))\n\t}()\n\n\teg, ctx := errgroup.WithContext(ctx)\n\n\tassets, err := g.GitHub.ListAssets(ctx, releaseID)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to list assets: %w\", err)\n\t}\n\n\tsemaphore := make(chan struct{}, parallel)\n\tfor _, localAsset := range localAssets {\n\t\tfor _, asset := range assets {\n\t\t\t// https://golang.org/doc/faq#closures_and_goroutines\n\t\t\tlocalAsset, asset := localAsset, asset\n\n\t\t\t// Uploaded asset name is same as basename of local file\n\t\t\tif *asset.Name == filepath.Base(localAsset) {\n\t\t\t\teg.Go(func() error {\n\t\t\t\t\tsemaphore <- struct{}{}\n\t\t\t\t\tdefer func() {\n\t\t\t\t\t\t<-semaphore\n\t\t\t\t\t}()\n\n\t\t\t\t\tfmt.Fprintf(g.outStream, \"--> Deleting: %15s\\n\", 
*asset.Name)\n\t\t\t\t\tif err := g.GitHub.DeleteAsset(ctx, *asset.ID); err != nil {\n\t\t\t\t\t\treturn fmt.Errorf(\"failed to delete asset: %s %w\", *asset.Name, err)\n\t\t\t\t\t}\n\t\t\t\t\treturn nil\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\n\tif err := eg.Wait(); err != nil {\n\t\treturn fmt.Errorf(\"one of the goroutines failed: %w\", err)\n\t}\n\n\treturn nil\n}", "func (c *Client) Upload(namespace, name string, src io.Reader) error {\n\tuploadPath := path.Join(c.config.RootDirectory, c.config.UploadDirectory, uuid.NewV4().String())\n\tblobPath, err := c.pather.BlobPath(name)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"blob path: %s\", err)\n\t}\n\tif err := c.webhdfs.Create(uploadPath, src); err != nil {\n\t\treturn err\n\t}\n\tif err := c.webhdfs.Mkdirs(path.Dir(blobPath)); err != nil {\n\t\treturn err\n\t}\n\treturn c.webhdfs.Rename(uploadPath, blobPath)\n}", "func (sc *ScreenlyClient) Post(payload *AssetPayload) (*Asset, error) {\n\tb := new(bytes.Buffer)\n\terr := json.NewEncoder(b).Encode(payload)\n\tif err == nil {\n\t\tpath := \"assets\"\n\t\tresponse, err := sc.doHttp(\"POST\", path, b)\n\t\tif err == nil {\n\t\t\treceived := &Asset{}\n\t\t\t//io.Copy(os.Stdout, response.Body)\n\t\t\terr = json.NewDecoder(response.Body).Decode(received)\n\t\t\tif err == nil {\n\t\t\t\treturn received, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn nil, err\n}", "func Upload(contents string, bucket string, filename string, month time.Time) (*(s3manager.UploadOutput), error) {\n\n\treader := strings.NewReader(contents)\n\n\tkey := s3KeyScheme(month, filename)\n\n\t// Upload the file to S3\n\tresult, err := uploader.Upload(&s3manager.UploadInput{\n\t\tBucket: aws.String(bucket),\n\t\tKey: aws.String(key),\n\t\tBody: reader,\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to upload file, %v\", err)\n\t}\n\treturn result, nil\n}", "func (this *Artifact) Upload(filename string) {\n\t// Build file upload request\n\n\tfilepath := path.Join(filename, this.File)\n\tfile, err := 
os.Open(filepath)\n\tif err != nil {\n\t\tfmt.Printf(\"Unable to open input file %s: %s\\n\", filepath, err.Error())\n\t\tos.Exit(1)\n\t}\n\treq, err := http.NewRequest(\"PUT\", this.getEndpoint(), file)\n\tif err != nil {\n\t\tfmt.Printf(\"Unable to build REST request: %s\\n\", err.Error())\n\t\tos.Exit(1)\n\t}\n\treq.SetBasicAuth(bintray.Username, bintray.APIKey)\n\treq.Header.Add(\"X-Bintray-Override\", boolToString(this.Override))\n\treq.Header.Add(\"X-Bintray-Publish\", boolToString(this.Publish))\n\tif this.Type == \"Debian\" {\n\t\tthis.addDebianHeaders(req)\n\t}\n\n\t// Set up an HTTP client with a bundled root CA certificate (borrowed from Ubuntu 14.04)\n\t// This is necessary because the required certificate is missing from the root image and\n\t// without it the upload fails with \"x509: failed to load system roots and no roots provided\"\n\n\tclient := http.Client{}\n\tpool := x509.NewCertPool()\n\tif pemCerts, err := ioutil.ReadFile(\"/etc/ssl/certs/ca-certificates.crt\"); err != nil {\n\t\tfmt.Printf(\"Unable to read ca-certificates.crt: %s\\n\", err.Error())\n\t\tos.Exit(1)\n\t} else {\n\t\tpool.AppendCertsFromPEM(pemCerts)\n\t\tclient.Transport = &http.Transport{\n\t\t\tTLSClientConfig: &tls.Config{\n\t\t\t\tRootCAs: pool,\n\t\t\t\tInsecureSkipVerify: bintray.Insecure,\n\t\t\t},\n\t\t}\n\t}\n\tif bintray.Debug {\n\t\tdumpRequest(\"DEBUG HTTP Request\", req)\n\t}\n\n\t// Execute the upload request and format the response\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tfmt.Printf(\"Upload request failed: %s\\n\", err.Error())\n\t\tdumpRequest(\"Failing request\", req)\n\t\tos.Exit(1)\n\t}\n\tdefer resp.Body.Close()\n\trespBody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tfmt.Printf(\"Unable to read request response: %s\\n\", err.Error())\n\t\tdumpRequest(\"Failing request\", req)\n\t\tos.Exit(1)\n\t}\n\tmessageText := new(MessageText)\n\tif len(respBody) > 0 {\n\t\tjson.Unmarshal(respBody, messageText)\n\t}\n\tif resp.StatusCode 
> 299 {\n\t\terrorText := fmt.Sprintf(\"Error %d\", resp.StatusCode)\n\t\thttpErrorText := http.StatusText(resp.StatusCode)\n\t\tif len(httpErrorText) > 0 {\n\t\t\terrorText += \" \" + httpErrorText\n\t\t}\n\t\tif len(messageText.Message) > 0 {\n\t\t\terrorText += \" - \" + messageText.Message\n\t\t}\n\t\tfmt.Printf(\"%s\\n\", errorText)\n\t\tdumpRequest(\"Failing request\", req)\n\t\tos.Exit(1)\n\t}\n\n\tif len(messageText.Message) == 0 && len(respBody) > 0 {\n\t\tmessageText.Message = string(respBody)\n\t}\n\tfmt.Printf(\"Result: %s\\n\", messageText.Message)\n\tif messageText.Message != \"success\" {\n\t\tif this.Override || !strings.Contains(messageText.Message, \"already exists\") {\n\t\t\tdumpRequest(\"\\nRequest was:\", req)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n}", "func UploadData(ctx context.Context, data []byte, dataType string) error {\n\n\t//Gets access grant stored in .env\n\tvar envs map[string]string\n\tenvs, err := godotenv.Read(\".env\")\n\n\tif err != nil {\n\t\tlog.Fatal(\"Error loading .env file\")\n\t}\n\n\taccessGrant := envs[\"STORJACCESSGRANT\"]\n\n\t// Parse the Access Grant.\n\taccess, err := uplink.ParseAccess(accessGrant)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not parse access grant: %v\", err)\n\t}\n\n\t// Creates a project using our access\n\tproject, err := uplink.OpenProject(ctx, access)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not open project: %v\", err)\n\t}\n\tdefer project.Close()\n\n\t// Creates the bucketName and objectKey variables to be used later\n\tvar bucketName string = \"bucket2\"\n\tvar objectKey string = time.Now().Format(\"2006-01-02 15:04:05\")\n\tobjectKey = strings.Replace(objectKey, \":\", \"-\", -1)\n\tobjectKey = strings.Replace(objectKey, \" \", \"_\", -1)\n\tobjectKey = objectKey + dataType\n\n\t// Ensure the desired Bucket within the Project is created.\n\t_, err = project.EnsureBucket(ctx, bucketName)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not ensure bucket: %v\", err)\n\t}\n\n\t// 
Intitiate the upload of our Object to the specified bucket and key.\n\tupload, err := project.UploadObject(ctx, bucketName, objectKey, nil)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not initiate upload: %v\", err)\n\t}\n\n\t// Copy the data to the upload.\n\tbuf := bytes.NewBuffer(data)\n\t_, err = io.Copy(upload, buf)\n\tif err != nil {\n\t\t_ = upload.Abort()\n\t\treturn fmt.Errorf(\"could not upload data: %v\", err)\n\t}\n\n\t// Commit the uploaded object.\n\terr = upload.Commit()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not commit uploaded object: %v\", err)\n\t}\n\n\tprint(\"\\nNew name of the uploaded file is: \" + objectKey + \"\\n\\n\")\n\tprint(\"\\n now the files in the bucket are as follows: \\n\")\n\n\t// // Test to see what is uploaded in our bucket\n\t// objects := project.ListObjects(ctx, \"bucket1\", nil)\n\t// for objects.Next() {\n\t// \titem := objects.Item()\n\t// \tfmt.Println(item.IsPrefix, item.Key)\n\t// }\n\t// if err := objects.Err(); err != nil {\n\t// \treturn err\n\t// }\n\n\treturn nil\n}", "func (h *Handler) Upload(source string, destination string) {\n\tlog.Warn(\"generic doesn't support file upload\")\n}", "func (s *PublicStorageServer) Upload(ctx context.Context, input *pbs.File) (*pbs.FileURL, error) {\n\tvar obj file.MinioObj\n\tobj.FromPublicFile(input)\n\n\tinfo, err := services.MinioClient.PutObject(context.Background(), \"public\", obj.ObjectName, bytes.NewReader(obj.File), -1, minio.PutObjectOptions{ContentType: obj.Option.ContentType})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlog.Printf(\"Upload public file %s has been success\", obj.ObjectName)\n\n\treturn &pbs.FileURL{\n\t\tUrl: info.Location,\n\t}, nil\n}", "func (c *Client) CommitUpload(request *CommitUploadRequest) (response *CommitUploadResponse, err error) {\n if request == nil {\n request = NewCommitUploadRequest()\n }\n response = NewCommitUploadResponse()\n err = c.Send(request, response)\n return\n}", "func (s *StorageService) 
Upload(\n\tuserID *mytype.OID,\n\tfile io.Reader,\n\tcontentType string,\n\tsize int64,\n) (*UploadResponse, error) {\n\t// Hash of the file contents to be used as the s3 object 'key'.\n\thash := sha1.New()\n\tio.Copy(hash, file)\n\tkey := fmt.Sprintf(\"%x\", hash.Sum(nil))\n\n\tobjectName := fmt.Sprintf(\n\t\t\"%s/%s/%s/%s\",\n\t\tkey[:2],\n\t\tkey[3:5],\n\t\tkey[6:8],\n\t\tkey[9:],\n\t)\n\tobjectPath := strings.Join([]string{\n\t\tuserID.Short,\n\t\tobjectName,\n\t}, \"/\")\n\n\t_, err := s.svc.StatObject(\n\t\ts.bucket,\n\t\tobjectPath,\n\t\tminio.StatObjectOptions{},\n\t)\n\tif err != nil {\n\t\tminioError := minio.ToErrorResponse(err)\n\t\tif minioError.Code != \"NoSuchKey\" {\n\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\treturn nil, err\n\t\t}\n\t\tn, err := s.svc.PutObject(\n\t\t\ts.bucket,\n\t\t\tobjectPath,\n\t\t\tfile,\n\t\t\tsize,\n\t\t\tminio.PutObjectOptions{ContentType: contentType},\n\t\t)\n\t\tif err != nil {\n\t\t\tmylog.Log.WithError(err).Error(util.Trace(\"\"))\n\t\t\treturn nil, err\n\t\t}\n\n\t\tmylog.Log.WithField(\"size\", n).Info(util.Trace(\"uploaded new file\"))\n\t\treturn &UploadResponse{\n\t\t\tKey: key,\n\t\t\tIsNewObject: true,\n\t\t}, nil\n\t}\n\n\tmylog.Log.WithField(\"key\", key).Info(util.Trace(\"\"))\n\treturn &UploadResponse{\n\t\tKey: key,\n\t\tIsNewObject: false,\n\t}, nil\n}", "func (s Service) Upload(ctx context.Context, bucket, key string,\n\tfile io.Reader) error {\n\n\t// Get the size.\n\tbuf := &bytes.Buffer{}\n\tsize, err := io.Copy(buf, file)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = s.client.PutObject(ctx, bucket, key, buf, size,\n\t\tminiogo.PutObjectOptions{ContentType: \"application/octet-stream\"})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func uploadObject(\n\tclient *gophercloud.ServiceClient,\n\tcontainerName string,\n\tobjectName string,\n\topts *UploadOpts,\n\torigObject *originalObject,\n\tsourceFileInfo os.FileInfo) (*UploadResult, error) {\n\tuploadResult := 
&UploadResult{\n\t\tAction: \"upload_action\",\n\t\tContainer: containerName,\n\t\tObject: objectName,\n\t}\n\n\t// manifestData contains information about existing objects.\n\tvar manifestData []Manifest\n\n\t// oldObjectManifest is the existing object's manifest.\n\tvar oldObjectManifest string\n\n\t// oldSLOManifestPaths is a list of the old object segment's manifest paths.\n\tvar oldSLOManifestPaths []string\n\n\t// newSLOManifestPaths is a list of the new object segment's manifest paths.\n\tvar newSLOManifestPaths []string\n\n\tif origObject != nil {\n\t\torigHeaders := origObject.headers\n\t\torigMetadata := origObject.metadata\n\t\tisSLO := origHeaders.StaticLargeObject\n\n\t\tif opts.Changed || opts.SkipIdentical || !opts.LeaveSegments {\n\t\t\tvar err error\n\n\t\t\t// If the below conditionals are met, get the manifest data of\n\t\t\t// the existing object.\n\t\t\tif opts.SkipIdentical || (isSLO && !opts.LeaveSegments) {\n\t\t\t\tmo := GetManifestOpts{\n\t\t\t\t\tContainerName: containerName,\n\t\t\t\t\tContentLength: origHeaders.ContentLength,\n\t\t\t\t\tETag: origHeaders.ETag,\n\t\t\t\t\tObjectManifest: origHeaders.ObjectManifest,\n\t\t\t\t\tObjectName: objectName,\n\t\t\t\t\tStaticLargeObject: origHeaders.StaticLargeObject,\n\t\t\t\t}\n\n\t\t\t\tmanifestData, err = GetManifest(client, mo)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"unable to get manifest for %s/%s: %s\", containerName, objectName, err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// If SkipIdentical is enabled, compare the md5sum/etag of each\n\t\t\t// piece of the manifest to determine if the objects are the same.\n\t\t\tif opts.SkipIdentical {\n\t\t\t\tok, err := IsIdentical(manifestData, opts.Path)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"error comparing object %s/%s and path %s: %s\", containerName, objectName, opts.Path, err)\n\t\t\t\t}\n\n\t\t\t\tif ok {\n\t\t\t\t\tuploadResult.Status = \"skip-identical\"\n\t\t\t\t\tuploadResult.Success = 
true\n\t\t\t\t\treturn uploadResult, nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// If the source object is a local file and Changed is enabled,\n\t\t// compare the mtime and content length to determine if the objects\n\t\t// are the same.\n\t\tif opts.Path != \"\" && opts.Changed {\n\t\t\tvar mtMatch bool\n\t\t\tif v, ok := origMetadata[\"Mtime\"]; ok {\n\t\t\t\tif v == opts.Metadata[\"Mtime\"] {\n\t\t\t\t\tmtMatch = true\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tvar fSizeMatch bool\n\t\t\tif origHeaders.ContentLength == sourceFileInfo.Size() {\n\t\t\t\tfSizeMatch = true\n\t\t\t}\n\n\t\t\tif mtMatch && fSizeMatch {\n\t\t\t\tuploadResult.Status = \"skip-changed\"\n\t\t\t\tuploadResult.Success = true\n\t\t\t\treturn uploadResult, nil\n\t\t\t}\n\t\t}\n\n\t\t// If LeaveSegments is set to false (default), keep\n\t\t// track of the paths of the original object's segments\n\t\t// so they can be deleted later.\n\t\tif !opts.LeaveSegments {\n\t\t\toldObjectManifest = origHeaders.ObjectManifest\n\n\t\t\tif isSLO {\n\t\t\t\tfor _, data := range manifestData {\n\t\t\t\t\tsegPath := strings.TrimSuffix(data.Name, \"/\")\n\t\t\t\t\tsegPath = strings.TrimPrefix(segPath, \"/\")\n\t\t\t\t\toldSLOManifestPaths = append(oldSLOManifestPaths, segPath)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Segment upload\n\tif opts.Path != \"\" && opts.SegmentSize > 0 && (sourceFileInfo.Size() > opts.SegmentSize) {\n\t\tvar uploadSegmentResults []uploadSegmentResult\n\t\tuploadResult.LargeObject = true\n\n\t\tvar segStart int64\n\t\tvar segIndex int\n\t\tfSize := sourceFileInfo.Size()\n\t\tsegSize := opts.SegmentSize\n\n\t\tfor segStart < fSize {\n\t\t\tvar segName string\n\n\t\t\tif segStart+segSize > fSize {\n\t\t\t\tsegSize = fSize - segStart\n\t\t\t}\n\n\t\t\tif opts.UseSLO {\n\t\t\t\tsegName = fmt.Sprintf(\"%s/slo/%s/%d/%d/%08d\",\n\t\t\t\t\tobjectName, opts.Metadata[\"Mtime\"], fSize, opts.SegmentSize, segIndex)\n\t\t\t} else {\n\t\t\t\tsegName = fmt.Sprintf(\"%s/%s/%d/%d/%08d\",\n\t\t\t\t\tobjectName, 
opts.Metadata[\"Mtime\"], fSize, opts.SegmentSize, segIndex)\n\t\t\t}\n\n\t\t\tuso := &uploadSegmentOpts{\n\t\t\t\tChecksum: opts.Checksum,\n\t\t\t\tPath: opts.Path,\n\t\t\t\tObjectName: objectName,\n\t\t\t\tSegmentContainer: opts.SegmentContainer,\n\t\t\t\tSegmentIndex: segIndex,\n\t\t\t\tSegmentName: segName,\n\t\t\t\tSegmentSize: segSize,\n\t\t\t\tSegmentStart: segStart,\n\t\t\t}\n\n\t\t\tresult, err := uploadSegment(client, uso)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tuploadSegmentResults = append(uploadSegmentResults, *result)\n\n\t\t\tsegIndex += 1\n\t\t\tsegStart += segSize\n\t\t}\n\n\t\tif opts.UseSLO {\n\t\t\tuploadOpts := &uploadSLOManifestOpts{\n\t\t\t\tResults: uploadSegmentResults,\n\t\t\t\tContainerName: containerName,\n\t\t\t\tObjectName: objectName,\n\t\t\t\tMetadata: opts.Metadata,\n\t\t\t}\n\n\t\t\terr := uploadSLOManifest(client, uploadOpts)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tfor _, result := range uploadSegmentResults {\n\t\t\t\tsegPath := strings.TrimSuffix(result.Location, \"/\")\n\t\t\t\tsegPath = strings.TrimPrefix(segPath, \"/\")\n\t\t\t\tnewSLOManifestPaths = append(newSLOManifestPaths, segPath)\n\t\t\t}\n\t\t} else {\n\t\t\tnewObjectManifest := fmt.Sprintf(\"%s/%s/%s/%d/%d/\",\n\t\t\t\turl.QueryEscape(opts.SegmentContainer), url.QueryEscape(objectName),\n\t\t\t\topts.Metadata[\"Mtime\"], fSize, opts.SegmentSize)\n\n\t\t\tif oldObjectManifest != \"\" {\n\t\t\t\tif strings.TrimSuffix(oldObjectManifest, \"/\") == strings.TrimSuffix(newObjectManifest, \"/\") {\n\t\t\t\t\toldObjectManifest = \"\"\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tcreateOpts := objects.CreateOpts{\n\t\t\t\tContent: strings.NewReader(\"\"),\n\t\t\t\tContentLength: 0,\n\t\t\t\tMetadata: opts.Metadata,\n\t\t\t\tObjectManifest: newObjectManifest,\n\t\t\t}\n\n\t\t\tres := objects.Create(client, containerName, objectName, createOpts)\n\t\t\tif res.Err != nil {\n\t\t\t\treturn nil, res.Err\n\t\t\t}\n\t\t}\n\t} else if opts.UseSLO && 
opts.SegmentSize > 0 && opts.Path == \"\" {\n\t\t// Streaming segment upload\n\t\tvar segIndex int\n\t\tvar uploadSegmentResults []uploadSegmentResult\n\n\t\tfor {\n\t\t\tsegName := fmt.Sprintf(\"%s/slo/%s/%d/%08d\",\n\t\t\t\tobjectName, opts.Metadata[\"Mtime\"], opts.SegmentSize, segIndex)\n\n\t\t\t// Checksum is not passed here because it's always done during streaming.\n\t\t\tuso := &uploadSegmentOpts{\n\t\t\t\tContent: opts.Content,\n\t\t\t\tContainerName: containerName,\n\t\t\t\tObjectName: objectName,\n\t\t\t\tSegmentContainer: opts.SegmentContainer,\n\t\t\t\tSegmentIndex: segIndex,\n\t\t\t\tSegmentName: segName,\n\t\t\t\tSegmentSize: opts.SegmentSize,\n\t\t\t}\n\n\t\t\tuploadSegmentResult, err := uploadStreamingSegment(client, uso)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"error uploading segment %d of %s/%s: %s\", segIndex, containerName, objectName, err)\n\t\t\t}\n\n\t\t\tif !uploadSegmentResult.Success {\n\t\t\t\treturn nil, fmt.Errorf(\"Problem uploading segment %d of %s/%s\", segIndex, containerName, objectName)\n\t\t\t}\n\n\t\t\tif uploadSegmentResult.Size != 0 {\n\t\t\t\tuploadSegmentResults = append(uploadSegmentResults, *uploadSegmentResult)\n\t\t\t}\n\n\t\t\tif uploadSegmentResult.Complete {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tsegIndex += 1\n\t\t}\n\n\t\tif len(uploadSegmentResults) > 0 {\n\t\t\tif uploadSegmentResults[0].Location != fmt.Sprintf(\"/%s/%s\", containerName, objectName) {\n\t\t\t\tuploadOpts := &uploadSLOManifestOpts{\n\t\t\t\t\tResults: uploadSegmentResults,\n\t\t\t\t\tContainerName: containerName,\n\t\t\t\t\tObjectName: objectName,\n\t\t\t\t\tMetadata: opts.Metadata,\n\t\t\t\t}\n\n\t\t\t\terr := uploadSLOManifest(client, uploadOpts)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"error uploading SLO manifest for %s/%s: %s\", containerName, objectName, err)\n\t\t\t\t}\n\n\t\t\t\tfor _, result := range uploadSegmentResults {\n\t\t\t\t\tnewSLOManifestPaths = append(newSLOManifestPaths, 
result.Location)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tuploadResult.LargeObject = false\n\t\t\t}\n\t\t}\n\t} else {\n\t\tvar reader io.Reader\n\t\tvar contentLength int64\n\t\tuploadResult.LargeObject = false\n\n\t\tif opts.Path != \"\" {\n\t\t\tf, err := os.Open(opts.Path)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tdefer f.Close()\n\n\t\t\treader = f\n\t\t\tcontentLength = sourceFileInfo.Size()\n\t\t} else {\n\t\t\treader = opts.Content\n\t\t}\n\n\t\tvar eTag string\n\t\tif opts.Checksum {\n\t\t\thash := md5.New()\n\t\t\tbuf := bytes.NewBuffer([]byte{})\n\t\t\t_, err := io.Copy(io.MultiWriter(hash, buf), reader)\n\t\t\tif err != nil && err != io.EOF {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\teTag = fmt.Sprintf(\"%x\", hash.Sum(nil))\n\t\t\treader = bytes.NewReader(buf.Bytes())\n\t\t}\n\n\t\tvar noETag bool\n\t\tif !opts.Checksum {\n\t\t\tnoETag = true\n\t\t}\n\n\t\tcreateOpts := objects.CreateOpts{\n\t\t\tContent: reader,\n\t\t\tContentLength: contentLength,\n\t\t\tMetadata: opts.Metadata,\n\t\t\tETag: eTag,\n\t\t\tNoETag: noETag,\n\t\t}\n\n\t\tcreateHeader, err := objects.Create(client, containerName, objectName, createOpts).Extract()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif opts.Checksum {\n\t\t\tif createHeader.ETag != eTag {\n\t\t\t\terr := fmt.Errorf(\"upload verification failed: md5 mismatch, local %s != remote %s\", eTag, createHeader.ETag)\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\tif oldObjectManifest != \"\" || len(oldSLOManifestPaths) > 0 {\n\t\tdelObjectMap := make(map[string][]string)\n\t\tif oldObjectManifest != \"\" {\n\t\t\tvar oldObjects []string\n\n\t\t\tparts := strings.SplitN(oldObjectManifest, \"/\", 2)\n\t\t\tsContainer := parts[0]\n\t\t\tsPrefix := parts[1]\n\n\t\t\tsPrefix = strings.TrimRight(sPrefix, \"/\") + \"/\"\n\n\t\t\tlistOpts := objects.ListOpts{\n\t\t\t\tPrefix: sPrefix,\n\t\t\t}\n\t\t\tallPages, err := objects.List(client, sContainer, listOpts).AllPages()\n\t\t\tif err != nil 
{\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tallObjects, err := objects.ExtractNames(allPages)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tfor _, o := range allObjects {\n\t\t\t\toldObjects = append(oldObjects, o)\n\t\t\t}\n\n\t\t\tdelObjectMap[sContainer] = oldObjects\n\t\t}\n\n\t\tif len(oldSLOManifestPaths) > 0 {\n\t\t\tfor _, segToDelete := range oldSLOManifestPaths {\n\t\t\t\tvar oldObjects []string\n\n\t\t\t\tvar exists bool\n\t\t\t\tfor _, newSeg := range newSLOManifestPaths {\n\t\t\t\t\tif segToDelete == newSeg {\n\t\t\t\t\t\texists = true\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// Only delete the old segment if it's not part of the new segment.\n\t\t\t\tif !exists {\n\t\t\t\t\tparts := strings.SplitN(segToDelete, \"/\", 2)\n\t\t\t\t\tsContainer := parts[0]\n\t\t\t\t\tsObject := parts[1]\n\n\t\t\t\t\tif _, ok := delObjectMap[sContainer]; ok {\n\t\t\t\t\t\toldObjects = delObjectMap[sContainer]\n\t\t\t\t\t}\n\n\t\t\t\t\toldObjects = append(oldObjects, sObject)\n\t\t\t\t\tdelObjectMap[sContainer] = oldObjects\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tfor sContainer, oldObjects := range delObjectMap {\n\t\t\tfor _, oldObject := range oldObjects {\n\t\t\t\tres := objects.Delete(client, sContainer, oldObject, nil)\n\t\t\t\tif res.Err != nil {\n\t\t\t\t\treturn nil, res.Err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tuploadResult.Status = \"uploaded\"\n\tuploadResult.Success = true\n\treturn uploadResult, nil\n}", "func (c APIClient) PutObject(_r io.Reader, tags ...string) (object *pfs.Object, _ int64, retErr error) {\n\tr := grpcutil.ReaderWrapper{_r}\n\tw, err := c.newPutObjectWriteCloser(tags...)\n\tif err != nil {\n\t\treturn nil, 0, grpcutil.ScrubGRPC(err)\n\t}\n\tdefer func() {\n\t\tif err := w.Close(); err != nil && retErr == nil {\n\t\t\tretErr = grpcutil.ScrubGRPC(err)\n\t\t}\n\t\tif retErr == nil {\n\t\t\tobject = w.object\n\t\t}\n\t}()\n\tbuf := grpcutil.GetBuffer()\n\tdefer grpcutil.PutBuffer(buf)\n\twritten, err := io.CopyBuffer(w, r, buf)\n\tif err 
!= nil {\n\t\treturn nil, 0, grpcutil.ScrubGRPC(err)\n\t}\n\t// return value set by deferred function\n\treturn nil, written, nil\n}", "func (s3Uploader S3Uploader) Upload(id string, f io.ReadSeeker, hours int) (url string, err error) {\n\tfullKey := aws.String(s3Uploader.S3Key + strconv.Itoa(rand.Intn(999999)) + \"_\" + base64.StdEncoding.EncodeToString([]byte(id)))\n\t_, err = s3Uploader.S3SVC.PutObject(&s3.PutObjectInput{\n\t\tBucket: aws.String(s3Uploader.S3Bucket),\n\t\tKey: fullKey,\n\t\tBody: f,\n\t})\n\n\tif err != nil {\n\t\treturn url, errors.Wrap(err, \"Failed to upload object to S3\")\n\t}\n\n\treq, _ := s3Uploader.S3SVC.GetObjectRequest(&s3.GetObjectInput{\n\t\tBucket: aws.String(s3Uploader.S3Bucket),\n\t\tKey: fullKey,\n\t})\n\n\turlStr, err := req.Presign(24 * time.Hour)\n\tif err != nil {\n\t\treturn urlStr, errors.Wrap(err, \"Failed to sign request\")\n\t}\n\n\treturn urlStr, nil\n}", "func CreateAssetObject(args []string) (AssetObject, error) {\n\t// S001 LHTMO bosch\n\tvar err error\n\tvar myAsset AssetObject\n\n\t// Check there are 3 Arguments provided as per the the struct\n\tif len(args) != 3 {\n\t\tfmt.Println(\"CreateAssetObject(): Incorrect number of arguments. Expecting 3 \")\n\t\treturn myAsset, errors.New(\"CreateAssetObject(): Incorrect number of arguments. Expecting 3 \")\n\t}\n\n\t// Validate Serialno is an integer\n\n\t_, err = strconv.Atoi(args[0])\n\tif err != nil {\n\t\tfmt.Println(\"CreateAssetObject(): SerialNo should be an integer create failed! \")\n\t\treturn myAsset, errors.New(\"CreateAssetbject(): SerialNo should be an integer create failed. 
\")\n\t}\n\n\tmyAsset = AssetObject{args[0], args[1], args[2]}\n\n\tfmt.Println(\"CreateAssetObject(): Asset Object created: \", myAsset.Serialno, myAsset.Partno, myAsset.Owner)\n\treturn myAsset, nil\n}", "func (s *replayService) Upload(destination string, mode os.FileMode, content []byte) error {\n\ts.storage[destination] = content\n\treturn nil\n}", "func writeAssetToPart(assetObj Asset, writer *multipart.Writer) error {\n\t//Send a request to get the asset file\n\turl := assetObj.URL\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\tlog.Fatalln(\"Could not get the asset from the url: \", err)\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\t//Copy the asset to a byte buffer to find the size of the asset\n\tbuf := &bytes.Buffer{}\n\tnRead, err := io.Copy(buf, resp.Body)\n\tif err != nil {\n\t\tfmt.Println(\"Could not copy the responsBody to the buffer: \", err)\n\t\treturn err\n\t}\n\n\t//Create the MIMEHeader and create a new part\n\th := make(textproto.MIMEHeader)\n\th.Set(\"Content-Type\", assetObj.MIMEType)\n\th.Set(\"Content-Length\", strconv.FormatInt(nRead, 10)) //len(data)\n\th.Set(\"Content-Transfer-Encoding\", \"quoted-printable\")\n\th.Set(\"Content-Name\", assetObj.ID.Hex())\n\tfw, err := writer.CreatePart(h)\n\tif err != nil {\n\t\tlog.Fatalln(\"Could not create form field: \", err)\n\t\treturn err\n\t}\n\n\t//Create the encoding writer and write the data to it.\n\tw := quotedprintable.NewWriter(fw)\n\tw.Binary = true //Tell it to treat the data as binary\n\tw.Write(buf.Bytes())\n\n\treturn nil\n}", "func (sc *ScreenlyClient) Put(id string, payload *AssetPayload) (*Asset, error) {\n\tb := new(bytes.Buffer)\n\terr := json.NewEncoder(b).Encode(payload)\n\tif err == nil {\n\t\tpath := fmt.Sprintf(\"assets/%s\", id)\n\t\tresponse, err := sc.doHttp(\"PUT\", path, b)\n\t\tif err == nil {\n\t\t\treceived := &Asset{}\n\t\t\t//io.Copy(os.Stdout, response.Body)\n\t\t\terr = json.NewDecoder(response.Body).Decode(received)\n\t\t\tif err == nil 
{\n\t\t\t\treturn received, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn nil, err\n}", "func ApplyAsset(h *resource.Helper, assetPath string, hLog log.FieldLogger) error {\n\tassetLog := hLog.WithField(\"asset\", assetPath)\n\tassetLog.Debug(\"reading asset\")\n\tasset := assets.MustAsset(assetPath)\n\tassetLog.Debug(\"applying asset\")\n\tresult, err := h.Apply(asset)\n\tif err != nil {\n\t\tassetLog.WithError(err).Error(\"error applying asset\")\n\t\treturn err\n\t}\n\tassetLog.Infof(\"asset applied successfully: %v\", result)\n\treturn nil\n}", "func (n *UserNode) Upload(src *os.File, dst, name, hash string, size int64, seas []string) {\n\tdone := make(chan bool)\n\ttag := tpCrypto.SHA512HexFromBytes([]byte(dst + name + hash))\n\tuploadInfo := &userUploadInfo{\n\t\tsrc: src,\n\t\tpackages: int64(math.Ceil(float64(size) / float64(lib.PackageSize))),\n\t\toperations: make(map[p2pPeer.ID]*tpUser.Operation),\n\t\tdone: done,\n\t}\n\tseaIDs := make([]p2pPeer.ID, 0)\n\tfor _, s := range seas {\n\t\tseaPub, err := p2pCrypto.UnmarshalSecp256k1PublicKey(tpCrypto.HexToBytes(s))\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tseaID, err := p2pPeer.IDFromPublicKey(seaPub)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tseaIDs = append(seaIDs, seaID)\n\t\tuploadInfo.operations[seaID] = n.GenerateOperation(s, dst, name, hash, size)\n\t}\n\tn.uploadInfos.Lock()\n\tn.uploadInfos.m[tag] = uploadInfo\n\tn.uploadInfos.Unlock()\n\tfor _, seaID := range seaIDs {\n\t\terr := n.SendUploadQuery(seaID, tag, size)\n\t\tif err != nil {\n\t\t\tdelete(uploadInfo.operations, seaID)\n\t\t\tcontinue\n\t\t}\n\t}\n\tgo func(info *userUploadInfo) {\n\t\tuploadInfo.Lock()\n\t\tif len(uploadInfo.operations) == 0 {\n\t\t\tdone <- true\n\t\t}\n\t\tuploadInfo.Unlock()\n\t}(uploadInfo)\n\t<-done\n\tlib.Logger.WithFields(logrus.Fields{\n\t\t\"tag\": tag,\n\t}).Info(\"fragment upload finish\")\n\tn.uploadInfos.Lock()\n\tdelete(n.uploadInfos.m, tag)\n\tn.uploadInfos.Unlock()\n}", "func TestUpload(t *testing.T) 
{\n\toss := NewService(\"<<api key>>\", \"<<secret key>>\")\n\toss.SetEndPoint(\"oss-cn-shanghai.aliyuncs.com\")\n\toss.SetBucket(\"dong-feng\")\n\n\topts1 := &UploadOptions{\n\t\tObjectName: \"test\",\n\t\tPublic: true,\n\t\tIsFolder: true,\n\t}\n\n\tresp := oss.Upload(opts1)\n\tif resp.Error != nil {\n\t\tt.Error(resp.Error)\n\t}\n\n\topts2 := &UploadOptions{\n\t\tObjectName: \"../test/index.html\",\n\t\tPublic: true,\n\t\tParentFolder: \"test\",\n\t}\n\n\tresp = oss.Upload(opts2)\n\tif resp.Error != nil {\n\t\tt.Error(resp.Error)\n\t}\n}", "func (bs *Blobstore) Upload(node *node.Node, source string) error {\n\n\tdest := bs.path(node)\n\t// ensure parent path exists\n\tif err := os.MkdirAll(filepath.Dir(dest), 0700); err != nil {\n\t\treturn errors.Wrap(err, \"Decomposedfs: oCIS blobstore: error creating parent folders for blob\")\n\t}\n\n\tif err := os.Rename(source, dest); err == nil {\n\t\treturn nil\n\t}\n\n\t// Rename failed, file needs to be copied.\n\tfile, err := os.Open(source)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Decomposedfs: oCIS blobstore: Can not open source file to upload\")\n\t}\n\tdefer file.Close()\n\n\tf, err := os.OpenFile(dest, os.O_CREATE|os.O_WRONLY, 0700)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"could not open blob '%s' for writing\", bs.path(node))\n\t}\n\n\tw := bufio.NewWriter(f)\n\t_, err = w.ReadFrom(file)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"could not write blob '%s'\", bs.path(node))\n\t}\n\n\treturn w.Flush()\n}", "func (s s3Repository) putTagsOnS3Object(key, uuid, fileName string) {\n\n\ttags := []*s3.Tag{\n\t\t&s3.Tag{\n\t\t\tKey: aws.String(\"uuid\"),\n\t\t\tValue: aws.String(uuid),\n\t\t},\n\t\t&s3.Tag{\n\t\t\tKey: aws.String(\"file-name\"),\n\t\t\tValue: aws.String(fileName),\n\t\t},\n\t}\n\n\t_, err := s.s3repo.PutObjectTagging(&s3.PutObjectTaggingInput{\n\t\tBucket: aws.String(s.bucketName),\n\t\tKey: aws.String(key),\n\t\tTagging: &s3.Tagging{\n\t\t\tTagSet: tags,\n\t\t},\n\t})\n\n\tif 
err != nil {\n\t\tlog.Printf(\"Error on PutObjectTagging %s\\n\", err)\n\t\tlog.Fatal(err)\n\t}\n}", "func (h *S3Handler) uploadToS3(p *Project, wg *sync.WaitGroup) {\n\n\tdefer recovery()\n\t// get image content\n\tdefer wg.Done()\n\tvar arr []string\n\n\tfor _, url := range(p.Image) {\n\n\t\tbuffer:= img(url)\n\t\tif len(*buffer) > 0 {\n\t\t\t// upload to s3\n\n\t\t\ts := strings.Split(url, \"/\")\n\t\t\turl_key := s[len(s)-1]\n\n\t\t\tmyurl := fmt.Sprintf(\"https://%s.s3.ap-south-1.amazonaws.com/%s\", S3_BUCKET, url_key)\n\n\t\t\t_, err := s3.New(h.Session).PutObject(&s3.PutObjectInput{\n\n\t\t\t\tBucket: aws.String(S3_BUCKET),\n\t\t\t\tKey: aws.String(url_key),\n\t\t\t\tACL: aws.String(\"public-read\"),\n\t\t\t\tBody: bytes.NewReader(*buffer),\n\t\t\t\tContentLength: aws.Int64(int64(len(*buffer))),\n\t\t\t\tContentType: aws.String(http.DetectContentType(*buffer)),\n\t\t\t\tContentDisposition: aws.String(\"attachment\"),\n\t\t\t})\n\n\t\t\tif err != nil{\n\t\t\t\tpanic(\"********************************************************************************************************************************************\")\n\n\n\t\t\t}else {\n\t\t\t\tarr = append(arr, myurl)\n\t\t\t\t//fmt.Println(akki)\n\n\t\t\t}\n\t\t}\n\n\n\t}\n\n\t// update to mongodb\n\tobjID, _ := primitive.ObjectIDFromHex(p.ID)\n\n\tfilter := bson.M{\"_id\": bson.M{\"$eq\": objID}}\n\n\n\tupdate := bson.M{\n\t\t\"$set\": bson.M{\n\t\t\t\"new_image_urlss\": arr,\n\n\t\t},\n\t}\n\tupdateResult, _ := h.Collection.UpdateOne(context.TODO(), filter, update)\n\n\tfmt.Println(updateResult)\n\n\n}", "func (b *Bucket) Upload(file *os.File, filename string) error {\n\t// set session\n\tsess, err := b.setSession()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tuploader := b.newUploaderfunc(sess)\n\t_, err = uploader.Upload(&s3manager.UploadInput{\n\t\tBucket: aws.String(b.BucketName),\n\t\tKey: aws.String(crPath + filename),\n\t\tBody: file,\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error uploading %s to 
bucket %s : %s\", filename, b.BucketName, err.Error())\n\t}\n\n\treturn nil\n}", "func (is *ObjectStorage) FinishBlobUpload(repo, uuid string, body io.Reader, dstDigest godigest.Digest) error {\n\tif err := dstDigest.Validate(); err != nil {\n\t\treturn err\n\t}\n\n\tsrc := is.BlobUploadPath(repo, uuid)\n\n\t// complete multiUploadPart\n\tfileWriter, err := is.store.Writer(context.Background(), src, true)\n\tif err != nil {\n\t\tis.log.Error().Err(err).Str(\"blob\", src).Msg(\"failed to open blob\")\n\n\t\treturn zerr.ErrBadBlobDigest\n\t}\n\n\tif err := fileWriter.Commit(); err != nil {\n\t\tis.log.Error().Err(err).Msg(\"failed to commit file\")\n\n\t\treturn err\n\t}\n\n\tif err := fileWriter.Close(); err != nil {\n\t\tis.log.Error().Err(err).Msg(\"failed to close file\")\n\n\t\treturn err\n\t}\n\n\tfileReader, err := is.store.Reader(context.Background(), src, 0)\n\tif err != nil {\n\t\tis.log.Error().Err(err).Str(\"blob\", src).Msg(\"failed to open file\")\n\n\t\treturn zerr.ErrUploadNotFound\n\t}\n\n\tdefer fileReader.Close()\n\n\tsrcDigest, err := godigest.FromReader(fileReader)\n\tif err != nil {\n\t\tis.log.Error().Err(err).Str(\"blob\", src).Msg(\"failed to open blob\")\n\n\t\treturn zerr.ErrBadBlobDigest\n\t}\n\n\tif srcDigest != dstDigest {\n\t\tis.log.Error().Str(\"srcDigest\", srcDigest.String()).\n\t\t\tStr(\"dstDigest\", dstDigest.String()).Msg(\"actual digest not equal to expected digest\")\n\n\t\treturn zerr.ErrBadBlobDigest\n\t}\n\n\tdst := is.BlobPath(repo, dstDigest)\n\n\tvar lockLatency time.Time\n\n\tis.Lock(&lockLatency)\n\tdefer is.Unlock(&lockLatency)\n\n\tif is.dedupe && fmt.Sprintf(\"%v\", is.cache) != fmt.Sprintf(\"%v\", nil) {\n\t\tif err := is.DedupeBlob(src, dstDigest, dst); err != nil {\n\t\t\tis.log.Error().Err(err).Str(\"src\", src).Str(\"dstDigest\", dstDigest.String()).\n\t\t\t\tStr(\"dst\", dst).Msg(\"unable to dedupe blob\")\n\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tif err := is.store.Move(context.Background(), src, dst); err 
!= nil {\n\t\t\tis.log.Error().Err(err).Str(\"src\", src).Str(\"dstDigest\", dstDigest.String()).\n\t\t\t\tStr(\"dst\", dst).Msg(\"unable to finish blob\")\n\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (b *Bucket) Upload(_ context.Context, name string, r io.Reader) error {\n\tsize, err := objstore.TryToGetSize(r)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"getting size of %s\", name)\n\t}\n\n\tpartNums, lastSlice := int(math.Floor(float64(size)/partSize)), size%partSize\n\tif partNums == 0 {\n\t\tbody, err := bce.NewBodyFromSizedReader(r, lastSlice)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"failed to create SizedReader for %s\", name)\n\t\t}\n\n\t\tif _, err := b.client.PutObject(b.name, name, body, nil); err != nil {\n\t\t\treturn errors.Wrapf(err, \"failed to upload %s\", name)\n\t\t}\n\n\t\treturn nil\n\t}\n\n\tresult, err := b.client.BasicInitiateMultipartUpload(b.name, name)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to initiate MultipartUpload for %s\", name)\n\t}\n\n\tuploadEveryPart := func(partSize int64, part int, uploadId string) (string, error) {\n\t\tbody, err := bce.NewBodyFromSizedReader(r, partSize)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tetag, err := b.client.UploadPart(b.name, name, uploadId, part, body, nil)\n\t\tif err != nil {\n\t\t\tif err := b.client.AbortMultipartUpload(b.name, name, uploadId); err != nil {\n\t\t\t\treturn etag, err\n\t\t\t}\n\t\t\treturn etag, err\n\t\t}\n\t\treturn etag, nil\n\t}\n\n\tvar parts []api.UploadInfoType\n\n\tfor part := 1; part <= partNums; part++ {\n\t\tetag, err := uploadEveryPart(partSize, part, result.UploadId)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"failed to upload part %d for %s\", part, name)\n\t\t}\n\t\tparts = append(parts, api.UploadInfoType{PartNumber: part, ETag: etag})\n\t}\n\n\tif lastSlice != 0 {\n\t\tetag, err := uploadEveryPart(lastSlice, partNums+1, result.UploadId)\n\t\tif err != nil {\n\t\t\treturn 
errors.Wrapf(err, \"failed to upload the last part for %s\", name)\n\t\t}\n\t\tparts = append(parts, api.UploadInfoType{PartNumber: partNums + 1, ETag: etag})\n\t}\n\n\tif _, err := b.client.CompleteMultipartUploadFromStruct(b.name, name, result.UploadId, &api.CompleteMultipartUploadArgs{Parts: parts}); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to set %s upload completed\", name)\n\t}\n\treturn nil\n}", "func uploadToObject(o *schwift.Object, content io.Reader, opts *schwift.UploadOptions, ropts *schwift.RequestOptions) error {\n\t_, err := o.Headers()\n\tif err != nil && !schwift.Is(err, http.StatusNotFound) {\n\t\treturn err\n\t}\n\treturn o.Upload(content, opts, ropts)\n}", "func (p *Packager) Upload(r io.ReadSeeker, key string) (string, error) {\n\tfmt.Printf(\"Uploading %s\\n\", key)\n\n\tinput := &s3.PutObjectInput{\n\t\tBucket: aws.String(p.S3Bucket),\n\t\tBody: r,\n\t\tKey: aws.String(key),\n\t}\n\n\tif p.KMSKeyID != \"\" {\n\t\tinput.ServerSideEncryption = aws.String(\"aws:kms\")\n\t\tinput.SSEKMSKeyId = aws.String(p.KMSKeyID)\n\t}\n\n\t_, err := p.svc.PutObject(input)\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn fmt.Sprintf(\"s3://%s/%s\", p.S3Bucket, key), nil\n}", "func IssueAsset(hclient *equator.Client, seed, code, amount, destination string) error {\n\tkp, err := keypair.Parse(seed)\n\tif err != nil {\n\t\treturn err\n\t}\n\ttx, err := b.Transaction(\n\t\tb.SourceAccount{AddressOrSeed: seed},\n\t\tb.TestNetwork,\n\t\tb.AutoSequence{SequenceProvider: hclient},\n\t\tb.Payment(\n\t\t\tb.Destination{AddressOrSeed: destination},\n\t\t\tb.CreditAmount{\n\t\t\t\tCode: code,\n\t\t\t\tIssuer: kp.Address(),\n\t\t\t\tAmount: amount,\n\t\t\t},\n\t\t),\n\t)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"building tx\")\n\t}\n\t_, err = SignAndSubmitTx(hclient, tx, seed)\n\treturn err\n}", "func (client *Client) Upload(action string, params url.Values, header http.Header, multipartBody io.Reader) (*Response, error) {\r\n\treturn 
client.Request(\"POST\", action, params, header, multipartBody)\r\n}", "func (s *Service) Upload(params *UploadParams) (*Media, *http.Response, error) {\n\tvar resp *http.Response\n\tvar err error\n\tvar twitterMediaID *Media\n\n\ttwitterMediaID, resp, err = s.mediaInit(params)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\tresp, err = s.mediaAppend(twitterMediaID, params)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\tresp, err = s.mediaFinalize(twitterMediaID.MediaID)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn twitterMediaID, resp, nil\n}", "func write_asset(stub shim.ChaincodeStubInterface, args []string) ([]byte, error) {\n\tvar err error\n\tfmt.Println(\"starting write asset\")\n\n\tif len(args) < 16 {\n\t\treturn nil, errors.New(\"Incorrect number of arguments. Expecting at least 16\")\n\t}\n\n\tid := args[0]\n\t//check if asset id already exists\n\t_, err = get_asset(stub, id)\n\tif err == nil {\n\t\tfmt.Println(\"This asset already exists - \" + id)\n\t\treturn nil, errors.New(\"This asset already exists - \" + id)\n\t}\n\n\t//build the asset json string manually\n\tstr := `{\"assetId\": \"` + args[0] + `\",\n\t\t\"assetType\": \"` + args[1] + `\",\n\t\t\"category\": \"` + args[2] + `\",\n\t\t\"assetClass\": \"` + args[3] + `\",\n\t\t\"assetTraceData\":[ {\n\t\t\t\"owner\": \"` + args[4] + `\", \n\t\t\t\"status\": \"` + args[5] + `\",\n\t\t\t\"moveDateTime\": \"` + args[6] + `\",\n\t\t\t\"location\": \"` + args[7] + `\",\n\t\t\t\"geoLocation\": \"` + args[8] + `\"\n\t\t}],\t\t\n\t\t\"assetData\": {\n\t\t\t\"information\": {\n\t\t\t\t\"assetName\": \"` + args[9] + `\",\n\t\t\t\t\"company\": \"` + args[10] + `\",\n\t\t\t\t\"packingType\": \"` + args[11] + `\",\n\t\t\t\t\"packageSize\": \"` + args[12] + `\",\n\t\t\t\t\"mfgDate\": \"` + args[13] + `\",\n\t\t\t\t\"lotNumber\": \"` + args[14] + `\",\n\t\t\t\t\"expiryDate\": \"` + args[15] + `\"\n\t\t\t}\n\t\t}\t\t\t\n\t}`\n\n\tfmt.Println(\"Input PharmaAsset Object - \" + 
str)\n\tvar pharmaAsset PharmaAsset\n\terr = json.Unmarshal([]byte(str), &pharmaAsset)\n\tif err != nil {\n\t\tfmt.Println(\"Error while unmarshalling \" + err.Error())\n\t\treturn nil, errors.New(err.Error())\n\t}\n\t//fmt.Printf(\"PharmaAsset Object after un-marshalling:\\n%s\", pharmaAsset)\n\tif len(args) > 16 {\n\t\tfor i := 16; i < len(args); i = i + 2 {\n\t\t\tvar child AssetChildren\n\t\t\tchild.AssetId = args[i]\n\t\t\tchild.AssetType = args[i+1]\n\t\t\tpharmaAsset.AssetData.Children = append(pharmaAsset.AssetData.Children, child)\n\t\t}\n\t}\n\n\tinputByteStr, err := json.Marshal(pharmaAsset)\n\tif err != nil {\n\t\tfmt.Println(\"Error while marshalling \" + err.Error())\n\t\treturn nil, errors.New(err.Error())\n\t}\n\n\terr = stub.PutState(id, inputByteStr) //store asset with id as key\n\tif err != nil {\n\t\treturn nil, errors.New(err.Error())\n\t}\n\n\tfmt.Println(\"- end write asset\")\n\treturn nil, nil\n}", "func (s *Service) Upload(ctx context.Context, uploadKey, uploadToken, contentType string, data []byte) (result *model.Result, err error) {\n\tif !s.verify(uploadKey, uploadToken) {\n\t\terr = ecode.AccessDenied\n\t\treturn\n\t}\n\tkey, secret, bucket := s.authorizeInfo(uploadKey)\n\tif contentType == \"\" {\n\t\tcontentType = http.DetectContentType(data)\n\t}\n\tlocation, etag, err := s.bfs.Upload(ctx, key, secret, contentType, bucket, \"\", \"\", data)\n\tif err != nil {\n\t\treturn\n\t}\n\tresult = &model.Result{\n\t\tLocation: location,\n\t\tEtag: etag,\n\t}\n\treturn\n}", "func (a *UploadLimitFilter) PutObject(ctx context.Context, node *tree.Node, reader io.Reader, requestData *PutRequestData) (int64, error) {\n\n\tsize, exts := a.getUploadLimits()\n\tif size > 0 && requestData.Size > size {\n\t\treturn 0, errors.Forbidden(VIEWS_LIBRARY_NAME, fmt.Sprintf(\"Upload limit is %d\", size))\n\t}\n\tif len(exts) > 0 {\n\t\t// Beware, Ext function includes the leading dot\n\t\tnodeExt := filepath.Ext(node.GetPath())\n\t\tallowed := false\n\t\tfor _, 
e := range exts {\n\t\t\tif \".\"+strings.ToLower(e) == strings.ToLower(nodeExt) {\n\t\t\t\tallowed = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !allowed {\n\t\t\treturn 0, errors.Forbidden(VIEWS_LIBRARY_NAME, fmt.Sprintf(\"Extension %s is not allowed!\", nodeExt))\n\t\t}\n\t}\n\n\treturn a.next.PutObject(ctx, node, reader, requestData)\n}", "func WriteAsset(name string, data []byte) {\n\tencoded := base64.StdEncoding.EncodeToString([]byte(data))\n\n\t// Add to list of asset names if needed\n\tif _, ok := assets[name]; !ok {\n\t\tassetNames = append(assetNames, name)\n\t}\n\n\t// Store assset\n\tassets[name] = Asset{Name: name, Compressed: false, Data: encoded}\n}", "func TransferAsset(c router.Context) (interface{}, error) {\n\t// get the data from the request and parse it as structure\n\tdata := c.Param(`data`).(GetTransaction)\n\n\t// Validate the inputed data\n\terr := data.Validate()\n\tif err != nil {\n\t\tif _, ok := err.(validation.InternalError); ok {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, status.ErrStatusUnprocessableEntity.WithValidationError(err.(validation.Errors))\n\t}\n\n\t// check receiver data\n\tqueryRecevierString := fmt.Sprintf(\"{\\\"selector\\\": {\\\"user_addresses\\\": {\\\"$elemMatch\\\": {\\\"value\\\": \\\"%s\\\"}},\\\"doc_type\\\":\\\"%s\\\"}}\", data.To, utils.DocTypeUser)\n\treceiverData, receiverID, err5 := utils.Get(c, queryRecevierString, fmt.Sprintf(\"Receiver %s does not exist!\", data.To))\n\tif err5 != nil {\n\t\treturn nil, err5\n\t}\n\n\treceiver := User{}\n\terr = json.Unmarshal(receiverData, &receiver)\n\tif err != nil {\n\t\treturn nil, status.ErrInternal.WithError(err)\n\t}\n\n\tvar receiverOwnLabel string\n\tfor i := range receiver.UserAddresses {\n\t\tif receiver.UserAddresses[i].Value == data.To {\n\t\t\treceiverOwnLabel = receiver.UserAddresses[i].Label\n\t\t}\n\t}\n\n\t// check sender data\n\tquerySenderString := fmt.Sprintf(\"{\\\"selector\\\":{\\\"_id\\\":\\\"%s\\\",\\\"doc_type\\\":\\\"%s\\\"}}\", 
data.From, utils.DocTypeUser)\n\tsenderData, _, err6 := utils.Get(c, querySenderString, fmt.Sprintf(\"You account %s does not exist!\", data.From))\n\tif err6 != nil {\n\t\treturn nil, err6\n\t}\n\tsender := User{}\n\terr = json.Unmarshal(senderData, &sender)\n\tif err != nil {\n\t\treturn nil, status.ErrInternal.WithError(err)\n\t}\n\n\tif sender.WalletBalance < utils.TransferAssetFee {\n\t\treturn nil, status.ErrInternal.WithMessage(fmt.Sprintf(\"You don't have enough coins to transfer the asset.\"))\n\t}\n\n\tfor i := range sender.UserAddresses {\n\t\tif sender.UserAddresses[i].Value == data.To {\n\t\t\treturn nil, status.ErrInternal.WithMessage(fmt.Sprintf(\"You can't transfer asset to yourself!\"))\n\t\t}\n\t}\n\n\t// check sender asset data\n\tqueryString := fmt.Sprintf(\"{\\\"selector\\\":{\\\"code\\\":\\\"%s\\\",\\\"user_id\\\":\\\"%s\\\",\\\"doc_type\\\":\\\"%s\\\"}}\", data.Code, data.From, utils.DocTypeAsset)\n\tsenderAssetData, senderAssetKey, err2 := utils.Get(c, queryString, fmt.Sprintf(\"Symbol %s does not exist!\", data.Code))\n\tif senderAssetData == nil {\n\t\treturn nil, err2\n\t}\n\tsenderAsset := Asset{}\n\terr = json.Unmarshal(senderAssetData, &senderAsset)\n\tif err != nil {\n\t\treturn nil, status.ErrInternal.WithError(err)\n\t}\n\tif data.Quantity > senderAsset.Quantity {\n\t\treturn nil, status.ErrInternal.WithMessage(fmt.Sprintf(\"Quantity should be less or equal to %d\", senderAsset.Quantity))\n\t}\n\n\tstub := c.Stub()\n\ttxID := stub.GetTxID()\n\tdata.CreatedAt = time.Now().Format(time.RFC3339)\n\n\tvar receiverLabel, senderLabel string\n\t// check label of receiver in sender's address book\n\treceiverLabelString := fmt.Sprintf(\"{\\\"selector\\\":{\\\"user_id\\\":\\\"%s\\\",\\\"address\\\":\\\"%s\\\",\\\"label\\\":\\\"%s\\\",\\\"doc_type\\\":\\\"%s\\\"}}\", data.From, data.To, data.Label, utils.DocTypeAddressBook)\n\treceiverLabelData, _, err6 := utils.Get(c, receiverLabelString, fmt.Sprintf(\"Label of receiver does not 
exist!\"))\n\n\t//If label does not exist in address book then save it into db\n\tif receiverLabelData == nil {\n\t\t// check if label is unique\n\t\tcheckUniqueString := fmt.Sprintf(\"{\\\"selector\\\":{\\\"user_id\\\":\\\"%s\\\",\\\"label\\\":\\\"%s\\\",\\\"doc_type\\\":\\\"%s\\\"}}\", data.From, data.Label, utils.DocTypeAddressBook)\n\t\tuniqueLabelData, _, err := utils.Get(c, checkUniqueString, fmt.Sprintf(\"This label already exists!\"))\n\t\tif uniqueLabelData != nil {\n\t\t\treturn nil, status.ErrInternal.WithMessage(fmt.Sprintf(\"This label already exists!\"))\n\t\t}\n\n\t\tlabelTxn := AddressBook{UserID: data.From, Address: data.To, Label: data.Label, DocType: utils.DocTypeAddressBook}\n\t\treceiverLabel = data.Label\n\t\t// Save the data\n\t\terr = c.State().Put(txID, labelTxn)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\n\t\taddressLabel := AddressBook{}\n\t\terr = json.Unmarshal(receiverLabelData, &addressLabel)\n\t\tif err != nil {\n\t\t\treturn nil, status.ErrInternal.WithError(err)\n\t\t}\n\t\treceiverLabel = addressLabel.Label\n\t}\n\n\t// check label of sender in receiver's address book\n\tsenderLabelString := fmt.Sprintf(\"{\\\"selector\\\":{\\\"user_id\\\":\\\"%s\\\",\\\"address\\\":\\\"%s\\\",\\\"doc_type\\\":\\\"%s\\\"}}\", receiver.UserAddresses[0].UserID, sender.Address, utils.DocTypeAddressBook)\n\tsenderLabelData, _, err6 := utils.Get(c, senderLabelString, fmt.Sprintf(\"Label of sender does not exist!\"))\n\n\t//If label does not exist in address book\n\tif senderLabelData == nil {\n\t\tsenderLabel = \"N/A\"\n\t} else {\n\n\t\taddressLabel1 := AddressBook{}\n\t\terr = json.Unmarshal(senderLabelData, &addressLabel1)\n\t\tif err != nil {\n\t\t\treturn nil, status.ErrInternal.WithError(err)\n\t\t}\n\t\tsenderLabel = addressLabel1.Label\n\t}\n\n\t// sender transactions\n\tvar senderTransaction = Transaction{UserID: data.From, Type: utils.Send, Code: data.Code, AssetLabel: senderAsset.Label, Quantity: data.Quantity, DocType: 
utils.DocTypeTransaction, CreatedAt: data.CreatedAt, AddressValue: data.To, LabelValue: receiverOwnLabel, AddressBookLabel: receiverLabel, TxnType: utils.AssetTxnType}\n\terr = c.State().Put(txID+strconv.Itoa(1), senderTransaction)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// receiver transactions\n\tvar receiveTransaction = Transaction{UserID: receiverID, Type: utils.Receive, Code: data.Code, AssetLabel: senderAsset.Label, Quantity: data.Quantity, DocType: utils.DocTypeTransaction, CreatedAt: data.CreatedAt, AddressValue: sender.Address, LabelValue: \"Original\", AddressBookLabel: senderLabel, TxnType: utils.AssetTxnType}\n\terr = c.State().Put(txID+strconv.Itoa(2), receiveTransaction)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsenderAsset.Quantity = senderAsset.Quantity - data.Quantity\n\n\t// update sender asset data\n\terr = c.State().Put(senderAssetKey, senderAsset)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// check receiver asset data\n\tqueryReceiverDataString := fmt.Sprintf(\"{\\\"selector\\\":{\\\"code\\\":\\\"%s\\\",\\\"user_id\\\":\\\"%s\\\",\\\"doc_type\\\":\\\"%s\\\"}}\", data.Code, receiverID, utils.DocTypeAsset)\n\treceiverAssetData, receiveAssetKey, _ := utils.Get(c, queryReceiverDataString, \"\")\n\tif receiverAssetData == nil {\n\t\t// add to receiver asset\n\t\tvar receiveAsset = Asset{UserID: receiverID, Code: data.Code, Label: senderAsset.Label, Quantity: data.Quantity, DocType: utils.DocTypeAsset}\n\t\terr = c.State().Put(txID+strconv.Itoa(3), receiveAsset)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\treceiverAsset := Asset{}\n\t\terr = json.Unmarshal(receiverAssetData, &receiverAsset)\n\t\tif err != nil {\n\t\t\treturn nil, status.ErrInternal.WithError(err)\n\t\t}\n\t\t// update receiver asset\n\t\treceiverAsset.Quantity = receiverAsset.Quantity + data.Quantity\n\t\terr = c.State().Put(receiveAssetKey, receiverAsset)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tsender.WalletBalance 
= sender.WalletBalance - utils.TransferAssetFee\n\n\t// Transfer asset transaction\n\tvar transferAssetTransaction = Transaction{UserID: data.From, Type: utils.Send, Code: utils.WalletCoinSymbol, AssetLabel: senderAsset.Label, Quantity: utils.TransferAssetFee, DocType: utils.DocTypeTransaction, CreatedAt: data.CreatedAt, AddressValue: data.To, LabelValue: receiverOwnLabel, AddressBookLabel: receiverLabel, TxnType: utils.AssetTransferredTxn}\n\terr = c.State().Put(txID+strconv.Itoa(4), transferAssetTransaction)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponseBody := ResponseAddAsset{ID: txID, Balance: sender.WalletBalance, Symbol: sender.Symbol}\n\n\t// Save the data and return the response\n\treturn responseBody, c.State().Put(data.From, sender)\n}", "func UploadAction(w http.ResponseWriter, r *http.Request) {\n\n\tpageVars := PageVars{}\n\taddPageVars(r, &pageVars)\n\n\tif len(pageVars.BName) <= 0 {\n\t\thttp.Redirect(w, r, \"/objectlist?bucketName=\"+pageVars.BName+\"&prefix=\"+pageVars.Prefix+\"&errorM=Invalid bucket name\", http.StatusSeeOther)\n\t} else {\n\t\tbucket := aws.String(pageVars.BName)\n\t\t// Maximum upload of 1024 MB files\n\t\tr.ParseMultipartForm(1024 << 20)\n\n\t\t// Get handler for filename, size and headers\n\t\tfile, handler, err := r.FormFile(\"uploadfile\")\n\n\t\t// close file after func\n\t\tdefer file.Close()\n\n\t\tif err != nil {\n\t\t\thttp.Redirect(w, r, \"/objectlist?bucketName=\"+pageVars.BName+\"&prefix=\"+pageVars.Prefix+\"&errorM=Error uploading the file\", http.StatusSeeOther)\n\t\t} else {\n\t\t\tfn := handler.Filename\n\t\t\tif len(pageVars.Prefix) > 0 {\n\t\t\t\tfn = pageVars.Prefix + \"/\" + fn\n\t\t\t}\n\t\t\tfilename := aws.String(fn)\n\n\t\t\tuploader := s3manager.NewUploader(sess)\n\n\t\t\t_, err = uploader.Upload(&s3manager.UploadInput{\n\t\t\t\tBucket: bucket,\n\t\t\t\tKey: filename,\n\t\t\t\tBody: file,\n\t\t\t})\n\n\t\t\tif err != nil {\n\t\t\t\tif awsErr, ok := err.(awserr.Error); ok 
{\n\t\t\t\t\thttp.Redirect(w, r, \"/objectlist?bucketName=\"+pageVars.BName+\"&prefix=\"+pageVars.Prefix+\"&errorM=\"+awsErr.Message(), http.StatusSeeOther)\n\t\t\t\t} else {\n\t\t\t\t\thttp.Redirect(w, r, \"/objectlist?bucketName=\"+pageVars.BName+\"&prefix=\"+pageVars.Prefix+\"&errorM=Error in uploading to S3\", http.StatusSeeOther)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\thttp.Redirect(w, r, \"/objectlist?bucketName=\"+pageVars.BName+\"&prefix=\"+pageVars.Prefix+\"&successM=Successfully uploaded\", http.StatusSeeOther)\n\t\t\t}\n\t\t}\n\t}\n\n}", "func UploadAndDownloadData(ctx context.Context,\n\tsatelliteAddress, apiKey, passphrase, bucketName, uploadKey string,\n\tdataToUpload []byte) error {\n\n\t// Request access grant to the satellite with the API key and passphrase.\n\tmyConfig := uplink.Config{\n\t\tDialContext: func(ctx context.Context, network, address string) (net.Conn, error) {\n\t\t\tfmt.Println(\"dial context\")\n\t\t\taddressParts := strings.Split(address, \":\")\n\t\t\tport, _ := strconv.Atoi(addressParts[1])\n\t\t\treturn NewJsConn(addressParts[0], port)\n\t\t},\n\t}\n\taccess, err := myConfig.RequestAccessWithPassphrase(ctx, satelliteAddress, apiKey, passphrase)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not request access grant: %v\", err)\n\t}\n\tfmt.Println(\"\\n\\n>>> access grant requested successfully <<<\\n\")\n\t/*\n\t\taccess, err := uplink.RequestAccessWithPassphrase(ctx, satelliteAddress, apiKey, passphrase)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"could not request access grant: %v\", err)\n\t\t}\n\t*/\n\n\t// Open up the Project we will be working with.\n\tproject, err := myConfig.OpenProject(ctx, access)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not open project: %v\", err)\n\t}\n\tdefer project.Close()\n\n\t// Ensure the desired Bucket within the Project is created.\n\t_, err = project.EnsureBucket(ctx, bucketName)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not ensure bucket: %v\", err)\n\t}\n\n\t// 
Intitiate the upload of our Object to the specified bucket and key.\n\tupload, err := project.UploadObject(ctx, bucketName, uploadKey, nil)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not initiate upload: %v\", err)\n\t}\n\n\t// Copy the data to the upload.\n\tbuf := bytes.NewBuffer(dataToUpload)\n\t_, err = io.Copy(upload, buf)\n\tif err != nil {\n\t\t_ = upload.Abort()\n\t\treturn fmt.Errorf(\"could not upload data: %v\", err)\n\t}\n\n\t// Commit the uploaded object.\n\terr = upload.Commit()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not commit uploaded object: %v\", err)\n\t}\n\n\t// Initiate a download of the same object again\n\tdownload, err := project.DownloadObject(ctx, bucketName, uploadKey, nil)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not open object: %v\", err)\n\t}\n\tdefer download.Close()\n\n\t// Read everything from the download stream\n\treceivedContents, err := ioutil.ReadAll(download)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not read data: %v\", err)\n\t}\n\n\t// Check that the downloaded data is the same as the uploaded data.\n\tif !bytes.Equal(receivedContents, dataToUpload) {\n\t\treturn fmt.Errorf(\"got different object back: %q != %q\", dataToUpload, receivedContents)\n\t}\n\tfmt.Printf(\"**** got back \\\"%s\\\" ****\\n\", string(receivedContents))\n\n\treturn nil\n}", "func (s *storageClient) uploadArtifacts(ctx context.Context, manifest []byte, tarballPath, remoteDir string) error {\n\tf, err := os.Open(tarballPath)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to open configs tarball file %q: %w\", tarballPath, err)\n\t}\n\tdefer f.Close()\n\n\tif err := s.upload(ctx, bytes.NewBuffer(manifest), fmt.Sprintf(\"%s/manifest.json\", remoteDir)); err != nil {\n\t\treturn fmt.Errorf(\"error uploading manifest to GCS: %w\", err)\n\t}\n\n\tif err := s.upload(ctx, f, fmt.Sprintf(\"%s/rbe_default.tar\", remoteDir)); err != nil {\n\t\treturn fmt.Errorf(\"error uploading configs tarball to GCS: %w\", 
err)\n\t}\n\treturn nil\n}", "func CreateAsset(\n\ttrx storage.Transaction,\n\tissueTxId merkle.Digest,\n\tissueBlockNumber uint64,\n\tassetId transactionrecord.AssetIdentifier,\n\tnewOwner *account.Account,\n) {\n\t// ensure single threaded\n\ttoLock.Lock()\n\tdefer toLock.Unlock()\n\n\tnewData := &AssetOwnerData{\n\t\ttransferBlockNumber: issueBlockNumber,\n\t\tissueTxId: issueTxId,\n\t\tissueBlockNumber: issueBlockNumber,\n\t\tassetId: assetId,\n\t}\n\n\t// store to database\n\tcreate(trx, issueTxId, newData, newOwner)\n}", "func (s Space) UploadFile(ctx context.Context, fp, env, prefix string) (objectName string, err error) {\n\tbucket, err := service.GetBucket(env)\n\tif err != nil {\n\t\treturn\n\t}\n\tfilename := filepath.Base(fp)\n\tobjectName = path.Join(prefix, filename)\n\n\t_, err = s.PutFile(ctx, bucket, objectName, fp, PutObjectOptions{\n\t\tContentType: \"application/octet-stream\",\n\t})\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif len(s.tags) == 0 {\n\t\treturn\n\t}\n\terr = s.PutTag(ctx, bucket, objectName, s.tags)\n\treturn\n}", "func (f *File) Release(ctx context.Context, req *fuse.ReleaseRequest) error {\n\tf.fs.logger.Debugf(\"File Release request\")\n\n\tf.offset = 0\n\tif f.body != nil {\n\t\terr := f.body.Close()\n\t\tif err != nil {\n\t\t\tf.fs.logger.Printf(\"File release failed: %v\\n\", err)\n\t\t}\n\t}\n\treturn nil\n}", "func uploadFile(ctx context.Context, bucket, object string, content []byte, opts ...UploadOptionFunc) error {\n\tclient, err := storage.NewClient(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"storage.NewClient: %v\", err)\n\t}\n\tdefer client.Close()\n\n\tbr := bytes.NewReader(content)\n\n\twc := client.Bucket(bucket).Object(object).NewWriter(ctx)\n\tfor _, f := range opts {\n\t\tf(wc)\n\t}\n\n\tif _, err = io.Copy(wc, br); err != nil {\n\t\treturn fmt.Errorf(\"io.Copy: %v\", err)\n\t}\n\tif err := wc.Close(); err != nil {\n\t\treturn fmt.Errorf(\"Writer.Close: %v\", err)\n\t}\n\treturn nil\n}", "func (g 
*gsutilUploader) UploadJSON(data interface{}, tempFileName, gcsObjPath string) error {\n\tjsonBytes, err := json.Marshal(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := ioutil.WriteFile(tempFileName, jsonBytes, 0644); err != nil {\n\t\treturn err\n\t}\n\n\t// Upload the written file.\n\treturn g.UploadBytes(nil, tempFileName, prefixGCS(gcsObjPath))\n}", "func (storage *B2Storage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {\n return storage.clients[threadIndex].UploadFile(filePath, content, storage.UploadRateLimit / len(storage.clients))\n}", "func (c *Client) Upload(ctx context.Context, f io.Reader, u *url.URL, param *soap.Upload) error {\n\tp := *param\n\tp.Headers = c.authHeaders(p.Headers)\n\treturn c.Client.Upload(ctx, f, u, &p)\n}", "func (h *Handler) Put(bucket string, dplObj DeployableObject) error {\n\tif dplObj.isEmpty() {\n\t\tklog.V(5).Infof(\"got an empty deployableObject to put to object store\")\n\t\treturn nil\n\t}\n\n\treq := h.Client.PutObjectRequest(&s3.PutObjectInput{\n\t\tBucket: &bucket,\n\t\tKey: &dplObj.Name,\n\t\tBody: bytes.NewReader(dplObj.Content),\n\t})\n\n\treq.HTTPRequest.Header.Set(DeployableGenerateNameMeta, dplObj.GenerateName)\n\treq.HTTPRequest.Header.Set(DeployableVersionMeta, dplObj.Version)\n\n\tresp, err := req.Send(context.Background())\n\tif err != nil {\n\t\tklog.Error(\"Failed to send Put request. 
error: \", err)\n\t\treturn err\n\t}\n\n\tklog.V(5).Info(\"Put Success\", resp)\n\n\treturn nil\n}", "func CreateRelease(res http.ResponseWriter, req *http.Request) {\n\tres.Header().Set(\"Content-Type\", \"application/json\")\n\tc := Release{\"relid\", \"http://ispw:8080/ispw/ispw/releases/relid\"}\n\toutgoingJSON, err := json.Marshal(c)\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\thttp.Error(res, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tres.WriteHeader(http.StatusCreated)\n\tfmt.Fprint(res, string(outgoingJSON))\n}", "func TestAssets(t *testing.T) {\n\ttc := testutil.SystemTest(t)\n\tbuf := &bytes.Buffer{}\n\n\t// Tests\n\n\t// Create a new asset.\n\n\ttestAssetID := fmt.Sprintf(\"%s-%s\", assetID, strconv.FormatInt(time.Now().Unix(), 10))\n\ttestutil.Retry(t, 3, 2*time.Second, func(r *testutil.R) {\n\t\ttestAssetName := fmt.Sprintf(\"projects/%s/locations/%s/assets/%s\", tc.ProjectID, location, testAssetID)\n\t\tif err := createAsset(buf, tc.ProjectID, location, testAssetID, assetURI); err != nil {\n\t\t\tr.Errorf(\"createAsset got err: %v\", err)\n\t\t}\n\t\tif got := buf.String(); !strings.Contains(got, testAssetName) {\n\t\t\tr.Errorf(\"createAsset got\\n----\\n%v\\n----\\nWant to contain:\\n----\\n%v\\n----\\n\", got, testAssetName)\n\t\t}\n\t})\n\tbuf.Reset()\n\n\t// List the assets for a given location.\n\ttestutil.Retry(t, 3, 2*time.Second, func(r *testutil.R) {\n\t\ttestAssetName := fmt.Sprintf(\"projects/%s/locations/%s/assets/%s\", tc.ProjectID, location, testAssetID)\n\t\tif err := listAssets(buf, tc.ProjectID, location); err != nil {\n\t\t\tr.Errorf(\"listAssets got err: %v\", err)\n\t\t}\n\t\tif got := buf.String(); !strings.Contains(got, testAssetName) {\n\t\t\tr.Errorf(\"listAssets got\\n----\\n%v\\n----\\nWant to contain:\\n----\\n%v\\n----\\n\", got, testAssetName)\n\t\t}\n\t})\n\tbuf.Reset()\n\n\t// Get the asset.\n\ttestutil.Retry(t, 3, 2*time.Second, func(r *testutil.R) {\n\t\ttestAssetName := 
fmt.Sprintf(\"projects/%s/locations/%s/assets/%s\", tc.ProjectID, location, testAssetID)\n\t\tif err := getAsset(buf, tc.ProjectID, location, testAssetID); err != nil {\n\t\t\tr.Errorf(\"getAsset got err: %v\", err)\n\t\t}\n\t\tif got := buf.String(); !strings.Contains(got, testAssetName) {\n\t\t\tr.Errorf(\"getAsset got\\n----\\n%v\\n----\\nWant to contain:\\n----\\n%v\\n----\\n\", got, testAssetName)\n\t\t}\n\t})\n\tbuf.Reset()\n\n\t// Delete the asset.\n\ttestutil.Retry(t, 3, 2*time.Second, func(r *testutil.R) {\n\t\tif err := deleteAsset(buf, tc.ProjectID, location, testAssetID); err != nil {\n\t\t\tr.Errorf(\"deleteAsset got err: %v\", err)\n\t\t}\n\t\tif got := buf.String(); !strings.Contains(got, deleteAssetResponse) {\n\t\t\tr.Errorf(\"deleteAsset got\\n----\\n%v\\n----\\nWant to contain:\\n----\\n%v\\n----\\n\", got, deleteAssetResponse)\n\t\t}\n\t})\n\tbuf.Reset()\n\tt.Logf(\"\\nTestAssets() completed\\n\")\n}", "func (h *Handler) Upload(ctx context.Context, sessionID session.ID, reader io.Reader) (string, error) {\n\tpath := h.path(sessionID)\n\th.Logger.Debugf(\"Uploading %s.\", path)\n\n\t// Make sure we don't overwrite an existing recording.\n\t_, err := h.gcsClient.Bucket(h.Config.Bucket).Object(path).Attrs(ctx)\n\tif err != storage.ErrObjectNotExist {\n\t\tif err != nil {\n\t\t\treturn \"\", convertGCSError(err)\n\t\t}\n\t\treturn \"\", trace.AlreadyExists(\"recording for session %q already exists in GCS\", sessionID)\n\t}\n\n\twriter := h.gcsClient.Bucket(h.Config.Bucket).Object(path).NewWriter(ctx)\n\tstart := time.Now()\n\t_, err = io.Copy(writer, reader)\n\t// Always close the writer, even if upload failed.\n\tcloseErr := writer.Close()\n\tif err == nil {\n\t\terr = closeErr\n\t}\n\tuploadLatencies.Observe(time.Since(start).Seconds())\n\tuploadRequests.Inc()\n\tif err != nil {\n\t\treturn \"\", convertGCSError(err)\n\t}\n\treturn fmt.Sprintf(\"%v://%v/%v\", teleport.SchemeGCS, h.Bucket, path), nil\n}", "func (s3as S3ApiService) 
UploadSaveGame(signedURL string, file io.Reader) error {\n\treq, err := http.NewRequest(\"PUT\", signedURL, file)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresp, err := s3as.client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Printf(\"%#v\", resp)\n\n\treturn nil\n}", "func (c *Client) UploadToS3(postData *RFPresignedPostData, filePath string) error {\n\tvar req *http.Request\n\tfileName := filepath.Base(filePath)\n\n\tf, err := os.Open(filePath)\n\tdefer f.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfi, err := f.Stat()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfileSize := int64(fi.Size())\n\n\tcontentLength := postData.emptyMultipartSize(\"file\", fileName) + fileSize\n\n\treadBody, writeBody := io.Pipe()\n\tdefer readBody.Close()\n\n\twriter := multipart.NewWriter(writeBody)\n\n\t// Do the writes async streamed\n\terrChan := make(chan error, 1)\n\tgo func() {\n\t\tdefer writeBody.Close()\n\n\t\t// Add the required fields from S3\n\t\twriteFields(postData.RequiredFields, writer)\n\n\t\tpart, err := writer.CreateFormFile(\"file\", fileName)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t\treturn\n\t\t}\n\t\tif _, err := io.CopyN(part, f, fileSize); err != nil {\n\t\t\terrChan <- err\n\t\t\treturn\n\t\t}\n\t\terrChan <- writer.Close()\n\t}()\n\n\t// Create the Request\n\turl := postData.URL\n\treq, err = http.NewRequest(\"POST\", url, readBody)\n\tif err != nil {\n\t\t<-errChan\n\t\treturn err\n\t}\n\treq.Header.Set(\"Content-Type\", writer.FormDataContentType())\n\treq.ContentLength = contentLength\n\n\t// Perform the upload\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\t<-errChan\n\t\treturn err\n\t}\n\n\tstatus := resp.StatusCode\n\tif status >= 300 {\n\t\tbody, err := ioutil.ReadAll(resp.Body)\n\n\t\t<-errChan\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn fmt.Errorf(\"There was an error uploading your file - %v: %v\", fileName, string(body))\n\t}\n\n\treturn nil\n}", "func (c *s3Client) 
ShareUpload(isRecursive bool, expires time.Duration, contentType string) (map[string]string, *probe.Error) {\n\tbucket, object := c.url2BucketAndObject()\n\tp := minio.NewPostPolicy()\n\tif e := p.SetExpires(time.Now().UTC().Add(expires)); e != nil {\n\t\treturn nil, probe.NewError(e)\n\t}\n\tif strings.TrimSpace(contentType) != \"\" || contentType != \"\" {\n\t\t// No need to verify for error here, since we have stripped out spaces.\n\t\tp.SetContentType(contentType)\n\t}\n\tif e := p.SetBucket(bucket); e != nil {\n\t\treturn nil, probe.NewError(e)\n\t}\n\tif isRecursive {\n\t\tif e := p.SetKeyStartsWith(object); e != nil {\n\t\t\treturn nil, probe.NewError(e)\n\t\t}\n\t} else {\n\t\tif e := p.SetKey(object); e != nil {\n\t\t\treturn nil, probe.NewError(e)\n\t\t}\n\t}\n\tm, e := c.api.PresignedPostPolicy(p)\n\treturn m, probe.NewError(e)\n}" ]
[ "0.7155658", "0.67493165", "0.62648684", "0.6197935", "0.6143918", "0.5810587", "0.5751974", "0.5732749", "0.5682815", "0.56697494", "0.5604278", "0.558865", "0.5574292", "0.55547374", "0.5546097", "0.55362713", "0.5516788", "0.54986763", "0.5481066", "0.5473339", "0.54333305", "0.54071844", "0.5354635", "0.535145", "0.53431726", "0.5338685", "0.53315103", "0.53296053", "0.53179127", "0.5279554", "0.5269428", "0.52391016", "0.523258", "0.5228018", "0.5216378", "0.52035385", "0.52035385", "0.5188335", "0.51821804", "0.5163439", "0.51464134", "0.5138329", "0.5138329", "0.5130834", "0.51291287", "0.51286983", "0.51278746", "0.5124298", "0.511773", "0.5112136", "0.50955456", "0.5087909", "0.50828564", "0.50773484", "0.5068865", "0.50606924", "0.5057963", "0.5053034", "0.5040827", "0.50406754", "0.50381786", "0.5037576", "0.5036664", "0.5021241", "0.5008906", "0.49992815", "0.4991297", "0.4989751", "0.49877107", "0.49812862", "0.4976282", "0.49554974", "0.49532294", "0.49439353", "0.49393758", "0.49341235", "0.49278677", "0.491927", "0.49180603", "0.49159366", "0.4912216", "0.4906755", "0.4903528", "0.49014306", "0.49013072", "0.48987597", "0.4897359", "0.4887894", "0.48753858", "0.48673794", "0.48625374", "0.48586264", "0.48511952", "0.48485276", "0.48478517", "0.4847817", "0.48194522", "0.48129863", "0.48078477", "0.48054907" ]
0.682041
1
ListAssets lists assets associated with a given release
func (c *Client) ListAssets(ctx context.Context, releaseID int64) ([]*github.ReleaseAsset, error) { result := []*github.ReleaseAsset{} page := 1 for { assets, res, err := c.Repositories.ListReleaseAssets(context.TODO(), c.Owner, c.Repo, releaseID, &github.ListOptions{Page: page}) if err != nil { return nil, errors.Wrap(err, "failed to list assets") } if res.StatusCode != http.StatusOK { return nil, errors.Errorf("list release assets: invalid status code: %s", res.Status) } result = append(result, assets...) if res.NextPage <= page { break } page = res.NextPage } return result, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func ListReleases(ctx *context.APIContext) {\n\t// swagger:operation GET /repos/{owner}/{repo}/releases repository repoListReleases\n\t// ---\n\t// summary: List a repo's releases\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// - name: draft\n\t// in: query\n\t// description: filter (exclude / include) drafts, if you dont have repo write access none will show\n\t// type: boolean\n\t// - name: pre-release\n\t// in: query\n\t// description: filter (exclude / include) pre-releases\n\t// type: boolean\n\t// - name: per_page\n\t// in: query\n\t// description: page size of results, deprecated - use limit\n\t// type: integer\n\t// deprecated: true\n\t// - name: page\n\t// in: query\n\t// description: page number of results to return (1-based)\n\t// type: integer\n\t// - name: limit\n\t// in: query\n\t// description: page size of results\n\t// type: integer\n\t// responses:\n\t// \"200\":\n\t// \"$ref\": \"#/responses/ReleaseList\"\n\tlistOptions := utils.GetListOptions(ctx)\n\tif listOptions.PageSize == 0 && ctx.FormInt(\"per_page\") != 0 {\n\t\tlistOptions.PageSize = ctx.FormInt(\"per_page\")\n\t}\n\n\topts := repo_model.FindReleasesOptions{\n\t\tListOptions: listOptions,\n\t\tIncludeDrafts: ctx.Repo.AccessMode >= perm.AccessModeWrite || ctx.Repo.UnitAccessMode(unit.TypeReleases) >= perm.AccessModeWrite,\n\t\tIncludeTags: false,\n\t\tIsDraft: ctx.FormOptionalBool(\"draft\"),\n\t\tIsPreRelease: ctx.FormOptionalBool(\"pre-release\"),\n\t}\n\n\treleases, err := repo_model.GetReleasesByRepoID(ctx, ctx.Repo.Repository.ID, opts)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetReleasesByRepoID\", err)\n\t\treturn\n\t}\n\trels := make([]*api.Release, len(releases))\n\tfor i, release := range releases {\n\t\tif err := 
release.LoadAttributes(ctx); err != nil {\n\t\t\tctx.Error(http.StatusInternalServerError, \"LoadAttributes\", err)\n\t\t\treturn\n\t\t}\n\t\trels[i] = convert.ToAPIRelease(ctx, ctx.Repo.Repository, release)\n\t}\n\n\tfilteredCount, err := repo_model.CountReleasesByRepoID(ctx.Repo.Repository.ID, opts)\n\tif err != nil {\n\t\tctx.InternalServerError(err)\n\t\treturn\n\t}\n\n\tctx.SetLinkHeader(int(filteredCount), listOptions.PageSize)\n\tctx.SetTotalCountHeader(filteredCount)\n\tctx.JSON(http.StatusOK, rels)\n}", "func cmdListReleases(ccmd *cobra.Command, args []string) {\n\taplSvc := apl.NewClient()\n\toutput := runListCommand(&releaseParams, aplSvc.Releases.List)\n\tif output != nil {\n\t\tfields := []string{\"ID\", \"StackID\", \"Version\", \"CreatedTime\"}\n\t\tprintTableResultsCustom(output.([]apl.Release), fields)\n\t}\n}", "func (c *Client) ListAssets(ctx context.Context, params *ListAssetsInput, optFns ...func(*Options)) (*ListAssetsOutput, error) {\n\tif params == nil {\n\t\tparams = &ListAssetsInput{}\n\t}\n\n\tresult, metadata, err := c.invokeOperation(ctx, \"ListAssets\", params, optFns, addOperationListAssetsMiddlewares)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tout := result.(*ListAssetsOutput)\n\tout.ResultMetadata = metadata\n\treturn out, nil\n}", "func (c *Client) List(p ListParameters) ([]Release, error) {\n\tresponse, err := c.client.ListReleases(p.Options()...) 
// TODO Paging.\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\tvar releases []Release\n\tif response != nil && response.Releases != nil {\n\t\tfor _, item := range response.Releases {\n\t\t\treleases = append(releases, *(fromHelm(item)))\n\t\t}\n\t}\n\treturn releases, nil\n}", "func (s *sensuAssetLister) List(selector labels.Selector) (ret []*v1beta1.SensuAsset, err error) {\n\terr = cache.ListAll(s.indexer, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1beta1.SensuAsset))\n\t})\n\treturn ret, err\n}", "func (c *Client) ListReleases(ctx context.Context) ([]*github.RepositoryRelease, error) {\n\tresult := []*github.RepositoryRelease{}\n\tpage := 1\n\tfor {\n\t\tassets, res, err := c.Repositories.ListReleases(context.TODO(), c.Owner, c.Repo, &github.ListOptions{Page: page})\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to list releases\")\n\t\t}\n\t\tif res.StatusCode != http.StatusOK {\n\t\t\treturn nil, errors.Errorf(\"list repository releases: invalid status code: %s\", res.Status)\n\t\t}\n\t\tresult = append(result, assets...)\n\t\tif res.NextPage <= page {\n\t\t\tbreak\n\t\t}\n\t\tpage = res.NextPage\n\t}\n\treturn result, nil\n}", "func (r *GitLabRelease) ListReleases(ctx context.Context) ([]string, error) {\n\tversions := []string{}\n\topt := &gitlab.ListReleasesOptions{\n\t\tPerPage: 100, // max\n\t}\n\n\tfor {\n\t\treleases, resp, err := r.api.ProjectListReleases(ctx, r.owner, r.project, opt)\n\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to list releases for %s/%s: %s\", r.owner, r.project, err)\n\t\t}\n\n\t\tfor _, release := range releases {\n\t\t\tv := tagNameToVersion(release.TagName)\n\t\t\tversions = append(versions, v)\n\t\t}\n\t\tif resp.NextPage == 0 {\n\t\t\tbreak\n\t\t}\n\t\topt.Page = resp.NextPage\n\t}\n\n\treturn versions, nil\n}", "func ListAssetNames() []string {\n\treturn assetNames\n}", "func (hc *Actions) ListReleases() ([]api.Stack, error) {\n\tactList := 
action.NewList(hc.Config)\n\treleases, err := actList.Run()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresult := []api.Stack{}\n\tfor _, rel := range releases {\n\t\tresult = append(result, api.Stack{\n\t\t\tID: rel.Name,\n\t\t\tName: rel.Name,\n\t\t\tStatus: string(rel.Info.Status),\n\t\t})\n\t}\n\treturn result, nil\n}", "func (s *GiteaSource) ListReleases(owner, repo string) ([]SourceRelease, error) {\n\terr := checkOwnerRepoParameters(owner, repo)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trels, res, err := s.api.ListReleases(owner, repo, gitea.ListReleasesOptions{})\n\tif err != nil {\n\t\tlog.Printf(\"API returned an error response: %s\", err)\n\t\tif res != nil && res.StatusCode == 404 {\n\t\t\t// 404 means repository not found or release not found. It's not an error here.\n\t\t\tlog.Print(\"API returned 404. Repository or release not found\")\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, err\n\t}\n\treleases := make([]SourceRelease, len(rels))\n\tfor i, rel := range rels {\n\t\treleases[i] = NewGiteaRelease(rel)\n\t}\n\treturn releases, nil\n}", "func (s *ReleaseTagService) List(ctx context.Context, releaseID int64) ([]*ReleaseTagResponse, error) {\n\tquery := \"%24filter=release/id+eq+%27\" + strconv.FormatInt(releaseID, 10) + \"%27\"\n\treturn s.GetWithQuery(ctx, query)\n}", "func (s sensuAssetNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.SensuAsset, err error) {\n\terr = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1beta1.SensuAsset))\n\t})\n\treturn ret, err\n}", "func (c *Client) ListPackageVersionAssets(ctx context.Context, params *ListPackageVersionAssetsInput, optFns ...func(*Options)) (*ListPackageVersionAssetsOutput, error) {\n\tif params == nil {\n\t\tparams = &ListPackageVersionAssetsInput{}\n\t}\n\n\tresult, metadata, err := c.invokeOperation(ctx, \"ListPackageVersionAssets\", params, optFns, addOperationListPackageVersionAssetsMiddlewares)\n\tif err 
!= nil {\n\t\treturn nil, err\n\t}\n\n\tout := result.(*ListPackageVersionAssetsOutput)\n\tout.ResultMetadata = metadata\n\treturn out, nil\n}", "func releases(ctx context.Context, c *github.Client, org string, project string) ([]*release, error) {\n\tvar result []*release\n\n\topts := &github.ListOptions{PerPage: 100}\n\n\tklog.Infof(\"Downloading releases for %s/%s ...\", org, project)\n\n\tfor page := 1; page != 0; {\n\t\topts.Page = page\n\t\trs, resp, err := c.Repositories.ListReleases(ctx, org, project, opts)\n\t\tif err != nil {\n\t\t\treturn result, err\n\t\t}\n\n\t\tpage = resp.NextPage\n\t\tuntil := time.Now()\n\n\t\tfor _, r := range rs {\n\t\t\tname := r.GetName()\n\t\t\tif name == \"\" {\n\t\t\t\tname = r.GetTagName()\n\t\t\t}\n\n\t\t\trel := &release{\n\t\t\t\tName: name,\n\t\t\t\tDraft: r.GetDraft(),\n\t\t\t\tPrerelease: r.GetPrerelease(),\n\t\t\t\tPublishedAt: r.GetPublishedAt().Time,\n\t\t\t\tActiveUntil: until,\n\t\t\t\tDownloads: map[string]int{},\n\t\t\t\tDownloadRatios: map[string]float64{},\n\t\t\t}\n\n\t\t\tfor _, a := range r.Assets {\n\t\t\t\tif ignoreAssetRe.MatchString(a.GetName()) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\trel.Downloads[a.GetName()] = a.GetDownloadCount()\n\t\t\t\trel.DownloadsTotal += int64(a.GetDownloadCount())\n\t\t\t}\n\n\t\t\tif !rel.Draft && !rel.Prerelease {\n\t\t\t\tuntil = rel.PublishedAt\n\t\t\t}\n\n\t\t\tresult = append(result, rel)\n\t\t}\n\t}\n\n\tfor _, r := range result {\n\t\tr.DaysActive = r.ActiveUntil.Sub(r.PublishedAt).Hours() / 24\n\t\tr.DownloadsPerDay = float64(r.DownloadsTotal) / r.DaysActive\n\n\t\tfor k, v := range r.Downloads {\n\t\t\tr.DownloadRatios[k] = float64(v) / float64(r.DownloadsTotal)\n\t\t}\n\t}\n\n\treturn result, nil\n}", "func (client *Client) DescribeAssetList(request *DescribeAssetListRequest) (response *DescribeAssetListResponse, err error) {\n\tresponse = CreateDescribeAssetListResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}", "func (a *Agent) 
ListReleases(\n\tctx context.Context,\n\tnamespace string,\n\tfilter *types.ReleaseListFilter,\n) ([]*release.Release, error) {\n\tctx, span := telemetry.NewSpan(ctx, \"helm-list-releases\")\n\tdefer span.End()\n\n\ttelemetry.WithAttributes(span,\n\t\ttelemetry.AttributeKV{Key: \"namespace\", Value: namespace},\n\t)\n\n\tlsel := fmt.Sprintf(\"owner=helm,status in (%s)\", strings.Join(filter.StatusFilter, \",\"))\n\n\t// list secrets\n\tsecretList, err := a.K8sAgent.Clientset.CoreV1().Secrets(namespace).List(\n\t\tcontext.Background(),\n\t\tv1.ListOptions{\n\t\t\tLabelSelector: lsel,\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn nil, telemetry.Error(ctx, span, err, \"error getting secret list\")\n\t}\n\n\t// before decoding to helm release, only keep the latest releases for each chart\n\tlatestMap := make(map[string]corev1.Secret)\n\n\tfor _, secret := range secretList.Items {\n\t\trelName, relNameExists := secret.Labels[\"name\"]\n\n\t\tif !relNameExists {\n\t\t\tcontinue\n\t\t}\n\n\t\tid := fmt.Sprintf(\"%s/%s\", secret.Namespace, relName)\n\n\t\tif currLatest, exists := latestMap[id]; exists {\n\t\t\t// get version\n\t\t\tcurrVersionStr, currVersionExists := currLatest.Labels[\"version\"]\n\t\t\tversionStr, versionExists := secret.Labels[\"version\"]\n\n\t\t\tif versionExists && currVersionExists {\n\t\t\t\tcurrVersion, currErr := strconv.Atoi(currVersionStr)\n\t\t\t\tversion, err := strconv.Atoi(versionStr)\n\t\t\t\tif currErr == nil && err == nil && currVersion < version {\n\t\t\t\t\tlatestMap[id] = secret\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tlatestMap[id] = secret\n\t\t}\n\t}\n\n\tchartList := []string{}\n\tres := make([]*release.Release, 0)\n\n\tfor _, secret := range latestMap {\n\t\trel, isErr, err := kubernetes.ParseSecretToHelmRelease(secret, chartList)\n\n\t\tif !isErr && err == nil {\n\t\t\tres = append(res, rel)\n\t\t}\n\t}\n\n\treturn res, nil\n}", "func (c *Client) List() ([]*release.Release, error) {\n\tlist := 
action.NewList(c.actionConfig)\n\treturn list.Run()\n}", "func GetAssets(fs afero.Fs, args []string) []asset.Asset {\n\tassets := make([]asset.Asset, 0)\n\targuments := make([]string, 0)\n\n\tfor _, arg := range args {\n\t\t// split arguments by space, new line, comma, pipe\n\t\tif len(strings.Split(arg, \" \")) > 1 {\n\t\t\targuments = append(arguments, strings.Split(arg, \" \")...)\n\t\t} else if len(strings.Split(arg, \"\\n\")) > 1 {\n\t\t\targuments = append(arguments, strings.Split(arg, \"\\n\")...)\n\t\t} else if len(strings.Split(arg, \",\")) > 1 {\n\t\t\targuments = append(arguments, strings.Split(arg, \",\")...)\n\t\t} else if len(strings.Split(arg, \"|\")) > 1 {\n\t\t\targuments = append(arguments, strings.Split(arg, \"|\")...)\n\t\t} else {\n\t\t\targuments = append(arguments, arg)\n\t\t}\n\t}\n\n\tfor _, argument := range arguments {\n\t\tfiles, err := afero.Glob(fs, filepath.Clean(argument))\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tfor _, file := range files {\n\t\t\tif file != \".\" {\n\t\t\t\tasset := asset.Asset{\n\t\t\t\t\tName: filepath.Base(file),\n\t\t\t\t\tPath: file,\n\t\t\t\t}\n\n\t\t\t\tassets = append(assets, asset)\n\t\t\t}\n\t\t}\n\t}\n\treturn assets\n}", "func (s *SmartContract) GetAssetByRange(ctx contractapi.TransactionContextInterface, startKey string, endKey string) ([]*Asset, error) {\n\n\tresultsIterator, err := ctx.GetStub().GetPrivateDataByRange(assetCollection, startKey, endKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resultsIterator.Close()\n\n\tresults := []*Asset{}\n\n\tfor resultsIterator.HasNext() {\n\t\tresponse, err := resultsIterator.Next()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tvar asset *Asset\n\t\terr = json.Unmarshal(response.Value, &asset)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to unmarshal JSON: %v\", err)\n\t\t}\n\n\t\tresults = append(results, asset)\n\t}\n\n\treturn results, nil\n\n}", "func (s *SmartContract) GetAssetByRange(ctx 
contractapi.TransactionContextInterface, startKey string, endKey string) ([]*Asset, error) {\n\n\tresultsIterator, err := ctx.GetStub().GetPrivateDataByRange(assetCollection, startKey, endKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resultsIterator.Close()\n\n\tresults := []*Asset{}\n\n\tfor resultsIterator.HasNext() {\n\t\tresponse, err := resultsIterator.Next()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tvar asset *Asset\n\t\terr = json.Unmarshal(response.Value, &asset)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to unmarshal JSON: %v\", err)\n\t\t}\n\n\t\tresults = append(results, asset)\n\t}\n\n\treturn results, nil\n\n}", "func Assets(exec boil.Executor, mods ...qm.QueryMod) assetQuery {\n\tmods = append(mods, qm.From(\"\\\"audit\\\".\\\"asset\\\"\"))\n\treturn assetQuery{NewQuery(exec, mods...)}\n}", "func (a *API) AssetsByTag(ctx context.Context, params AssetsByTagParams) (*AssetsResult, error) {\n\tres := &AssetsResult{}\n\t_, err := a.get(ctx, api.BuildPath(assets, params.AssetType, tags, params.Tag), params, res)\n\n\treturn res, err\n}", "func (a *assets) ListAssetFS() []AssetFS {\n\treturn a.a\n}", "func (c Provider) Releases(params []string) (rs *results.ResultSet, err error) {\n\t// Query the API\n\tid := strings.Join(strings.Split(params[1], \"/\"), \"%2f\")\n\turl := fmt.Sprintf(TagsEndpoint, params[0], id)\n\tvar tags Tags\n\tif err = util.FetchJSON(url, \"releases\", &tags); err != nil {\n\t\treturn\n\t}\n\trs = tags.Convert(params[0], params[1])\n\treturn\n}", "func (c Provider) Releases(params []string) (rs *results.ResultSet, err error) {\n\tname := params[0]\n\t// Query the API\n\turl := fmt.Sprintf(SeriesAPI, name)\n\tvar seriesList SeriesList\n\tif err = util.FetchJSON(url, \"series\", &seriesList); err != nil {\n\t\treturn\n\t}\n\t// Proccess Releases\n\tvar lrs Releases\n\tfor _, s := range seriesList.Entries {\n\t\t// Only Active Series\n\t\tif !s.Active {\n\t\t\tcontinue\n\t\t}\n\t\t// Only 
stable or supported\n\t\tswitch s.Status {\n\t\tcase \"Active Development\":\n\t\tcase \"Current Stable Release\":\n\t\tcase \"Supported\":\n\t\tdefault:\n\t\t\tcontinue\n\t\t}\n\t\turl := fmt.Sprintf(ReleasesAPI, name, s.Name)\n\t\tvar vl VersionList\n\t\tif err = util.FetchJSON(url, \"releases\", &vl); err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tfor i := len(vl.Versions) - 1; i >= 0; i-- {\n\t\t\tr := vl.Versions[i]\n\t\t\turl := fmt.Sprintf(FilesAPI, name, s.Name, r.Number)\n\t\t\tvar fl FileList\n\t\t\tif err = util.FetchJSON(url, \"files\", &fl); err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tvar lr Release\n\t\t\tfor _, f := range fl.Files {\n\t\t\t\tif f.Type != \"Code Release Tarball\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tlr.name = name\n\t\t\t\tlr.series = s.Name\n\t\t\t\tlr.release = r.Number\n\t\t\t\tlr.uploaded = f.Uploaded\n\t\t\t}\n\t\t\tlrs = append(lrs, lr)\n\t\t}\n\t}\n\tif len(lrs) == 0 {\n\t\terr = results.NotFound\n\t\treturn\n\t}\n\trs = lrs.Convert(name)\n\terr = nil\n\treturn\n}", "func (s *SmartContract) GetAllAssets(ctx contractapi.TransactionContextInterface) ([]*Asset, error) {\n\t// range query with empty string for startKey and endKey does an\n\t// open-ended query of all assets in the chaincode namespace.\n\tresultsIterator, err := ctx.GetStub().GetStateByRange(\"\", \"\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resultsIterator.Close()\n\n\tvar assets []*Asset\n\tfor resultsIterator.HasNext() {\n\t\tqueryResponse, err := resultsIterator.Next()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tvar asset Asset\n\t\terr = json.Unmarshal(queryResponse.Value, &asset)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tassets = append(assets, &asset)\n\t}\n\n\treturn assets, nil\n}", "func cmdGetReleases(ccmd *cobra.Command, args []string) {\n\taplSvc := apl.NewClient()\n\toutput := runGetCommand(args, aplSvc.Releases.Get)\n\tif output != nil {\n\t\tfields := []string{\"ID\", \"StackID\", \"Version\", 
\"CreatedTime\"}\n\t\tprintTableResultsCustom(output.(apl.Release), fields)\n\t}\n}", "func (app *application) getAssets(w http.ResponseWriter, r *http.Request) {\n\tdata, err := app.assets.GetAssets()\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\tj, err := json.Marshal(data)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.Write(j)\n}", "func (a *Client) GetAssets(params *GetAssetsParams, authInfo runtime.ClientAuthInfoWriter) (*GetAssetsOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetAssetsParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"GetAssets\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/api/v1/Assets\",\n\t\tProducesMediaTypes: []string{\"application/json\", \"text/json\", \"text/plain\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &GetAssetsReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*GetAssetsOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for GetAssets: API contract not enforced by server. 
Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func (a *API) Assets(ctx context.Context, params AssetsParams) (*AssetsResult, error) {\n\tres := &AssetsResult{}\n\t_, err := a.get(ctx, api.BuildPath(assets, params.AssetType, params.DeliveryType), params, res)\n\n\treturn res, err\n}", "func (m *MockGitClient) GetReleaseAssetsByTag(ctx context.Context, owner, repo, version string) ([]formula_updater_types.ReleaseAsset, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetReleaseAssetsByTag\", ctx, owner, repo, version)\n\tret0, _ := ret[0].([]formula_updater_types.ReleaseAsset)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (scl *SimpleConfigurationLayer) SetReleaseAssets(releaseAssets *map[string]*ent.Attachment) {\n\tscl.ReleaseAssets = releaseAssets\n}", "func (p *Project) Releases() []Release {\n\treturn p.releases\n}", "func (a Artifacts) List() ([]string, error) {\n\tswitch a.Files.(type) {\n\tcase string:\n\t\t// TODO: Apply glob-pattern here\n\t\treturn []string{a.Files.(string)}, nil\n\tcase []string:\n\t\treturn a.Files.([]string), nil\n\t}\n\n\treturn []string{}, nil\n}", "func (s *Services) AiringReleases(ctx context.Context, request *empty.Empty) (*proto.ReleasesListResponse, error) {\n\tquery := s.DB\n\n\tvar result []models.Release\n\n\tquery = query.Where(\"started_airing IS NOT NULL AND stopped_airing IS NULL\").Where(\"release_type_id = ?\", 1).Or(\"release_type_id = ?\", 4)\n\tif err := query.Find(&result).Error; err != nil {\n\t\tlog.Println(err)\n\t\treturn nil, err\n\t}\n\n\tfinalRes := []*proto.Release{}\n\n\tfor i := range result {\n\t\tfinalRes = append(finalRes, result[i].ToProto())\n\t}\n\n\treturn &proto.ReleasesListResponse{Releases: finalRes}, nil\n}", "func (o StakingPriceRecordAssetList) List(version uint8) []Token {\n\tassets := common.AssetsV5\n\tif version == 5 {\n\t\tassets = common.AssetsV5\n\t}\n\ttokens := make([]Token, len(assets))\n\tfor i, asset := range assets 
{\n\t\ttokens[i].Code = asset\n\t\ttokens[i].Value = o.Value(asset)\n\t}\n\treturn tokens\n}", "func (s *SmartContract) QueryAssets(ctx contractapi.TransactionContextInterface, queryString string) ([]*Asset, error) {\n\n\tqueryResults, err := s.getQueryResultForQueryString(ctx, queryString)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn queryResults, nil\n}", "func TestGetAssets(t *testing.T) {\n\tt.Parallel()\n\t_, err := k.GetAssets(context.Background())\n\tif err != nil {\n\t\tt.Error(\"GetAssets() error\", err)\n\t}\n}", "func ListAssetSummaries(settings *playfab.Settings, postData *ListAssetSummariesRequestModel, entityToken string) (*ListAssetSummariesResponseModel, error) {\n if entityToken == \"\" {\n return nil, playfab.NewCustomError(\"entityToken should not be an empty string\", playfab.ErrorGeneric)\n }\n b, errMarshal := json.Marshal(postData)\n if errMarshal != nil {\n return nil, playfab.NewCustomError(errMarshal.Error(), playfab.ErrorMarshal)\n }\n\n sourceMap, err := playfab.Request(settings, b, \"/MultiplayerServer/ListAssetSummaries\", \"X-EntityToken\", entityToken)\n if err != nil {\n return nil, err\n }\n \n result := &ListAssetSummariesResponseModel{}\n\n config := mapstructure.DecoderConfig{\n DecodeHook: playfab.StringToDateTimeHook,\n Result: result,\n }\n \n decoder, errDecoding := mapstructure.NewDecoder(&config)\n if errDecoding != nil {\n return nil, playfab.NewCustomError(errDecoding.Error(), playfab.ErrorDecoding)\n }\n \n errDecoding = decoder.Decode(sourceMap)\n if errDecoding != nil {\n return nil, playfab.NewCustomError(errDecoding.Error(), playfab.ErrorDecoding)\n }\n\n return result, nil\n}", "func (broker *BrokerHandler) FetchAssets(ctx context.Context, token string) ([]*UserAsset, error) {\n\tb, err := broker.Request(ctx, \"GET\", \"/api/assets\", nil, token)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar data struct {\n\t\tError\n\t\tAssets []*UserAsset `json:\"data\"`\n\t}\n\tif err := jsoniter.Unmarshal(b, 
&data); err != nil {\n\t\treturn nil, errors.New(string(b))\n\t}\n\n\tif data.Code == 0 {\n\t\treturn data.Assets, nil\n\t}\n\treturn nil, errorWithWalletError(&data.Error)\n}", "func ListDeployments(filter *string, kubeConfig []byte) (*rls.ListReleasesResponse, error) {\n\tdefer tearDown()\n\thClient, err := GetHelmClient(kubeConfig)\n\t// TODO doc the options here\n\tvar sortBy = int32(2)\n\tvar sortOrd = int32(1)\n\tops := []helm.ReleaseListOption{\n\t\thelm.ReleaseListSort(sortBy),\n\t\thelm.ReleaseListOrder(sortOrd),\n\t\t//helm.ReleaseListLimit(limit),\n\t\t//helm.ReleaseListFilter(filter),\n\t\t//helm.ReleaseListStatuses(codes),\n\t\t//helm.ReleaseListNamespace(\"\"),\n\t}\n\tif filter != nil {\n\t\tops = append(ops, helm.ReleaseListFilter(*filter))\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresp, err := hClient.ListReleases(ops...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp, nil\n}", "func (model *GrogModel) AllAssets() ([]*Asset, error) {\n\tvar foundAssets []*Asset\n\n\trows, rowsErr := model.db.DB.Query(`select name, mimeType, content, serve_external, rendered,\n\t\tadded, modified from Assets`)\n\tif rowsErr != nil {\n\t\treturn nil, fmt.Errorf(\"error loading all assets: %v\", rowsErr)\n\t}\n\n\tdefer rows.Close()\n\n\tvar (\n\t\tname string\n\t\tmimeType string\n\t\tcontent = make([]byte, 0)\n\t\tserveExternal int64\n\t\trendered int64\n\t\tadded int64\n\t\tmodified int64\n\t)\n\n\tfor rows.Next() {\n\t\tif rows.Scan(&name, &mimeType, &content, &serveExternal, &rendered, &added, &modified) != sql.ErrNoRows {\n\t\t\tfoundAsset := model.NewAsset(name, mimeType)\n\t\t\tfoundAsset.Content = content\n\t\t\tif serveExternal == 1 {\n\t\t\t\tfoundAsset.ServeExternal = true\n\t\t\t} else {\n\t\t\t\tfoundAsset.ServeExternal = false\n\t\t\t}\n\n\t\t\tif rendered == 1 {\n\t\t\t\tfoundAsset.Rendered = true\n\t\t\t} else {\n\t\t\t\tfoundAsset.Rendered = false\n\t\t\t}\n\n\t\t\tfoundAsset.Added.Set(time.Unix(added, 
0))\n\t\t\tfoundAsset.Modified.Set(time.Unix(modified, 0))\n\n\t\t\tif foundAssets == nil {\n\t\t\t\tfoundAssets = make([]*Asset, 0)\n\t\t\t}\n\t\t\tfoundAssets = append(foundAssets, foundAsset)\n\t\t}\n\t}\n\n\treturn foundAssets, nil\n}", "func GetPlanetAssets(options MetadataOptions, context *Context) (*model.PlanetAssetMetadata, error) {\n\tvar (\n\t\tresponse *http.Response\n\t\terr error\n\t\tbody []byte\n\t\tassets Assets\n\t)\n\t// Note: trailing `/` is needed here to avoid a redirect which causes a Go 1.7 redirect bug issue\n\tinputURL := \"data/v1/item-types/\" + options.ItemType + \"/items/\" + options.ID + \"/assets/\"\n\tif response, err = planetRequest(planetRequestInput{method: \"GET\", inputURL: inputURL}, context); err != nil {\n\t\treturn nil, err\n\t}\n\tswitch {\n\tcase (response.StatusCode == http.StatusUnauthorized) || (response.StatusCode == http.StatusForbidden):\n\t\tmessage := fmt.Sprintf(\"Specified API key is invalid or has inadequate permissions. (%v) \", response.Status)\n\t\terr := util.HTTPErr{Status: response.StatusCode, Message: message}\n\t\tutil.LogAlert(context, message)\n\t\treturn nil, err\n\tcase (response.StatusCode >= 400) && (response.StatusCode < 500):\n\t\tmessage := fmt.Sprintf(\"Failed to get asset information for scene %v: %v. \", options.ID, response.Status)\n\t\terr := util.HTTPErr{Status: response.StatusCode, Message: message}\n\t\tutil.LogAlert(context, message)\n\t\treturn nil, err\n\tcase response.StatusCode >= 500:\n\t\terr = util.LogSimpleErr(context, fmt.Sprintf(\"Failed to get asset information for scene %v. 
\", options.ID), errors.New(response.Status))\n\t\treturn nil, err\n\tdefault:\n\t\t//no op\n\t}\n\tdefer response.Body.Close()\n\tbody, _ = ioutil.ReadAll(response.Body)\n\tif err = json.Unmarshal(body, &assets); err != nil {\n\t\tplErr := util.Error{LogMsg: \"Failed to Unmarshal response from Planet API data request: \" + err.Error(),\n\t\t\tSimpleMsg: \"Planet Labs returned an unexpected response for this request. See log for further details.\",\n\t\t\tResponse: string(body),\n\t\t\tURL: inputURL,\n\t\t\tHTTPStatus: response.StatusCode}\n\t\terr = plErr.Log(context, \"\")\n\t\treturn nil, err\n\t}\n\n\tassetMetadata, err := planetAssetMetadataFromAssets(assets)\n\n\tlog.Print(\"XXXXXXXX\")\n\tlog.Print(string(body))\n\tlog.Print(assets)\n\tlog.Print(assetMetadata)\n\tlog.Print(err)\n\n\tif err == nil && imagerySourceRequiresActivation(options.ImagerySource) {\n\t\tif assetMetadata == nil {\n\t\t\terr = errors.New(\"Found no asset data in response for item type requiring asset activation\")\n\t\t} else if assetMetadata.ActivationURL.String() == \"\" {\n\t\t\terr = errors.New(\"Found no asset activation URL for item type requiring asset activation\")\n\t\t} else if assetMetadata.Status == \"active\" {\n\t\t\tif assetMetadata.AssetURL.String() == \"\" {\n\t\t\t\terr = errors.New(\"Found no asset URL for supposedly active item\")\n\t\t\t} else if assetMetadata.ExpiresAt.IsZero() {\n\t\t\t\terr = errors.New(\"Found no expiration time for supposedly active item\")\n\t\t\t}\n\t\t}\n\t}\n\n\tif err != nil {\n\t\tplErr := util.Error{LogMsg: \"Invalid data from Planet API asset request: \" + err.Error(),\n\t\t\tSimpleMsg: \"Planet Labs returned invalid metadata for this scene's assets.\",\n\t\t\tResponse: string(body),\n\t\t\tURL: inputURL,\n\t\t\tHTTPStatus: response.StatusCode}\n\t\terr = plErr.Log(context, \"\")\n\t\treturn assetMetadata, util.HTTPErr{Status: http.StatusBadGateway, Message: plErr.SimpleMsg}\n\t}\n\n\treturn assetMetadata, nil\n}", "func list_versions(w 
rest.ResponseWriter, r *rest.Request) {\n\tarch := r.PathParam(\"arch\")\n\tversions := []string{\"nightly\", \"beta\", \"stable\"}\n\t// get the numbered versions available\n\tdb_directories := get_directories(cache_instance, db, arch)\n\tfor _, dir := range db_directories {\n\t\tversion_path := strings.Split(dir.Path, \"/\")\n\t\tversion := version_path[len(version_path)-1]\n\t\tif version != \"snapshots\" {\n\t\t\tversions = append(versions, version)\n\t\t}\n\t}\n\t// Filter things folders we don't want in the versions out\n\n\tw.WriteJson(versions)\n}", "func compListReleases(toComplete string, ignoredReleaseNames []string, cfg *action.Configuration) ([]string, cobra.ShellCompDirective) {\n\tcobra.CompDebugln(fmt.Sprintf(\"compListReleases with toComplete %s\", toComplete), settings.Debug)\n\n\tclient := action.NewList(cfg)\n\tclient.All = true\n\tclient.Limit = 0\n\t// Do not filter so as to get the entire list of releases.\n\t// This will allow zsh and fish to match completion choices\n\t// on other criteria then prefix. 
For example:\n\t// helm status ingress<TAB>\n\t// can match\n\t// helm status nginx-ingress\n\t//\n\t// client.Filter = fmt.Sprintf(\"^%s\", toComplete)\n\n\tclient.SetStateMask()\n\treleases, err := client.Run()\n\tif err != nil {\n\t\treturn nil, cobra.ShellCompDirectiveDefault\n\t}\n\n\tvar choices []string\n\tfilteredReleases := filterReleases(releases, ignoredReleaseNames)\n\tfor _, rel := range filteredReleases {\n\t\tchoices = append(choices,\n\t\t\tfmt.Sprintf(\"%s\\t%s-%s -> %s\", rel.Name, rel.Chart.Metadata.Name, rel.Chart.Metadata.Version, rel.Info.Status.String()))\n\t}\n\n\treturn choices, cobra.ShellCompDirectiveNoFileComp\n}", "func (s *ReleaseTagService) ListByCommit(ctx context.Context, commit string) ([]*ReleaseTagResponse, error) {\n\tquery := \"%24filter=release/commit+eq+%27\" + commit + \"%27\"\n\treturn s.GetWithQuery(ctx, query)\n}", "func (e *ExternalAssetTypeService) List(opts *ListOptions) ([]ExternalAssetType, *Response, error) {\n\tendpoint := \"/assets/external/types\"\n\texternalAssetTypes := new([]ExternalAssetType)\n\tresp, err := e.client.getRequestListDecode(endpoint, externalAssetTypes, opts)\n\treturn *externalAssetTypes, resp, err\n}", "func GetReleases(sandboxName string) ([]Release, error) {\n\treturn getReleases(sandboxName)\n}", "func (m *MockGithubAssetClient) ListReleases(ctx context.Context, opt *github.ListOptions) ([]*github.RepositoryRelease, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ListReleases\", ctx, opt)\n\tret0, _ := ret[0].([]*github.RepositoryRelease)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (r Virtual_Guest) GetScaleAssets() (resp []datatypes.Scale_Asset, err error) {\n\terr = r.Session.DoRequest(\"SoftLayer_Virtual_Guest\", \"getScaleAssets\", nil, &r.Options, &resp)\n\treturn\n}", "func (g *GHR) DeleteAssets(ctx context.Context, releaseID int64, localAssets []string, parallel int) error {\n\tstart := time.Now()\n\tdefer func() {\n\t\tDebugf(\"DeleteAssets: time: %d ms\", 
int(time.Since(start).Seconds()*1000))\n\t}()\n\n\teg, ctx := errgroup.WithContext(ctx)\n\n\tassets, err := g.GitHub.ListAssets(ctx, releaseID)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to list assets: %w\", err)\n\t}\n\n\tsemaphore := make(chan struct{}, parallel)\n\tfor _, localAsset := range localAssets {\n\t\tfor _, asset := range assets {\n\t\t\t// https://golang.org/doc/faq#closures_and_goroutines\n\t\t\tlocalAsset, asset := localAsset, asset\n\n\t\t\t// Uploaded asset name is same as basename of local file\n\t\t\tif *asset.Name == filepath.Base(localAsset) {\n\t\t\t\teg.Go(func() error {\n\t\t\t\t\tsemaphore <- struct{}{}\n\t\t\t\t\tdefer func() {\n\t\t\t\t\t\t<-semaphore\n\t\t\t\t\t}()\n\n\t\t\t\t\tfmt.Fprintf(g.outStream, \"--> Deleting: %15s\\n\", *asset.Name)\n\t\t\t\t\tif err := g.GitHub.DeleteAsset(ctx, *asset.ID); err != nil {\n\t\t\t\t\t\treturn fmt.Errorf(\"failed to delete asset: %s %w\", *asset.Name, err)\n\t\t\t\t\t}\n\t\t\t\t\treturn nil\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\n\tif err := eg.Wait(); err != nil {\n\t\treturn fmt.Errorf(\"one of the goroutines failed: %w\", err)\n\t}\n\n\treturn nil\n}", "func (g GithubClient) ListAllReleases(owner, repo string) ([]*github.RepositoryRelease, error) {\n\tlo := &github.ListOptions{\n\t\tPage: 1,\n\t\tPerPage: 100,\n\t}\n\n\treleases, resp, err := g.client.Repositories.ListReleases(context.Background(), owner, repo, lo)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlo.Page++\n\n\tfor lo.Page <= resp.LastPage {\n\t\tre, _, err := g.client.Repositories.ListReleases(context.Background(), owner, repo, lo)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor _, r := range re {\n\t\t\treleases = append(releases, r)\n\t\t}\n\t\tlo.Page++\n\t}\n\treturn releases, nil\n}", "func (c Provider) Releases(params []string) (rs *results.ResultSet, err error) {\n\tname := params[0]\n\tclient, err := ftp.Dial(MirrorsFTP)\n\tif err != nil {\n\t\tlog.Debugf(\"Failed to connect to FTP server: %s\\n\", 
err)\n\t\terr = results.Unavailable\n\t\treturn\n\t}\n\tif err = client.Login(\"anonymous\", \"anonymous\"); err != nil {\n\t\tlog.Debugf(\"Failed to login to FTP server: %s\\n\", err)\n\t\terr = results.Unavailable\n\t\treturn\n\t}\n\tentries, err := client.List(\"gnu\" + \"/\" + name)\n\tif err != nil {\n\t\tlog.Debugf(\"FTP Error: %s\\n\", err.Error())\n\t\terr = results.NotFound\n\t\treturn\n\t}\n\trs = results.NewResultSet(name)\n\tfor _, entry := range entries {\n\t\tif entry.Type != ftp.EntryTypeFile {\n\t\t\tcontinue\n\t\t}\n\t\tif sm := TarballRegex.FindStringSubmatch(entry.Name); len(sm) > 2 {\n\t\t\tr := results.NewResult(sm[1], sm[2], fmt.Sprintf(GNUFormat, name, entry.Name), entry.Time)\n\t\t\trs.AddResult(r)\n\t\t}\n\t}\n\tif rs.Len() == 0 {\n\t\terr = results.NotFound\n\t}\n\tsort.Sort(rs)\n\treturn\n}", "func (repo BoshDirectorRepository) GetReleases() (releases models.Releases, apiResponse net.ApiResponse) {\n\tresponse := []releaseResponse{}\n\n\tpath := \"/releases\"\n\tapiResponse = repo.gateway.GetResource(repo.config.TargetURL+path, repo.config.Username, repo.config.Password, &response)\n\tif apiResponse.IsNotSuccessful() {\n\t\treturn\n\t}\n\n\tlist := []*models.Release{}\n\tfor _, resource := range response {\n\t\tlist = append(list, resource.ToModel())\n\t}\n\treleases = models.Releases(list)\n\n\treturn\n}", "func (sc *ScreenlyClient) List() *PlayList {\n\tplaylist := &PlayList{}\n\t// The assets endpoint returns a JSON list not a JSON object, so the\n\t// response body can't be decoded directly to a PlayList. So we have\n\t// to unmarshal to the PlayList.Assets field.\n\tresponse, err := sc.doHttp(\"GET\", \"assets\", nil)\n\n\tif err == nil {\n\t\t//io.Copy(os.Stdout, response.Body)\n\t\t// Create a buffer and read response body, eg. 
[{...}, {...}]\n\t\tb := new(bytes.Buffer)\n\t\t// (the first ignored parameter is the number of bytes read)\n\t\t_, err := b.ReadFrom(response.Body)\n\n\t\tif err == nil {\n\t\t\t// ...now unmarshal to the PlayList.Assets slice.\n\t\t\terr := json.Unmarshal(b.Bytes(), &playlist.Assets)\n\t\t\tif err == nil {\n\t\t\t\treturn playlist\n\t\t\t}\n\t\t}\n\t}\n\tpanic(err)\n}", "func (s *ReleaseService) SearchReleases(pattern string, by SortReleasesBy, params PaginateParams) ([]*Release, error) {\n\tvar (\n\t\tmethod = http.MethodGet\n\t\tpath = fmt.Sprintf(\"/releases\")\n\t)\n\n\tqueries := url.Values{}\n\t// use params only if not default values\n\tif params.Limit != 0 || params.Offset != 0 {\n\t\tqueries.Set(\"limit\", strconv.FormatUint(uint64(params.Limit), 10))\n\t\tqueries.Set(\"offset\", strconv.FormatUint(uint64(params.Offset), 10))\n\t}\n\tif pattern != \"\" {\n\t\tqueries.Set(\"pattern\", url.QueryEscape(pattern))\n\t}\n\tif by != \"\" {\n\t\tvar qString string\n\t\tif params.SortOrder != \"\" {\n\t\t\tqString = fmt.Sprintf(\"%s_%s\", by, params.SortOrder)\n\t\t} else {\n\t\t\tqString = string(by)\n\t\t}\n\t\tqueries.Set(\"sort\", qString)\n\t}\n\n\treq := s.client.newRequest(path, method)\n\treq.URL.RawQuery = queries.Encode()\n\n\tjs, statusCode, err := s.client.do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tswitch js.Status {\n\tcase \"success\":\n\t\tbreak\n\tcase \"fail\":\n\t\tjF, ok := js.Data.(*jSendFailData)\n\t\tif !ok {\n\t\t\treturn nil, ErrRESTServerError\n\t\t}\n\t\ts.client.Logger.Printf(\"%+v\", jF)\n\t\tswitch statusCode {\n\t\tcase http.StatusBadRequest:\n\t\t\tswitch jF.ErrorReason {\n\t\t\tcase \"limit\":\n\t\t\t\tfallthrough\n\t\t\tcase \"offset\":\n\t\t\t\tfallthrough\n\t\t\tdefault:\n\t\t\t}\n\t\t\tfallthrough\n\t\tdefault:\n\t\t\treturn nil, ErrRESTServerError\n\t\t}\n\tcase \"error\":\n\t\treturn nil, ErrRESTServerError\n\tdefault:\n\t\tswitch statusCode {\n\t\tcase http.StatusUnauthorized:\n\t\t\treturn nil, 
ErrAccessDenied\n\t\tcase http.StatusInternalServerError:\n\t\t\tfallthrough\n\t\tdefault:\n\t\t\treturn nil, ErrRESTServerError\n\t\t}\n\t}\n\n\treleases := make([]*Release, 0)\n\tdata, ok := js.Data.(*json.RawMessage)\n\tif !ok {\n\t\treturn nil, ErrRESTServerError\n\t}\n\terr = json.Unmarshal(*data, &releases)\n\tif err != nil {\n\t\treturn nil, ErrRESTServerError\n\t}\n\treturn releases, nil\n}", "func (c *Client) Get(name string) (*Release, error) {\n\treleases, err := c.List(ListParameters{Filter: name})\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\tfor _, release := range releases {\n\t\tif release.Name == name {\n\t\t\treturn &release, nil\n\t\t}\n\t}\n\treturn nil, trace.NotFound(\"release %v not found\", name)\n}", "func List() (langs []string) {\n\tfor _, lang := range internal.AssetNames() {\n\t\tlangs = append(langs, strings.TrimSuffix(lang, \".json\"))\n\t}\n\treturn\n}", "func HasAssets() predicate.GithubRelease {\n\treturn predicate.GithubRelease(func(s *sql.Selector) {\n\t\tstep := sqlgraph.NewStep(\n\t\t\tsqlgraph.From(Table, FieldID),\n\t\t\tsqlgraph.Edge(sqlgraph.O2M, false, AssetsTable, AssetsColumn),\n\t\t)\n\t\tsqlgraph.HasNeighbors(s, step)\n\t})\n}", "func (c *GitLabClient) ProjectListReleases(ctx context.Context, owner, project string, opt *gitlab.ListReleasesOptions) ([]*gitlab.Release, *gitlab.Response, error) {\n\treturn c.client.Releases.ListReleases(owner+\"/\"+project, opt, gitlab.WithContext(ctx))\n}", "func (g *GHR) UploadAssets(ctx context.Context, releaseID int64, localAssets []string, parallel int) error {\n\tstart := time.Now()\n\tdefer func() {\n\t\tDebugf(\"UploadAssets: time: %d ms\", int(time.Since(start).Seconds()*1000))\n\t}()\n\n\teg, ctx := errgroup.WithContext(ctx)\n\tsemaphore := make(chan struct{}, parallel)\n\tfor _, localAsset := range localAssets {\n\t\tlocalAsset := localAsset\n\t\teg.Go(func() error {\n\t\t\tsemaphore <- struct{}{}\n\t\t\tdefer func() 
{\n\t\t\t\t<-semaphore\n\t\t\t}()\n\n\t\t\tfmt.Fprintf(g.outStream, \"--> Uploading: %15s\\n\", filepath.Base(localAsset))\n\t\t\t_, err := g.GitHub.UploadAsset(ctx, releaseID, localAsset)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to upload asset: %s %w\", localAsset, err)\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t}\n\n\tif err := eg.Wait(); err != nil {\n\t\treturn fmt.Errorf(\"one of the goroutines failed: %w\", err)\n\t}\n\n\treturn nil\n}", "func GetReleases(dbOwner, dbFolder, dbName string) (releases map[string]ReleaseEntry, err error) {\n\tdbQuery := `\n\t\tSELECT release_list\n\t\tFROM sqlite_databases\n\t\tWHERE user_id = (\n\t\t\t\tSELECT user_id\n\t\t\t\tFROM users\n\t\t\t\tWHERE lower(user_name) = lower($1)\n\t\t\t)\n\t\t\tAND folder = $2\n\t\t\tAND db_name = $3`\n\terr = pdb.QueryRow(dbQuery, dbOwner, dbFolder, dbName).Scan(&releases)\n\tif err != nil {\n\t\tlog.Printf(\"Error when retrieving releases for database '%s%s%s': %v\\n\", dbOwner, dbFolder, dbName, err)\n\t\treturn nil, err\n\t}\n\tif releases == nil {\n\t\t// If there aren't any releases yet, return an empty set instead of nil\n\t\treleases = make(map[string]ReleaseEntry)\n\t}\n\treturn releases, nil\n}", "func (f *Factory) findReleases(ctx context.Context, u *url.URL) ([]*claircore.Distribution, error) {\n\tdir, err := u.Parse(\"dists/\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"debian: unable to construct URL: %w\", err)\n\t}\n\treq, err := http.NewRequestWithContext(ctx, http.MethodGet, dir.String(), nil)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"debian: unable to construct request: %w\", err)\n\t}\n\tres, err := f.c.Do(req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"debian: unable to do request: %w\", err)\n\t}\n\tdefer res.Body.Close()\n\tswitch res.StatusCode {\n\tcase http.StatusOK:\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"debian: unexpected status fetching %q: %s\", dir.String(), res.Status)\n\t}\n\tvar buf bytes.Buffer\n\tif _, err := 
buf.ReadFrom(res.Body); err != nil {\n\t\treturn nil, fmt.Errorf(\"debian: unable to read dists listing: %w\", err)\n\t}\n\tms := linkRegexp.FindAllStringSubmatch(buf.String(), -1)\n\n\tvar todos []*claircore.Distribution\nListing:\n\tfor _, m := range ms {\n\t\tdist := m[1]\n\t\tswitch {\n\t\tcase dist == \"\":\n\t\t\tcontinue\n\t\tcase dist[0] == '/', dist[0] == '?':\n\t\t\tcontinue\n\t\t}\n\t\tfor _, s := range skipList {\n\t\t\tif strings.Contains(dist, s) {\n\t\t\t\tcontinue Listing\n\t\t\t}\n\t\t}\n\t\tdist = strings.Trim(dist, \"/\")\n\t\trf, err := dir.Parse(path.Join(dist, `Release`))\n\t\tif err != nil {\n\t\t\tzlog.Info(ctx).\n\t\t\t\tErr(err).\n\t\t\t\tStringer(\"context\", dir).\n\t\t\t\tStr(\"target\", path.Join(dist, `Release`)).\n\t\t\t\tMsg(\"unable to construct URL\")\n\t\t\tcontinue\n\t\t}\n\t\treq, err := http.NewRequestWithContext(ctx, http.MethodGet, rf.String(), nil)\n\t\tif err != nil {\n\t\t\tzlog.Info(ctx).\n\t\t\t\tErr(err).\n\t\t\t\tStringer(\"url\", rf).\n\t\t\t\tMsg(\"unable to construct request\")\n\t\t\tcontinue\n\t\t}\n\t\treq.Header.Set(\"range\", \"bytes=0-512\")\n\t\tres, err := f.c.Do(req)\n\t\tif err != nil {\n\t\t\tzlog.Info(ctx).\n\t\t\t\tErr(err).\n\t\t\t\tStringer(\"url\", rf).\n\t\t\t\tMsg(\"unable to do request\")\n\t\t\tcontinue\n\t\t}\n\t\tbuf.Reset()\n\t\tbuf.ReadFrom(res.Body)\n\t\tres.Body.Close()\n\t\tswitch res.StatusCode {\n\t\tcase http.StatusPartialContent, http.StatusOK:\n\t\tcase http.StatusNotFound: // Probably extremely old, it's fine.\n\t\t\tcontinue\n\t\tdefault:\n\t\t\tzlog.Info(ctx).\n\t\t\t\tStr(\"status\", res.Status).\n\t\t\t\tStringer(\"url\", rf).\n\t\t\t\tMsg(\"unexpected response\")\n\t\t\tcontinue\n\t\t}\n\t\ttp := textproto.NewReader(bufio.NewReader(io.MultiReader(&buf, bytes.NewReader([]byte(\"\\r\\n\\r\\n\")))))\n\t\th, err := tp.ReadMIMEHeader()\n\t\tif err != nil {\n\t\t\tzlog.Info(ctx).Err(err).Msg(\"unable to read MIME-ish headers\")\n\t\t\tcontinue\n\t\t}\n\t\tsv := 
h.Get(\"Version\")\n\t\tif sv == \"\" {\n\t\t\tzlog.Debug(ctx).Str(\"dist\", dist).Msg(\"no version assigned, skipping\")\n\t\t\tcontinue\n\t\t}\n\t\tvs := strings.Split(sv, \".\")\n\t\tif len(vs) == 1 {\n\t\t\tzlog.Debug(ctx).Str(\"dist\", dist).Msg(\"no version assigned, skipping\")\n\t\t\tcontinue\n\t\t}\n\t\tver, err := strconv.ParseInt(vs[0], 10, 32)\n\t\tif err != nil {\n\t\t\tzlog.Info(ctx).Err(err).Msg(\"unable to parse version\")\n\t\t\tcontinue\n\t\t}\n\n\t\ttodos = append(todos, mkDist(dist, int(ver)))\n\t}\n\n\treturn todos, nil\n}", "func (operator *AccessOperator) ListReleaseByApp(cxt context.Context, appName, cfgsetName string) ([]*common.Release, error) {\n\t//query business and app first\n\tbusiness, app, err := getBusinessAndApp(operator, operator.Business, appName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconfigSet, err := operator.innergetConfigSet(cxt, business.Bid, app.Appid, cfgsetName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif configSet == nil {\n\t\treturn nil, nil\n\t}\n\n\trequest := &accessserver.QueryHistoryReleasesReq{\n\t\tSeq: pkgcommon.Sequence(),\n\t\tBid: business.Bid,\n\t\tCfgsetid: configSet.Cfgsetid,\n\t\t//fix: list all release\n\t\t//Operator: operator.User,\n\t\tIndex: operator.index,\n\t\tLimit: operator.limit,\n\t}\n\tgrpcOptions := []grpc.CallOption{\n\t\tgrpc.WaitForReady(true),\n\t}\n\tresponse, err := operator.Client.QueryHistoryReleases(cxt, request, grpcOptions...)\n\tif err != nil {\n\t\tlogger.V(3).Infof(\"ListReleaseByApp failed, %s\", err.Error())\n\t\treturn nil, err\n\t}\n\tif response.ErrCode != common.ErrCode_E_OK {\n\t\tlogger.V(3).Infof(\"ListReleaseByApp all successfully, but response Err, %s\", response.ErrMsg)\n\t\treturn nil, fmt.Errorf(\"%s\", response.ErrMsg)\n\t}\n\treturn response.Releases, nil\n}", "func (h *Helm3Client) ListReleasesNames(labelSelector map[string]string) ([]string, error) {\n\tlabelsSet := make(kblabels.Set)\n\tfor k, v := range labelSelector {\n\t\tlabelsSet[k] = 
v\n\t}\n\tlabelsSet[\"owner\"] = \"helm\"\n\n\tlist, err := h.KubeClient.CoreV1().\n\t\tSecrets(h.Namespace).\n\t\tList(context.TODO(), metav1.ListOptions{LabelSelector: labelsSet.AsSelector().String()})\n\tif err != nil {\n\t\th.LogEntry.Debugf(\"helm: list of releases ConfigMaps failed: %s\", err)\n\t\treturn nil, err\n\t}\n\n\tuniqNamesMap := make(map[string]struct{})\n\tfor _, secret := range list.Items {\n\t\treleaseName, hasKey := secret.Labels[\"name\"]\n\t\tif hasKey && releaseName != \"\" {\n\t\t\tuniqNamesMap[releaseName] = struct{}{}\n\t\t}\n\t}\n\n\t// Do not return ignored release.\n\tdelete(uniqNamesMap, app.HelmIgnoreRelease)\n\n\tuniqNames := make([]string, 0)\n\tfor name := range uniqNamesMap {\n\t\tuniqNames = append(uniqNames, name)\n\t}\n\n\tsort.Strings(uniqNames)\n\treturn uniqNames, nil\n}", "func (gl *GetLicense) ListLicenses(display bool) {\n\tvar licResp licenseResponse\n\t_, body, errs := gl.request.Get(baseLicensesURL).Set(\"Accept\", \"application/vnd.github.drax-preview+json\").End()\n\tcheck(errs)\n\n\terr := json.Unmarshal([]byte(body), &licResp.Licenses)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfor idx, val := range licResp.Licenses {\n\t\toutput, err := normalizeString(val.Key)\n\t\tif err != nil {\n\t\t\tlog.Fatalln(\"Unable to parse string\", val.Key)\n\t\t}\n\t\tgl.licenseMap[output] = val.URL\n\t\tif display == true {\n\t\t\tfmt.Println(idx, \":\", val.Key)\n\t\t}\n\t}\n}", "func handleRepo(ctx context.Context, client *github.Client, repo *github.Repository) (*release, error) {\n\tif !in(orgs, repo.GetOwner().GetLogin()) {\n\t\t// return early\n\t\treturn nil, nil\n\t}\n\topt := &github.ListOptions{\n\t\tPage: 1,\n\t\tPerPage: 100,\n\t}\n\n\treleases, resp, err := client.Repositories.ListReleases(ctx, repo.GetOwner().GetLogin(), repo.GetName(), opt)\n\tif resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusForbidden || err != nil {\n\t\tif _, ok := err.(*github.RateLimitError); ok {\n\t\t\treturn nil, 
err\n\t\t}\n\n\t\t// Skip it because there is no release.\n\t\treturn nil, nil\n\t}\n\tif err != nil || len(releases) < 1 {\n\t\treturn nil, err\n\t}\n\n\trl := release{\n\t\tRepository: repo,\n\t}\n\t// Get information about the binary assets for linux-amd64\n\tarch := \"linux-amd64\"\n\tfor i := 0; i < len(releases); i++ {\n\t\tr := releases[i]\n\t\tif rl.Release == nil && !r.GetDraft() {\n\t\t\t// If this is the latest release and it's not a draft make it the one\n\t\t\t// to return\n\t\t\trl.Release = r\n\t\t\tfor _, asset := range r.Assets {\n\t\t\t\trl.BinaryDownloadCount += asset.GetDownloadCount()\n\t\t\t\tif strings.HasSuffix(asset.GetName(), arch) {\n\t\t\t\t\trl.BinaryURL = asset.GetBrowserDownloadURL()\n\t\t\t\t\trl.BinaryName = asset.GetName()\n\t\t\t\t\trl.BinarySince = units.HumanDuration(time.Since(asset.GetCreatedAt().Time))\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif strings.HasSuffix(asset.GetName(), arch+\".sha256\") {\n\t\t\t\t\tc, err := getURLContent(asset.GetBrowserDownloadURL())\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t\trl.BinarySHA256 = c\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif strings.HasSuffix(asset.GetName(), arch+\".md5\") {\n\t\t\t\t\tc, err := getURLContent(asset.GetBrowserDownloadURL())\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t\trl.BinaryMD5 = c\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tfor _, asset := range r.Assets {\n\t\t\t\trl.BinaryDownloadCount += asset.GetDownloadCount()\n\t\t\t}\n\t\t}\n\t}\n\n\treturn &rl, nil\n}", "func listManifests(ctx context.Context, acrClient api.AcrCLIClientInterface, loginURL string, repoName string) error {\n\tlastManifestDigest := \"\"\n\tresultManifests, err := acrClient.GetAcrManifests(ctx, repoName, \"\", lastManifestDigest)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to list manifests\")\n\t}\n\n\tfmt.Printf(\"Listing manifests for the %q repository:\\n\", repoName)\n\t// A for loop is 
used because the GetAcrManifests method returns by default only 100 manifests and their attributes.\n\tfor resultManifests != nil && resultManifests.ManifestsAttributes != nil {\n\t\tmanifests := *resultManifests.ManifestsAttributes\n\t\tfor _, manifest := range manifests {\n\t\t\tmanifestDigest := *manifest.Digest\n\t\t\tfmt.Printf(\"%s/%s@%s\\n\", loginURL, repoName, manifestDigest)\n\t\t}\n\t\t// Since the GetAcrManifests supports pagination when supplied with the last digest that was returned the last manifest\n\t\t// digest is saved, the manifest array contains at least one element because if it was empty the API would return\n\t\t// a nil pointer instead of a pointer to a length 0 array.\n\t\tlastManifestDigest = *manifests[len(manifests)-1].Digest\n\t\tresultManifests, err = acrClient.GetAcrManifests(ctx, repoName, \"\", lastManifestDigest)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to list manifests\")\n\t\t}\n\t}\n\treturn nil\n}", "func ListDeployments(c *gin.Context) {\n\n\tbanzaiUtils.LogInfo(banzaiConstants.TagListDeployments, \"Start listing deployments\")\n\n\t// --- [ Get cluster ] ---- //\n\tbanzaiUtils.LogInfo(banzaiConstants.TagListDeployments, \"Get cluster\")\n\tcloudCluster, err := cloud.GetClusterFromDB(c)\n\tif err != nil {\n\t\treturn\n\t}\n\tbanzaiUtils.LogInfo(banzaiConstants.TagListDeployments, \"Getting cluster succeeded\")\n\n\t// --- [ Get K8S Config ] --- //\n\tkubeConfig, err := cloud.GetK8SConfig(cloudCluster, c)\n\tif err != nil {\n\t\treturn\n\t}\n\tbanzaiUtils.LogInfo(banzaiConstants.TagListDeployments, \"Getting K8S Config Succeeded\")\n\n\t// --- [ Get deployments ] --- //\n\tbanzaiUtils.LogInfo(banzaiConstants.TagListDeployments, \"Get deployments\")\n\tresponse, err := helm.ListDeployments(nil, kubeConfig)\n\tif err != nil {\n\t\tbanzaiUtils.LogWarn(banzaiConstants.TagListDeployments, \"Error getting deployments. 
\", err)\n\t\tcloud.SetResponseBodyJson(c, http.StatusNotFound, gin.H{\n\t\t\tcloud.JsonKeyStatus: http.StatusNotFound,\n\t\t\tcloud.JsonKeyMessage: fmt.Sprintf(\"%s\", err),\n\t\t})\n\t\treturn\n\t}\n\tvar releases []gin.H\n\tif len(response.Releases) > 0 {\n\t\tfor _, r := range response.Releases {\n\t\t\tbody := gin.H{\n\t\t\t\t\"name\": r.Name,\n\t\t\t\t\"chart\": fmt.Sprintf(\"%s-%s\", r.Chart.Metadata.Name, r.Chart.Metadata.Version),\n\t\t\t\t\"version\": r.Version,\n\t\t\t\t\"updated\": timeconv.String(r.Info.LastDeployed),\n\t\t\t\t\"status\": r.Info.Status.Code.String()}\n\t\t\treleases = append(releases, body)\n\t\t}\n\t} else {\n\t\tmsg := \"There is no installed charts.\"\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagListDeployments, msg)\n\t\tcloud.SetResponseBodyJson(c, http.StatusOK, gin.H{\n\t\t\tcloud.JsonKeyMessage: msg,\n\t\t})\n\t\treturn\n\t}\n\n\tcloud.SetResponseBodyJson(c, http.StatusOK, releases)\n\treturn\n}", "func (k Keeper) GetAssets(ctx sdk.Context) (types.AssetParams, bool) {\n\tparams := k.GetParams(ctx)\n\treturn params.SupportedAssets, len(params.SupportedAssets) > 0\n}", "func (s *sensuAssetLister) SensuAssets(namespace string) SensuAssetNamespaceLister {\n\treturn sensuAssetNamespaceLister{indexer: s.indexer, namespace: namespace}\n}", "func parseReleasesAPI() (releases, error) {\n\tr, err := http.Get(\"https://api.github.com/repos/eze-kiel/shaloc/releases\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer r.Body.Close()\n\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar rel releases\n\tif err = json.Unmarshal(body, &rel); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn rel, nil\n}", "func (fn GetAssetsListHandlerFunc) Handle(params GetAssetsListParams) middleware.Responder {\n\treturn fn(params)\n}", "func (c Provider) Releases(name string) (rs *results.ResultSet, s results.Status) {\n\trs, s = c.GetReleases(name, 100)\n\treturn\n}", "func GetAllReleases(ctx context.Context, 
client models.Client, opts models.ListReleasesOptions) (Releases, error) {\n\tvar (\n\t\tvariables = map[string]interface{}{\n\t\t\t\"cursor\": (*githubv4.String)(nil),\n\t\t\t\"owner\": githubv4.String(opts.Owner),\n\t\t\t\"name\": githubv4.String(opts.Repository),\n\t\t}\n\n\t\treleases = []Release{}\n\t)\n\n\tfor {\n\t\tq := &QueryListReleases{}\n\t\tif err := client.Query(ctx, q, variables); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treleases = append(releases, q.Repository.Releases.Nodes...)\n\t\tif !q.Repository.Releases.PageInfo.HasNextPage {\n\t\t\tbreak\n\t\t}\n\t\tvariables[\"cursor\"] = q.Repository.Releases.PageInfo.EndCursor\n\t}\n\n\treturn releases, nil\n}", "func FilesFor(ext string) []string {\n return assets[ext]\n}", "func (client DeploymentsClient) List(ctx context.Context, resourceGroupName string, serviceName string, appName string, version []string) (result DeploymentResourceCollectionPage, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/DeploymentsClient.List\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.drc.Response.Response != nil {\n\t\t\t\tsc = result.drc.Response.Response.StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\tresult.fn = client.listNextResults\n\treq, err := client.ListPreparer(ctx, resourceGroupName, serviceName, appName, version)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"appplatform.DeploymentsClient\", \"List\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.ListSender(req)\n\tif err != nil {\n\t\tresult.drc.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"appplatform.DeploymentsClient\", \"List\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult.drc, err = client.ListResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"appplatform.DeploymentsClient\", \"List\", resp, \"Failure responding to 
request\")\n\t\treturn\n\t}\n\tif result.drc.hasNextLink() && result.drc.IsEmpty() {\n\t\terr = result.NextWithContext(ctx)\n\t\treturn\n\t}\n\n\treturn\n}", "func (a *API) AssetTypes(ctx context.Context) (*AssetTypesResult, error) {\n\tres := &AssetTypesResult{}\n\t_, err := a.get(ctx, assets, nil, res)\n\n\treturn res, err\n}", "func (a *assets) GetAsset(requested string) (http.File, error) {\n\tfor _, x := range a.a {\n\t\tf, err := x.AssetHttp(requested)\n\t\tif err == nil {\n\t\t\treturn f, nil\n\t\t}\n\t}\n\treturn nil, assetUnavailable(requested)\n}", "func getChildrenAsset(id int) ([]Asset, error) {\n\tparams := map[string]string{\n\t\t\"predicate\": fmt.Sprintf(\"parent='%d'\", id),\n\t}\n\tresp, err := get(\"asset\", params)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"Failed TCMD request\")\n\t}\n\n\tvar result []Asset\n\terr = json.Unmarshal(resp, &result)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"Failed to unmarshal TCMD response\")\n\t}\n\n\tif len(result) > 0 {\n\t\treturn result, nil\n\t}\n\treturn nil, nil\n}", "func (client *Client) DescribeAssetListWithChan(request *DescribeAssetListRequest) (<-chan *DescribeAssetListResponse, <-chan error) {\n\tresponseChan := make(chan *DescribeAssetListResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.DescribeAssetList(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}", "func (*ListAssetsResponse) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_asset_v1_asset_service_proto_rawDescGZIP(), []int{4}\n}", "func NewGetAssetsList(ctx *middleware.Context, handler GetAssetsListHandler) *GetAssetsList {\n\treturn &GetAssetsList{Context: ctx, Handler: handler}\n}", "func (a *API) 
AssetsByContext(ctx context.Context, params AssetsByContextParams) (*AssetsResult, error) {\n\tres := &AssetsResult{}\n\t_, err := a.get(ctx, api.BuildPath(assets, params.AssetType, cldContext), params, res)\n\n\treturn res, err\n}", "func (api *licenseAPI) ApisrvList(ctx context.Context, opts *api.ListWatchOptions) ([]*cluster.License, error) {\n\tif api.ct.resolver != nil {\n\t\tapicl, err := api.ct.apiClient()\n\t\tif err != nil {\n\t\t\tapi.ct.logger.Errorf(\"Error creating API server clent. Err: %v\", err)\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn apicl.ClusterV1().License().List(context.Background(), opts)\n\t}\n\n\t// List from local cache\n\tctkitObjs, err := api.List(ctx, opts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret []*cluster.License\n\tfor _, obj := range ctkitObjs {\n\t\tret = append(ret, &obj.License)\n\t}\n\treturn ret, nil\n}", "func AssetResourcesFromPair(\n\tctx context.Context,\n\tpair string,\n) ([]AssetResource, error) {\n\tss := strings.Split(pair, \"/\")\n\tif len(ss) != 2 {\n\t\treturn nil, errors.Trace(errors.Newf(\"Invalid asset pair: %s\", pair))\n\t}\n\tbase, err := AssetResourceFromName(ctx, ss[0])\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tquote, err := AssetResourceFromName(ctx, ss[1])\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\treturn []AssetResource{*base, *quote}, nil\n}", "func (c Client) GetAssetItems(query url.Values) ([]Model, error) {\n\tvar res struct {\n\t\tRecords []Model\n\t}\n\terr := c.GetRecordsFor(TableModel, query, &res)\n\treturn res.Records, err\n}", "func (r *RepositoryRelease) GetAssetsURL() string {\n\tif r == nil || r.AssetsURL == nil {\n\t\treturn \"\"\n\t}\n\treturn *r.AssetsURL\n}", "func List(params map[string]string) ([]RPMInfo, error) {\n\tlog.Printf(\"Entering repo::List(%v)\", params)\n\tdefer log.Println(\"Exiting repo::List\")\n\n\tproductVersion := params[\"productVersion\"]\n\n\tvar info []RPMInfo\n\n\tfiles, err := listRepo(params)\n\tif err != 
nil {\n\t\treturn info, err\n\t}\n\n\tinfo, err = ListRPMFilesInfo(files, productVersion)\n\tif err != nil {\n\t\treturn info, err\n\t}\n\n\toutput.Write(info)\n\n\treturn info, nil\n}", "func (svc *ServiceContext) GetArchivesList(c *gin.Context) {\n\ttype Archives struct {\n\t\tDisplay string `json:\"displayDate\"`\n\t\tInternal string `json:\"internalDate\"`\n\t}\n\tvar data []Archives\n\tq := svc.DB.NewQuery(`select distinct DATE_FORMAT(submitted_at,'%M %Y') display, DATE_FORMAT(submitted_at,'%Y-%m') as internal\n\t\t from submissions where public=1 order by submitted_at desc`)\n\terr := q.All(&data)\n\tif err != nil {\n\t\tlog.Printf(\"ERROR: Unable to get archives list: %s\", err.Error())\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, data)\n}", "func BeeReleasesInfo() (repos []Releases) {\n\tvar url = \"https://api.github.com/repos/beego/bee/releases\"\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\tbeeLogger.Log.Warnf(\"Get bee releases from github error: %s\", err)\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\tbodyContent, _ := ioutil.ReadAll(resp.Body)\n\tif err = json.Unmarshal(bodyContent, &repos); err != nil {\n\t\tbeeLogger.Log.Warnf(\"Unmarshal releases body error: %s\", err)\n\t\treturn\n\t}\n\treturn\n}", "func (a *API) AssetsByModeration(ctx context.Context, params AssetsByModerationParams) (*AssetsResult, error) {\n\tres := &AssetsResult{}\n\t_, err := a.get(ctx, api.BuildPath(assets, params.AssetType, moderations, params.Kind, params.Status), params, res)\n\n\treturn res, err\n}", "func TestAssets(t *testing.T) {\n\ttc := testutil.SystemTest(t)\n\tbuf := &bytes.Buffer{}\n\n\t// Tests\n\n\t// Create a new asset.\n\n\ttestAssetID := fmt.Sprintf(\"%s-%s\", assetID, strconv.FormatInt(time.Now().Unix(), 10))\n\ttestutil.Retry(t, 3, 2*time.Second, func(r *testutil.R) {\n\t\ttestAssetName := fmt.Sprintf(\"projects/%s/locations/%s/assets/%s\", tc.ProjectID, location, testAssetID)\n\t\tif err := createAsset(buf, tc.ProjectID, location, testAssetID, 
assetURI); err != nil {\n\t\t\tr.Errorf(\"createAsset got err: %v\", err)\n\t\t}\n\t\tif got := buf.String(); !strings.Contains(got, testAssetName) {\n\t\t\tr.Errorf(\"createAsset got\\n----\\n%v\\n----\\nWant to contain:\\n----\\n%v\\n----\\n\", got, testAssetName)\n\t\t}\n\t})\n\tbuf.Reset()\n\n\t// List the assets for a given location.\n\ttestutil.Retry(t, 3, 2*time.Second, func(r *testutil.R) {\n\t\ttestAssetName := fmt.Sprintf(\"projects/%s/locations/%s/assets/%s\", tc.ProjectID, location, testAssetID)\n\t\tif err := listAssets(buf, tc.ProjectID, location); err != nil {\n\t\t\tr.Errorf(\"listAssets got err: %v\", err)\n\t\t}\n\t\tif got := buf.String(); !strings.Contains(got, testAssetName) {\n\t\t\tr.Errorf(\"listAssets got\\n----\\n%v\\n----\\nWant to contain:\\n----\\n%v\\n----\\n\", got, testAssetName)\n\t\t}\n\t})\n\tbuf.Reset()\n\n\t// Get the asset.\n\ttestutil.Retry(t, 3, 2*time.Second, func(r *testutil.R) {\n\t\ttestAssetName := fmt.Sprintf(\"projects/%s/locations/%s/assets/%s\", tc.ProjectID, location, testAssetID)\n\t\tif err := getAsset(buf, tc.ProjectID, location, testAssetID); err != nil {\n\t\t\tr.Errorf(\"getAsset got err: %v\", err)\n\t\t}\n\t\tif got := buf.String(); !strings.Contains(got, testAssetName) {\n\t\t\tr.Errorf(\"getAsset got\\n----\\n%v\\n----\\nWant to contain:\\n----\\n%v\\n----\\n\", got, testAssetName)\n\t\t}\n\t})\n\tbuf.Reset()\n\n\t// Delete the asset.\n\ttestutil.Retry(t, 3, 2*time.Second, func(r *testutil.R) {\n\t\tif err := deleteAsset(buf, tc.ProjectID, location, testAssetID); err != nil {\n\t\t\tr.Errorf(\"deleteAsset got err: %v\", err)\n\t\t}\n\t\tif got := buf.String(); !strings.Contains(got, deleteAssetResponse) {\n\t\t\tr.Errorf(\"deleteAsset got\\n----\\n%v\\n----\\nWant to contain:\\n----\\n%v\\n----\\n\", got, deleteAssetResponse)\n\t\t}\n\t})\n\tbuf.Reset()\n\tt.Logf(\"\\nTestAssets() completed\\n\")\n}", "func (api *RestAPI) GetRelease(epicID string) ([]ReleaseItem, error) {\n\tresults := 
[]ReleaseItem{}\n\tissue, err := api.getIssue(epicID)\n\tif err != nil {\n\t\treturn results, err\n\t}\n\n\tscanner := bufio.NewScanner(strings.NewReader(issue.Fields.Description.(string)))\n\tfor scanner.Scan() {\n\t\tline := strings.ToLower(scanner.Text())\n\t\tif strings.Contains(line, \"/app#/projects\") {\n\t\t\tparts := strings.Split(line, \"/\")\n\t\t\tresults = append(results, ReleaseItem{Project: parts[5], Version: parts[7]})\n\t\t}\n\t}\n\treturn results, nil\n}", "func githubLatestAssets(repo string) (string, []githubAsset, error) {\n\turlstr := \"https://api.github.com/repos/\" + repo + \"/releases/latest\"\n\n\t// create request\n\treq, err := http.NewRequest(\"GET\", urlstr, nil)\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\n\t// do request\n\tcl := &http.Client{}\n\tres, err := cl.Do(req)\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\tdefer res.Body.Close()\n\n\tvar release struct {\n\t\tName string `json:\"name\"`\n\t\tAssets []githubAsset `json:\"assets\"`\n\t}\n\tif err := json.NewDecoder(res.Body).Decode(&release); err != nil {\n\t\treturn \"\", nil, err\n\t}\n\n\treturn release.Name, release.Assets, nil\n}", "func (c *Client) AllReleases() ([]db.Release, error) {\n\tnames, err := c.names()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tuiprogress.Start()\n\tbar := uiprogress.AddBar(len(names))\n\tbar.PrependFunc(func(b *uiprogress.Bar) string {\n\t\trate := float64(b.Current()) / b.TimeElapsed().Seconds()\n\t\tremainingCount := b.Total - b.Current()\n\t\tremainingTime := time.Duration(float64(remainingCount)/rate) * time.Second\n\n\t\treturn fmt.Sprintf(\n\t\t\t\"%v left (%.f/s)\",\n\t\t\tremainingTime,\n\t\t\trate,\n\t\t)\n\t})\n\treleases := make(chan db.Release)\n\tc.addReleases(names, releases, bar)\n\tclose(releases)\n\treturn releaseChanToSlice(releases), nil\n}", "func (*ListAssetsRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_asset_v1_asset_service_proto_rawDescGZIP(), []int{3}\n}", "func (x 
SyntheticMonitorEntity) GetAssets() []SyntheticsSyntheticMonitorAsset {\n\treturn x.Assets\n}", "func (n Node) bundledAssets(suffix string) (bytes.Buffer, error) {\n\tvar b bytes.Buffer\n\n\tfiles, err := filepath.Glob(filepath.Join(n.path, \"*.\"+suffix))\n\tif err != nil {\n\t\treturn b, err\n\t}\n\tif len(files) == 0 {\n\t\treturn b, fmt.Errorf(\"no .%s assets in path %s\", suffix, n.path)\n\t}\n\n\tfor _, f := range files {\n\t\tc, err := ioutil.ReadFile(f)\n\t\tif err != nil {\n\t\t\treturn b, err\n\t\t}\n\t\tb.Write(c)\n\t}\n\treturn b, nil\n}", "func (api *licenseAPI) List(ctx context.Context, opts *api.ListWatchOptions) ([]*License, error) {\n\tvar objlist []*License\n\tobjs, err := api.ct.List(\"License\", ctx, opts)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, obj := range objs {\n\t\tswitch tp := obj.(type) {\n\t\tcase *License:\n\t\t\teobj := obj.(*License)\n\t\t\tobjlist = append(objlist, eobj)\n\t\tdefault:\n\t\t\tlog.Fatalf(\"Got invalid object type %v while looking for License\", tp)\n\t\t}\n\t}\n\n\treturn objlist, nil\n}" ]
[ "0.6093649", "0.6046222", "0.59944326", "0.59482193", "0.59288824", "0.58725035", "0.5847745", "0.5836617", "0.5797065", "0.5786176", "0.56488395", "0.5619454", "0.5564959", "0.55334187", "0.55060554", "0.55030614", "0.5474657", "0.5451275", "0.5429421", "0.5429421", "0.5406295", "0.53806317", "0.537906", "0.53558564", "0.5352642", "0.534725", "0.5295007", "0.52913266", "0.5241822", "0.5205474", "0.5195217", "0.51909876", "0.51652867", "0.5140375", "0.5129053", "0.5118103", "0.51154244", "0.5088738", "0.50770193", "0.50147176", "0.5013716", "0.5003745", "0.5000244", "0.4982303", "0.49815413", "0.49522084", "0.49482363", "0.49404442", "0.49137935", "0.4906011", "0.48906827", "0.48879662", "0.4880456", "0.48707545", "0.48685378", "0.4863946", "0.48483688", "0.4847425", "0.48381075", "0.4827649", "0.4826882", "0.4818904", "0.48153716", "0.4802901", "0.478812", "0.47866273", "0.47762328", "0.47714424", "0.47623935", "0.47598225", "0.4745174", "0.47450766", "0.47413683", "0.4732762", "0.472315", "0.4721168", "0.47205603", "0.47115025", "0.4701954", "0.46997878", "0.46938914", "0.46924338", "0.46869245", "0.46818835", "0.468065", "0.4680072", "0.46737957", "0.46616748", "0.46565196", "0.46553206", "0.46439207", "0.4629752", "0.4629206", "0.4618484", "0.4612731", "0.46061137", "0.4600307", "0.45954624", "0.45901498", "0.4585719" ]
0.77932364
0
set up the database connection
func init() { //load in environment variables from .env //will print error message when running from docker image //because env file is passed into docker run command envErr := godotenv.Load("/home/ubuntu/go/src/github.com/200106-uta-go/BAM-P2/.env") if envErr != nil { if !strings.Contains(envErr.Error(), "no such file or directory") { log.Println("Error loading .env: ", envErr) } } var server = os.Getenv("DB_SERVER") var dbPort = os.Getenv("DB_PORT") var dbUser = os.Getenv("DB_USER") var dbPass = os.Getenv("DB_PASS") var db = os.Getenv("DB_NAME") // Build connection string connString := fmt.Sprintf("server=%s;user id=%s;password=%s;port=%s;database=%s;", server, dbUser, dbPass, dbPort, db) // Create connection pool var err error database, err = sql.Open("sqlserver", connString) if err != nil { log.Fatal("Error creating connection pool: ", err.Error()) } ctx := context.Background() err = database.PingContext(ctx) httputil.GenericErrHandler("error", err) //create user table if it doesn't exist statement, err := database.Prepare(`IF NOT EXISTS (SELECT * FROM sysobjects WHERE name='user_table' and xtype='U') CREATE TABLE user_table (id INT NOT NULL IDENTITY(1,1) PRIMARY KEY, username VARCHAR(255), password VARCHAR(255))`) if err != nil { log.Fatal(err) } _, err = statement.Exec() if err != nil { log.Fatalln(err) } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *syncer) dbSetup() error {\n\ts.logger.Info(\"Connecting to the database\")\n\n\tsqlDB, err := sql.Open(\"sqlite3_with_fk\", s.config.Database)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Setup the DB struct\n\ts.db = sqlDB\n\n\t// We don't want multiple clients during setup\n\ts.db.SetMaxOpenConns(1)\n\n\t// Test the connection\n\terr = s.db.Ping()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Create the DB schema (if needed)\n\t_, err = s.db.Exec(schema)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Set the connection limit for the DB pool\n\ts.db.SetMaxOpenConns(10)\n\n\treturn nil\n}", "func SetupDB() {\n\n\t// sqlite \"gorm.io/driver/sqlite\"\n\t//database, err := gorm.Open(sqlite.Open(\"database.db\"), &gorm.Config{})\n\n\t// mysql \"gorm.io/driver/mysql\"\n\t// dsn := \"root:@tcp(127.0.0.1:3306)/cmgostock?charset=utf8mb4&parseTime=True&loc=Local\"\n\t// database, err := gorm.Open(mysql.Open(dsn), &gorm.Config{})\n\n\t// postgresql \t\"gorm.io/driver/postgres\"\n\tdsn := \"host=10.82.69.121 user=postgres password=1234 dbname=cmgostock port=5432 sslmode=disable TimeZone=Asia/Bangkok\"\n\t//dsn := \"host=localhost user=postgres password=1234 dbname=cmgostock port=5432 sslmode=disable TimeZone=Asia/Bangkok\"\n\t database, err := gorm.Open(postgres.Open(dsn), &gorm.Config{})\n\tif err != nil {\n\t\tpanic(\"failed to connect database\")\n\t}\n\n\tdatabase.AutoMigrate(&model.User{})\n\tdatabase.AutoMigrate(&model.Product{})\n\tdatabase.AutoMigrate(&model.Transaction{})\n\n\tdb = database\n}", "func init() {\n\tuser := \"root\"\n\tpass := \"pwpw\"\n\tname := \"itemsDB\"\n\n\tdbconf := user + \":\" + pass + \"@/\" + name\n\tconn, err := sql.Open(\"mysql\", dbconf)\n\tif err != nil {\n\t\tpanic(err.Error)\n\t}\n\tConn = conn\n}", "func init() {\n\tlog.Info(\"Initializing database\")\n\tpsqlInfo := fmt.Sprintf(\"host=%s port=%s user=%s \"+\n\t\t\"password=%s dbname=%s 
sslmode=disable\",\n\t\tconfig.Config().GetString(\"database.host\"),\n\t\tconfig.Config().GetString(\"database.port\"),\n\t\tconfig.Config().GetString(\"database.user\"),\n\t\tconfig.Config().GetString(\"database.password\"),\n\t\tconfig.Config().GetString(\"database.name\"))\n\tdb, err := sql.Open(\"postgres\", psqlInfo)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\terr = db.Ping()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tlog.Info(\"Successfully connected to database!\")\n}", "func initDBConnection(dbUser, dbPass, dbURL, dbNAME string) (err error) {\n\t/*\n\t\tVariables defined here\n\t*/\n\tvar user, pass, url, name string\n\n\t/*\n\t\tverify that all variables exists\n\t*/\n\tif len(dbUser) == 0 || len(dbURL) == 0 || len(dbPass) == 0 || len(dbNAME) == 0 {\n\t\terr = errors.New(\"Missing DB Credentails. Please Check\")\n\t\treturn\n\t}\n\n\t/*\n\t\tverify the varibles and set values after remove spaces\n\t*/\n\tif len(dbUser) > 0 && len(dbPass) > 0 && len(dbURL) > 0 && len(dbNAME) > 0 {\n\t\tuser = strings.TrimSpace(dbUser)\n\t\tpass = strings.TrimSpace(dbPass)\n\t\turl = strings.TrimSpace(dbURL)\n\t\tname = strings.TrimSpace(dbNAME)\n\t}\n\n\t/*\n\t\tPrepares the connection string\n\t*/\n\tconnString := fmt.Sprintf(\"postgres://%s:%s@%s/%s?sslmode=require\", user, pass, url, name)\n\tfmt.Printf(\"connecting to database: %s\\n\", url)\n\n\t/*\n\t\tconnects the database with the provided values, in case of any issue error will be raise\n\t*/\n\tdb, err = sql.Open(\"postgres\", connString)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Database refused connection: %s\", err.Error())\n\t\treturn\n\t}\n\n\treturn\n}", "func init() {\n\tRepoCreateDatabaseConnection(DatabaseConnection{Name: \"Write presentation\"})\n\tRepoCreateDatabaseConnection(DatabaseConnection{Name: \"Host meetup\"})\n}", "func init() {\n\n\tvar err error\n db, err = sql.Open(\"postgres\",\"user=snake dbname=snake_game sslmode=disable port=26257\")\n\t\n\tif err != nil {\n\t\tlog.Fatal(\"error 
connecting to the database: \", err, nil)\n\t}\n\n if err != nil {\n\t\tlog.Fatal(\"error connecting to the database: \", err, nil)\n\t}\n}", "func connect() {\n\tconn, err := http.NewConnection(http.ConnectionConfig{\n\t\tEndpoints: []string{hlp.Conf.DB.URL},\n\t})\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to create HTTP connection: %v\", err)\n\t}\n\n\tclient, err := driver.NewClient(driver.ClientConfig{\n\t\tConnection: conn,\n\t\tAuthentication: driver.BasicAuthentication(\n\t\t\thlp.Conf.DB.User,\n\t\t\thlp.Conf.DB.Pass,\n\t\t),\n\t})\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to create new client: %v\", err)\n\t}\n\n\tctx := context.Background()\n\tdb, err := client.Database(ctx, \"cardo_dev\")\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to open database: %v\", err)\n\t}\n\n\tDatabase = db\n}", "func initDatabase() {\n\n\tpsqlInfo := fmt.Sprintf(\"host=%s port=%d user=%s password=%s dbname=%s sslmode=disable\", host, port, user, password, dbname)\n\n\tvar err error\n\tdbClient, err = sqlx.Open(\"postgres\", psqlInfo)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer dbClient.Close()\n\n\terr = dbClient.Ping()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfmt.Println(\"Successfully connected!\")\n}", "func (db *Database) init() (*gorm.DB, error) {\n\tvar connection, err = gorm.Open(db.Driver, db.getURI())\n\tif err != nil {\n\t\tfmt.Printf(\"✖ Cannot connect to %s database\\n\", db.Driver)\n\t\tlog.Fatal(\"This is the error:\", err)\n\t} else {\n\t\tfmt.Printf(\"⚡ We are connected to the %s database\\n\", db.Driver)\n\t}\n\treturn connection, err\n}", "func setup() *gorm.DB {\n\tenv := config.GetEnvironments()\n\tdsn := fmt.Sprintf(\"host=%s user=%s password=%s dbname=%s sslmode=disable port=5432\", env.PostgresqlHost, env.PostgresqlUsername, env.PostgresqlPassword, env.PostgresqlDatabase)\n\tdb, err := gorm.Open(postgres.Open(dsn), &gorm.Config{})\n\n\tif err != nil {\n\t\tpanic(\"Falied to connect to database err: \" + 
err.Error())\n\t}\n\tconfig.Green(\"Connected to database have been established\")\n\n\terr = db.AutoMigrate(\n\t\t&models.Location{},\n\t\t&models.Company{},\n\t\t&models.Department{},\n\t\t&models.Employee{},\n\t)\n\n\tif err != nil {\n\t\tlog.Fatalln(err.Error())\n\t}\n\n\tdb.Model(&models.Department{}).Association(\"Location\")\n\tdb.Model(&models.Employee{}).Association(\"Company\")\n\t// db.Model(&models.Employee{}).Association(\"\")\n\n\tif err != nil {\n\t\tlog.Fatalln(err.Error())\n\t}\n\n\treturn db\n}", "func InitDBConnection() *Database {\n\thost := viper.GetString(\"db.host\")\n\tuser := viper.GetString(\"db.user\")\n\tdbname := viper.GetString(\"db.dbname\")\n\tdbConfig := fmt.Sprintf(\"host=%s user=%s dbname=%s sslmode=disable\", host, user, dbname)\n\tdb, err := gorm.Open(\"postgres\", dbConfig)\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"Failed to initiate a connection to the database: %s\", err))\n\t}\n\n\tfmt.Println(\"Migrating database\")\n\tdb.AutoMigrate(&User{}, &Organization{}, &Restaurant{}, &Menu{}, &Activity{}, &OrderItem{})\n\n\treturn &Database{db}\n}", "func InitDatabase(dsn string) error {\n\tfmt.Println(\"Init db connection\")\n\t// config := mysql.NewConfig()\n\t// config.User = username\n\t// config.Passwd = password\n\t// config.Net = protocol\n\t// config.Addr = host\n\t// config.DBName = database\n\t// config.Params = map[string]string{\n\t// \t\"charset\": charset,\n\t// \t\"parseTime\": \"True\",\n\t// }\n\tdb, err := gorm.Open(\"sqlite3\", dsn)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn err\n\t}\n\tDbConn = db\n\treturn nil\n}", "func SetupDatabase() {\n\tvar err error\n\tDbConn, err = sql.Open(\"mysql\", \"root:Passw0rd123!@tcp(127.0.0.1:3306)/inventorydb\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tDbConn.SetMaxOpenConns(3)\n\tDbConn.SetMaxIdleConns(3)\n\tDbConn.SetConnMaxLifetime(60 * time.Second)\n}", "func InitDatabase() *sql.DB {\n\tlog.Println(\"connecting database.\")\n\n\tquery := 
url.Values{}\n\tquery.Add(\"database\", \"Company\")\n\n\tu := &url.URL{\n\t\tScheme: \"sqlserver\",\n\t\tUser: url.UserPassword(\"sa\", \"1234\"),\n\t\tHost: fmt.Sprintf(\"%s:%d\", \"localhost\", 1433),\n\t\t// Path: instance, // if connecting to an instance instead of a port\n\t\tRawQuery: query.Encode(),\n\t}\n\n\tlog.Println(u.String())\n\n\tcondb, err := sql.Open(\"sqlserver\", u.String())\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tlog.Println(\"test ping database.\")\n\tif err = condb.Ping(); err != nil {\n\t\tpanic(err)\n\t}\n\treturn condb\n}", "func init() {\n\t_ = godotenv.Load()\n\n\thostname := os.Getenv(\"HOST\")\n\tdbname := os.Getenv(\"DBNAME\")\n\tusername := os.Getenv(\"DBUSER\")\n\tpassword := os.Getenv(\"PASSWORD\")\n\n\tdbString := \"host=\" + hostname + \" user=\" + username + \" dbname=\" + dbname + \" sslmode=disable password=\" + password\n\n\tvar err error\n\tdb, err = gorm.Open(\"postgres\", dbString)\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t\tpanic(\"Unable to connect to DB\")\n\t}\n\n\tdb.AutoMigrate(&QuestionModel{})\n\tdb.AutoMigrate(&AnswerModel{})\n\tdb.AutoMigrate(&UserModel{})\n\tdb.AutoMigrate(&Cohort{})\n}", "func Setup() (*sql.DB, error) {\n\n\tdbUser := config.GetString(\"database.user\")\n\tdbPasswd := config.GetString(\"database.passwd\")\n\tdbHost := config.GetString(\"database.host\")\n\tdbName := config.GetString(\"database.name\")\n\tdbConnection := config.GetString(\"database.connection\")\n\tconnectionString := dbUser + \":\" + dbPasswd + \"@\" + dbConnection + \"(\" + dbHost + \")\" + \"/\" + dbName + \"?charset=utf8\"\n\n\tdb, err := sql.Open(\"mysql\", connectionString)\n\n\tif err != nil {\n\t\tprint(\"sql open:\" + err.Error())\n\t\treturn nil, err\n\t}\n\n\t// Ping the database once since Open() doesn't open a connection\n\terr = db.Ping()\n\tif err != nil {\n\t\tprint(\"do.ping:\" + err.Error())\n\t\treturn nil, err\n\t}\n\n\tDatabase = db\n\treturn Database, nil\n}", "func init() {\n\tdbinfo := 
fmt.Sprintf(\"user=%s password=%s dbname=%s sslmode=disable\",\n\t\tos.Getenv(\"POSTGRES_USER\"), os.Getenv(\"POSTGRES_PASSWORD\"), DATABASE_NAME)\n\tdb, err := sql.Open(\"postgres\", dbinfo)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tDB = db\n}", "func init() {\n\tconfig.Read()\n\n\tdao.Server = config.Server\n\tdao.Database = config.Database\n\tdao.Connect()\n}", "func init() {\n\tconfig.Read()\n\n\tdao.Server = config.Server\n\tdao.Database = config.Database\n\tdao.Connect()\n}", "func Setup() {\n\tnow := time.Now()\n\tvar err error\n\tfmt.Print(setting.FileConfigSetting.Database)\n\tconnectionstring := fmt.Sprintf(\n\t\t\"user=%s password=%s dbname=%s sslmode=disable host=%s port=%s\",\n\t\tsetting.FileConfigSetting.Database.User,\n\t\tsetting.FileConfigSetting.Database.Password,\n\t\tsetting.FileConfigSetting.Database.Name,\n\t\tsetting.FileConfigSetting.Database.Host,\n\t\tsetting.FileConfigSetting.Database.Port)\n\tfmt.Printf(\"%s\", connectionstring)\n\tConn, err = gorm.Open(setting.FileConfigSetting.Database.Type, connectionstring)\n\tif err != nil {\n\t\tlog.Printf(\"connection.setup err : %v\", err)\n\t\tpanic(err)\n\t}\n\tgorm.DefaultTableNameHandler = func(db *gorm.DB, defaultTableName string) string {\n\t\treturn setting.FileConfigSetting.Database.TablePrefix + defaultTableName\n\t}\n\tConn.SingularTable(true)\n\tConn.DB().SetMaxIdleConns(10)\n\tConn.DB().SetMaxOpenConns(100)\n\n\tgo autoMigrate()\n\n\ttimeSpent := time.Since(now)\n\tlog.Printf(\"Config database is ready in %v\", timeSpent)\n\n}", "func setupDB() error {\n\t// get parameters and initialize the database\n\tdbDriver := os.Getenv(\"MESSAGES_DB_DRIVER\")\n\tdbName := os.Getenv(\"MESSAGES_DB_NAME\")\n\tif dbDriver == \"\" || dbName == \"\" {\n\t\treturn errors.New(\"main: db driver or db name not set\")\n\t}\n\n\tif err := db.InitDB(dbDriver, dbName); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func InitializeDb() {\n\tdbPort, err := strconv.Atoi(os.Getenv(\"DB_PORT\"))\n\tif 
err != nil {\n\t\tlog.Fatal(\"Database port is not valid\")\n\t}\n\n\tdbConnString := fmt.Sprintf(\n\t\t\"host=%s port=%d user=%s password=%s dbname=%s sslmode=disable\",\n\t\tos.Getenv(\"DB_HOST\"),\n\t\tdbPort,\n\t\tos.Getenv(\"DB_USER\"),\n\t\tos.Getenv(\"DB_PASS\"),\n\t\tos.Getenv(\"DB_NAME\"),\n\t)\n\n\tDB, err = sql.Open(\"postgres\", dbConnString)\n\tif err != nil {\n\t\tlog.Fatal(\"Could not connect to db- \", err)\n\t}\n}", "func init() {\n\n\tvar err error\n\tdatabase, err = sql.Open(\"mysql\", config.MySQLToFormatDNS())\n\tif err != nil {\n\t\tlog.Fatal(\"==> Error in library/mysql: \" + err.Error())\n\t}\n\n\tdatabase.SetMaxOpenConns(20)\n\tdatabase.SetMaxIdleConns(20)\n\n}", "func setup() {\n\tvar err error\n\tmysql := config.MySQLConfig()\n\tdb, err = gorm.Open(\"mysql\", fmt.Sprintf(\"%s:%s@tcp(%s:%d)/%s?charset=utf8&parseTime=True&loc=Local\",\n\t\tmysql.Username,\n\t\tmysql.Password,\n\t\tmysql.Host,\n\t\tmysql.Port,\n\t\tmysql.DbName))\n\n\tif err != nil {\n\t\tlog.Fatalf(\"models.Setup err: %v\", err)\n\t}\n\tif server := config.ServerConfig(); server.Debug {\n\t\tdb.LogMode(true)\n\t}\n\n\tdb.AutoMigrate(Admin{}, Permission{}, User{}, Role{}, News{}, Site{}, Message{})\n\n\tdb.SingularTable(false)\n\tdb.DB().SetMaxIdleConns(10)\n\tdb.DB().SetMaxOpenConns(100)\n}", "func initDb(username, password, endpoint, port, database string) (*sql.DB, error) {\n\t// Create url for connection\n\turl := fmt.Sprintf(\"%s:%s@tcp(%s:%s)/%s?parseTime=true\", username, password, endpoint, port, database)\n\n\t// Open connection to SQL DB\n\tdb, err := sql.Open(\"mysql\", url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Test database connection\n\terr = db.Ping()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn db, err\n}", "func Initialize() {\n\tdatabaseHost := os.Getenv(\"DB_HOST\")\n\tdatabasePort := os.Getenv(\"DB_PORT\")\n\tdatabaseUser := os.Getenv(\"DB_USER\")\n\tdatabasePass := os.Getenv(\"DB_PASS\")\n\tdatabaseName := 
os.Getenv(\"DB_NAME\")\n\n\tpostgresConnectionURL := fmt.Sprintf(\"postgres://%s:%s@%s:%s/%s?sslmode=disable\", databaseUser, databasePass, databaseHost, databasePort, databaseName)\n\n\tvar err error\n\tdb, err = sql.Open(\"postgres\", postgresConnectionURL)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t//defer db.Close()\n\n\terr = db.Ping()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tmaxOpenConn, err := strconv.Atoi(os.Getenv(\"DB_MAX_OPEN_CONN\"))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tmaxIdleConn, err := strconv.Atoi(os.Getenv(\"DB_MAX_IDLE_CONN\"))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdb.SetMaxOpenConns(maxOpenConn)\n\tdb.SetMaxIdleConns(maxIdleConn)\n\n\tfmt.Println(\"Database connected!\")\n\n}", "func setupDB() *sql.DB {\n\turi := os.Getenv(\"BROKER_DB\")\n\tdb, err := sql.Open(\"postgres\", uri)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tlog.Fatal(\"Unable to establish database connection.\", err)\n\t}\n\n\thour, _ := time.ParseDuration(\"1h\")\n\tdb.SetConnMaxLifetime(hour)\n\tdb.SetMaxIdleConns(4)\n\tdb.SetMaxOpenConns(20)\n\treturn db\n}", "func init() {\n\tlog.Info(\"mysql 链接中。。。\")\n\tvar v dbInfo\n\tv.UserName = \"root\"\n\tv.Port = 3306\n\tv.UserPassword = \"gogocuri\"\n\tv.DbName = \"wanqu2\"\n\tv.Address = \"192.168.0.162\"\n\n\tvar err error\n\tdb, err = gorm.Open(\"mysql\", fmt.Sprintf(\"%s:%s@tcp(%s:%d)/%s?charset=utf8&parseTime=True&loc=Local\",\n\t\tv.UserName, v.UserPassword, v.Address, v.Port, v.DbName))\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to connect database:\", err)\n\t}\n\t// 关闭复数表名,如果设置为true,`User`表的表名就会是`user`,而不是`users`\n\tdb.SingularTable(true)\n\t// 启用Logger,显示详细日志\n\tdb.LogMode(true)\n\t//自定义日志\n\tdb.SetLogger(log.NewGormLogger())\n\t//连接池\n\tdb.DB().SetMaxIdleConns(50)\n\tdb.DB().SetMaxOpenConns(200)\n\tlog.Info(\"mysql 链接成功\")\n}", "func initializeDB() *gorm.DB {\n\t// load Env Variables\n\tHOST := os.Getenv(\"HOST\")\n\tDB_PORT := os.Getenv(\"DB_PORT\")\n\tUSER := os.Getenv(\"USER\")\n\tNAME := 
os.Getenv(\"NAME\")\n\tPASSWORD := os.Getenv(\"PASSWORD\")\n\n\t// Data connection string\n\tDB_URI := fmt.Sprintf(\"host=%s user=%s dbname=%s sslmode=disable password=%s port=%s\", HOST, USER, NAME, PASSWORD, DB_PORT)\n\t\n\t// Open DB\n\tdb, err := gorm.Open(postgres.Open(DB_URI), &gorm.Config{})\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t} else {\n\t\tfmt.Println(\"DB Connected successfully\")\n\t}\n\n\tdb.AutoMigrate(&models.Person{})\n\tdb.AutoMigrate(&models.Book{})\n\n\treturn db\n}", "func SetupConnection(config config.Database) {\n\tlogger := log.GetLogger()\n\n\tdsn := os.Getenv(\"LAUNDRY_DSN\")\n\tif dsn == \"\" {\n\t\tdsn = fmt.Sprintf(\"%s:%s@tcp(%s:%d)/%s?parseTime=1\", config.Username, config.Password, config.Host, config.Port, config.Database)\n\t}\n\n\tfor i := 1; i <= config.PoolSize; i++ {\n\t\tvar dbConn dbConnection\n\n\t\tfor j := config.RetryCount; j >= 0; j-- {\n\t\t\tlogger.Info(\"connection to databas\")\n\n\t\t\tdb, err := sql.Open(\"mysql\", dsn)\n\n\t\t\tif err != nil {\n\t\t\t\tlogger.Warnf(\"could not connect att attempt %d: %s\", j, err)\n\t\t\t\ttime.Sleep(time.Second * 5)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tdbConn.simpleDb = db\n\t\t\tdbConn.db = sqlx.NewDb(db, \"mysql\")\n\t\t}\n\n\t\tgo monitorConnection(dbConn)\n\n\t\tconnectionPool = append(connectionPool, dbConn)\n\t}\n}", "func initializeMysqlConn() {\n\tdbConn, err := sql.Open(\"mysql\", \"admin:admin@tcp(y2search_mysql:3306)/y2search_db?collation=utf8mb4_unicode_ci\")\n\tdb = *dbConn\n\tif err != nil {\n\t\tlog.Panic(err.Error()) // Just for example purpose. You should use proper error handling instead of panic\n\t}\n\n\t// Open doesn't open a connection. 
Validate DSN data:\n\terr = db.Ping()\n\tif err != nil {\n\t\tlog.Panic(err.Error()) // proper error handling instead of panic in your app\n\t}\n}", "func SetupDatabaseConnection() *sql.DB {\n\tdbURL, err := pq.ParseURL(os.Getenv(\"DATABASE_URL\"))\n\tconfig.LogFatal(err)\n\n\tdb, err = sql.Open(\"postgres\", dbURL)\n\tconfig.LogFatal(err)\n\n\terr = db.Ping()\n\tconfig.LogFatal(err)\n\n\treturn db\n}", "func init() {\n\tconnection = \"MySql\"\n}", "func DBInit(conStr string) {\n\tif db == nil {\n\t\tvar err error\n\t\tdbConnection, err := gorm.Open(\"mysql\", conStr+\"?charset=utf8&parseTime=True&loc=Local\")\n\t\t// db connection will be closed if there's no request for a while\n\t\t// which would cause 500 error when a new request comes.\n\t\t// diable pool here to avoid this problem.\n\t\tdbConnection.DB().SetMaxIdleConns(0)\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"error\": err.Error(),\n\t\t\t}).Fatal(\"Faile to create db connection pool\")\n\t\t} else {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"message\": dbConnection.GetErrors(),\n\t\t\t\t\"db\": conStr,\n\t\t\t}).Info(\"connected to mysql\")\n\t\t}\n\t\tdb = &DB{dbConnection}\n\t}\n\tdb.dbConnect.SetLogger(log.StandardLogger())\n\t// db.Debug message will be logged be logrug with Info level\n\tdb.dbConnect.Debug().AutoMigrate(&Todo{})\n}", "func initDB() {\n\tvar err error\n\tpsqlInfo := fmt.Sprintf(\"host=%s port=%d user=%s \"+\n\t\t\"password=%s dbname=%s sslmode=disable\",\n\t\thost, port, user, password, dbname)\n\tdb, err = sql.Open(\"postgres\", psqlInfo)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n}", "func Setup() {\n\tdao.Server = \"mongodb://\" + os.Getenv(\"MONGO_USER\") + \":\" + os.Getenv(\"MONGO_PWORD\") + \"@ds125469.mlab.com:25469/newsy_db\"\n\tdao.Database = \"newsy_db\"\n}", "func connect_db() {\n\tdb, err = sql.Open(\"mysql\", \"root:jadir123@tcp(127.0.0.1:3306)/go_db\")\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\terr = db.Ping()\n\tif err != nil 
{\n\t\tlog.Fatalln(err)\n\t}\n}", "func initDB(options Options) (*mgo.Session, error) {\n\tdialInfo := &mgo.DialInfo{\n\t\tAddrs: strings.Split(options.DBHost, \",\"),\n\t\tDatabase: options.DBName,\n\t\tUsername: options.DBUser,\n\t\tPassword: options.DBPassword,\n\t\tDialServer: func(addr *mgo.ServerAddr) (net.Conn, error) {\n\t\t\treturn tls.Dial(\"tcp\", addr.String(), &tls.Config{InsecureSkipVerify: true})\n\t\t},\n\t\tReplicaSetName: \"rs0\",\n\t\tTimeout: time.Second * 10,\n\t}\n\n\tif !options.SSL {\n\t\tdialInfo.ReplicaSetName = \"\"\n\t\tdialInfo.DialServer = nil\n\t}\n\t// connect to the database\n\tsession, err := mgo.DialWithInfo(dialInfo)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn session, err\n}", "func init() {\n\tconfig.Read()\n\tdao.DialInfo = &mgo.DialInfo{\n\t\tAddrs: []string{config.Server},\n\t\tDatabase: config.Database,\n\t\tUsername: config.Username,\n\t\tPassword: config.Password,\n\t}\n\n\tdao.Server = config.Server\n\tdao.Database = config.Database\n\tdao.Connect()\n\n}", "func Dbcon() (db *sql.DB, err error) {\n\tdb, err = sql.Open(\"mysql\", \"mremmalex:password@tcp(localhost:3306)/backendtest\")\n\treturn db, err\n}", "func Setup() {\n\tvar err error\n\tdb, err = gorm.Open(mysql.Open(fmt.Sprintf(\"%s:%s@tcp(%s)/%s?charset=utf8&parseTime=True&loc=Local\",\n\t\tconfig.DatabaseConfiguration.User,\n\t\tconfig.DatabaseConfiguration.Password,\n\t\tconfig.DatabaseConfiguration.Host,\n\t\tconfig.DatabaseConfiguration.Name)), &gorm.Config{\n\t\tNamingStrategy: schema.NamingStrategy{\n\t\t\tSingularTable: true,\n\t\t},\n\t})\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to connect to database. err: %v\", err)\n\t}\n\n\tif err = migrateDatabase(); err != nil {\n\t\tlog.Fatalf(\"failed to migrate database. 
err: %v\", err)\n\t}\n}", "func SetupDatabase() {\n\tu := helper.GetEnv(\"DATABASE_USER\", \"golang\")\n\tp := helper.GetEnv(\"DATABSE_PASSWORD\", \"golang\")\n\th := helper.GetEnv(\"DATABASE_HOST\", \"localhost:3306\")\n\tn := helper.GetEnv(\"DATABASE_NAME\", \"go_test\")\n\tq := \"charset=utf8mb4&parseTime=True&loc=Local\"\n\n\t// Assemble the connection string.\n\tdsn := fmt.Sprintf(\"%s:%s@tcp(%s)/%s?%s\", u, p, h, n, q)\n\n\t// Connect to the database.\n\tdb, err := gorm.Open(mysql.Open(dsn), &gorm.Config{})\n\n\t// Migrate the schema\n\tdb.AutoMigrate(&User{})\n\n\tif err != nil {\n\t\tpanic(\"Could not open database connection\")\n\t}\n\n\tDB = db\n}", "func init() {\n\tvar err error\n\tDB, err = gorm.Open(config.MysqlConf.DriverName, config.MysqlConf.Conn)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tDB.DB().SetMaxOpenConns(config.MysqlConf.MaxOpenConns)\n\tDB.DB().SetMaxIdleConns(config.MysqlConf.MaxIdleConns)\n\tDB.DB().SetConnMaxLifetime(time.Duration(config.MysqlConf.ConnMaxLifetime))\n\t//DB.LogMode(true)\n}", "func init() {\r\n\tCandidates = ConnectDB(\"Candidates\")\r\n}", "func (w *DBInstance) connect() (req DBRequest, err error) {\n\treq.db, err = gorm.Open(\"mysql\", fmt.Sprintf(\"%v:%v@/%v?charset=utf8&parseTime=True&loc=Local\", w.sqlUser, w.sqlPass, w.sqlDBName))\n\treturn\n}", "func init() {\n\t// Open a connection to GORM\n\tdb, err := gorm.Open(\"sqlite3\", \"shop.db\")\n\tif err != nil {\n\t\tpanic(\"Failed to connect database\")\n\t}\n\n\tDB = db\n\n\tDB.AutoMigrate(models.Supply{})\n}", "func (sql *SqlConnection) InitDB() error {\n\n\tvar err error\n\n\t// open a db connection //\n\tsql.Db, err = gorm.Open(\"sqlite3\", \"/var/tmp/tennis.db\")\n\tif err != nil {\n\t\tfmt.Println(\"Failed to connect database : \", err.Error())\n\t}\n\tsql.Db.LogMode(true)\n\n\treturn err\n}", "func dbInit() {\n\t//User Input\n\tusernm := creds.UserName\n\tpass := creds.Password\n\tDBName := creds.DBName\n\tlit.Debug(\"Hit dbInit \" + 
DBName)\n\tlog.Println(usernm + \":\" + pass + \"@tcp(127.0.0.1:3306)/\")\n\n\tdb, err := sql.Open(\"mysql\", usernm+\":\"+pass+\"@tcp(127.0.0.1:3306)/\")\n\terr = db.Ping() //Need to ping to generate connection and trigger err\n\tif err != nil {\n\t\tlit.Error(\"Error in Init Log-in\")\n\t\tcreds = getCreds()\n\t\tfile, _ := json.MarshalIndent(creds, \"\", \"\\t\")\n\t\t_ = ioutil.WriteFile(\"configs/creds.json\", file, 0644)\n\t} else {\n\t\tlit.Debug(\"Attempt DB Creation\")\n\t\t_, err = db.Exec(\"CREATE DATABASE \" + DBName)\n\t\tif err != nil {\n\t\t\tlog.Println(err.Error())\n\t\t} else {\n\t\t\tlog.Println(\"Database Created:\", \"\\\"\"+DBName+\"\\\"\")\n\t\t}\n\t\tdb.Exec(\"USE \" + DBName)\n\t\tstmt, err := db.Prepare(\"CREATE TABLE `employee` (`id` int(6) unsigned NOT NULL AUTO_INCREMENT,`name` varchar(30) NOT NULL,`city` varchar(30) NOT NULL,PRIMARY KEY (`id`));\")\n\t\tif err != nil {\n\t\t\tlog.Println(err.Error())\n\t\t} else {\n\t\t\t_, err = stmt.Exec()\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err.Error())\n\t\t\t} else {\n\t\t\t\tlog.Println(\"Table Created\", \"\\\"\"+\"employees\"+\"\\\"\")\n\t\t\t}\n\t\t}\n\t}\n}", "func SetupDatabase(readerConfig, writerConfig Option) (reader, writer *sql.DB, err error) {\n\treader, err = createConnection(readerConfig)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\twriter, err = createConnection(writerConfig)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn reader, writer, nil\n}", "func InitDB(setting *domain.GlobalConfig) {\n\tsource := \"\"\n\tswitch setting.DBType {\n\tcase domain.SQLITE3:\n\t\tlogrus.Info(\"InitDB has done when new client, skip.\")\n\t\treturn\n\tcase domain.MYSQL:\n\t\tsource = fmt.Sprintf(\"%s:%s@tcp(%s:%s)/\",\n\t\t\tsetting.DBUser, setting.DBPassword, setting.DBHost, setting.DBPort)\n\tdefault:\n\t\tsource = fmt.Sprintf(\"%s:%s@tcp(%s:%s)/\",\n\t\t\tsetting.DBUser, setting.DBPassword, setting.DBHost, setting.DBPort)\n\t}\n\n\tdb, err := 
sql.Open(setting.DBType, source)\n\tif err != nil {\n\t\tlogrus.Fatalf(\"connection to db error: %s\", err)\n\t}\n\tdefer db.Close()\n\n\tsql := \"CREATE DATABASE IF NOT EXISTS \" + setting.DBName + \";\"\n\t_, err = db.Exec(sql)\n\tif err != nil {\n\t\tlogrus.Fatalf(\"create db %s error: %v\", setting.DBName, err)\n\t}\n}", "func InitDb(){\r\n\tconnectionURL:=os.Getenv(\"CONNECTION_URL\")\r\n\tvar err error\r\n\tDBConn, err = gorm.Open(\"postgres\",connectionURL)\r\n\tif err!= nil{\r\n\t\tpanic(\"failed to connect to db\")\r\n\t}\r\n\tfmt.Println(\"db is connected lets go.........\")\r\n\tDBConn.AutoMigrate(&models.GoItems{})\r\n\tfmt.Println(\"db has been migrated\")\r\n}", "func init() {\n\tcfg = pkg.InitializeConfig()\n\t_, err := pkg.InitializeDb()\n\tif err != nil {\n\t\tpanic(\"failed to initialize db connection : \" + err.Error())\n\t}\n}", "func connectDB(cfg *config.DB) error{\n\turi := fmt.Sprintf(\"%s:%s@tcp(%s)/%s?charset=utf8&parseTime=True\", cfg.User, cfg.Password, cfg.Address, cfg.Name)\n\tconn, err := gorm.Open(dialect, uri)\n\tif err != nil{\n\t\treturn err\n\t}\n\tdefaultDB = &DB{conn}\n\tdefaultDB.DB.DB().SetMaxIdleConns(cfg.MaxIdleConn)\n\tdefaultDB.DB.DB().SetMaxOpenConns(cfg.MaxOpenConn)\n\tdefaultDB.DB.DB().SetConnMaxLifetime(cfg.MaxConnLifetime)\n\tdefaultDB.DB.LogMode(cfg.Debug)\n\n\treturn nil\n}", "func init() {\n\tdao.Server = \"mongodb://shivam:[email protected]:25294/shayona-store\"\n\tdao.Database = \"shayona-store\"\n\tdao.Connect()\n}", "func dbInit(dbc co.DbConnectionRequest) {\n\tdb, err := sql.Open(\"mysql\", dbc.User+\":\"+dbc.Pwd+\"@tcp(\"+dbc.Server+\":\"+dbc.Port+\")/\")\n\tif err != nil {\n\t\tpanic(err.Error()) // proper error handling instead of panic in your app\n\t}\n\tfor _, stmt := range organizationsSchema {\n\t\tfmt.Println(stmt)\n\t\t_, err := db.Exec(stmt)\n\t\tif err != nil {\n\t\t\tpanic(err.Error()) // proper error handling instead of panic in your app\n\t\t}\n\t}\n\tdb.Close()\n\treturn\n}", "func DbConnect() 
{\n\tpostgresHost, _ := os.LookupEnv(\"CYCLING_BLOG_DB_SERVICE_SERVICE_HOST\")\n\tpostgresPort := 5432\n\tpostgresUser, _ := os.LookupEnv(\"POSTGRES_USER\")\n\tpostgresPassword, _ := os.LookupEnv(\"PGPASSWORD\")\n\tpostgresName, _ := os.LookupEnv(\"POSTGRES_DB\")\n\n\tenv := ENV{Host: postgresHost, Port: postgresPort, User: postgresUser, Password: postgresPassword, Dbname: postgresName}\n\n\tpsqlInfo := fmt.Sprintf(\"host=%s port=%d user=%s password=%s dbname=%s sslmode=disable\", env.Host, env.Port, env.User, env.Password, env.Dbname)\n\tdb, err := sql.Open(\"postgres\", psqlInfo)\n\tif err != nil {\n\t\tlog.Panic(\"DbConnect: unable to connect to database\", err)\n\t}\n\tDB = db\n\tfmt.Println(\"Successfully connected!\")\n}", "func InitDatabase() (err error) {\n\tvar pgo *pg.Options\n\n\tif pgo, err = pg.ParseURL(options.PgSQLDSN); err != nil {\n\t\treturn\n\t}\n\tlog.Debugf(\"Try to connect to postgrsql server...\")\n\tdb = pg.Connect(pgo)\n\treturn\n}", "func InitDB() {\n\tconnStr := \"user=osama dbname=hackernews password=ibnjunaid \"\n\t// Use root:dbpass@tcp(172.17.0.2)/hackernews, if you're using Windows.\n\tdb, err := sql.Open(\"postgres\", connStr)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\tDb = db\n\n}", "func InitializeDB() *Database {\n\tconfig := new(dbConfig)\n\tconfigFile, err := ioutil.ReadFile(\"config.yaml\")\n\terr = yaml.Unmarshal(configFile, &config)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tcredentials := fmt.Sprintf(\"%s:%s@/%s?charset=utf8&parseTime=True&loc=Local\", config.Database.DatabaseUser, config.Database.DatabasePassword, config.Database.Database)\n\tdialect := config.Database.DatabaseType\n\n\tdb, err := gorm.Open(dialect, credentials)\n\tif err != nil {\n\t\tlog.Fatal().Msgf(\"Failed to connect to Database. 
Reason: %v\\n\", err)\n\t}\n\tlog.Info().Msg(\"Successfully connected to qBot Database.\")\n\n\tdb.DB().SetConnMaxLifetime(time.Second * 100)\n\tdb.DB().SetMaxIdleConns(50)\n\tdb.DB().SetMaxOpenConns(200)\n\n\t//db.DropTableIfExists(models.User{}, models.Question{}, models.Answer{}) // Temp\n\t//db.DropTable(\"user_questions\", \"question_answers\", \"user_answers\") // Temp\n\tif err := db.AutoMigrate(models.User{}, models.Question{}, models.Answer{}).Error; err != nil {\n\t\tlog.Fatal().Msgf(\"Unable to migrate database. \\nReason: %v\", err)\n\t}\n\tlog.Info().Msg(\"Database migration successful.\")\n\treturn &Database{db}\n}", "func init() {\n\t// db, err := sql.Open(\"mysql\", mysql_connect)\n\t// if err != nil {\n\t// \tlog.Fatal(err)\n\t// } else {\n\t// \tlog.Println(\"Successfully connected to mysql database\")\n\t// }\n\t// defer db.Close()\n\n}", "func DBInit() {\n\t// Mode = \"PRODUCTION\"\n\t// if Mode == \"PRODUCTION\" {\n\t// \tDatabaseURL = \"test.sqlite3\"\n\t// \tDatabaseName = \"sqlite3\"\n\t// } else if Mode == \"DEPLOY\" {\n\tDatabaseURL = os.Getenv(\"DATABASE_URL\")\n\tDatabaseName = \"postgres\"\n\t// }\n\n\tdb, err := gorm.Open(DatabaseName, DatabaseURL)\n\tif err != nil {\n\t\tpanic(\"We can't open database!(dbInit)\")\n\t}\n\t//残りのモデルはまだ入れてない。\n\tdb.AutoMigrate(&model.Post{})\n\tdb.AutoMigrate(&model.User{})\n\tdb.AutoMigrate(&model.Room{})\n\tdefer db.Close()\n}", "func createConnection() *sql.DB {\n\t// load .env file\n\terr := godotenv.Load(\".env\")\n\n\tif err != nil {\n\t\tlog.Fatalf(\"Error loading .env file\")\n\t}\n\n\t// Open the connection\n\tdb, err := sql.Open(\"postgres\", os.Getenv(\"POSTGRES_URL\"))\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// check the connection\n\terr = db.Ping()\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfmt.Println(\"Successfully connected!\")\n\t// return the connection\n\treturn db\n}", "func init() {\n\tvar err error\n\tvar dataSource = os.Getenv(\"MYSQL_CONNECTION\")\n\tengine, err = 
xorm.NewEngine(\"mysql\", dataSource)\n\tif(err != nil){\n\t\tpanic(err)\n\t}\n\n\t//engine.ShowSQL(true)\n}", "func InitDB(driver, connectionstring string) error {\n\tdb, err := gorm.Open(driver, connectionstring)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsetDB(db)\n\treturn nil\n}", "func InitDb() {\n db, err := sqlx.Open(\"postgres\", config.DbURI)\n if err != nil {\n log.Fatalln(\"Database source URI error: \" + err.Error())\n }\n err = db.Ping()\n if err != nil {\n log.Fatalln(\"Database connect error: \" + err.Error())\n }\n Pool = db\n log.Println(\"Database connected\")\n}", "func InitDB() {\n\tdatabase, err := sql.Open(\"mysql\", \"jiraiya:Shivi<323@tcp(database-1.caqh2nel7qhl.us-east-2.rds.amazonaws.com:3306)/cumul\")\n\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\n\tDB = database\n}", "func DBInit() *gorm.DB {\n\t//db, err := gorm.Open(\"mysql\", \"root:@tcp(128.199.211.144:3306)/godb?charset=utf8&parseTime=True&loc=Local\")\n\tdb, err := gorm.Open(\"mysql\",\"root:orion2402@tcp(localhost:3306)/popfren?charset=utf8&parseTime=True&loc=Local\")\n\tif err != nil {\n\t\tpanic(\"failed to connect to database\")\n\t}\n\n\tdb.AutoMigrate(structs.Person{})\n\treturn db\n}", "func (c *client) connect() error {\n\tvar connection *sql.DB\n\tvar err error\n\tif os.Getenv(\"MODE\") == \"development\" {\n\t\tvar connectionString = fmt.Sprintf(\n\t\t\t\"host=%s port=%s user=%s password=%s dbname=%s sslmode=disable\",\n\t\t\tos.Getenv(\"PGHOST\"),\n\t\t\tos.Getenv(\"PGPORT\"),\n\t\t\tos.Getenv(\"PGUSER\"),\n\t\t\tos.Getenv(\"PGPASSWORD\"),\n\t\t\tos.Getenv(\"PGDATABASE\"),\n\t\t)\n\t\tc.connectionString = connectionString\n\t\tconnection, err = sql.Open(\"postgres\", connectionString)\n\t} else if os.Getenv(\"MODE\") == \"production\" {\n\t\tconnection, err = sql.Open(\"postgres\", os.Getenv(\"DATABASE_URL\"))\n\t}\n\tif err != nil {\n\t\treturn fmt.Errorf(\"connection to pg failed: %v\", err)\n\t}\n\n\tc.db = connection\n\n\tfmt.Println(\"postgres connection 
established...\")\n\treturn nil\n}", "func init() {\n\tdb, db_err = pgx.NewConnPool(pgx.ConnPoolConfig{\n\t\tConnConfig: pgx.ConnConfig{\n\t\t\tHost: \"localhost\",\n\t\t\tDatabase: \"pmp\",\n\t\t\tUser: \"ashutosh\",\n\t\t\tPassword: \"123\",\n\t\t\tPort: 5432,\n\t\t},\n\t\tMaxConnections: 10,\n\t})\n\n\tif db_err != nil {\n\t\tfmt.Println(\"Can't connect to database\")\n\t}\n}", "func (db *StockDB) Setup(env Environment) *StockDB {\n\t// TODO(jhurwich) implement user/pass/address switch based on local or prod environment\n\tvar dbname, password, host, user, suffix string\n\tswitch env {\n\tcase Local:\n\t\tdbname = \"trendydb\"\n\t\tpassword = \"localpass\"\n\t\thost = \"localhost\"\n\t\tuser = \"localuser\"\n\t\tsuffix = \"?sslmode=disable\"\n\tcase Production:\n\t\t// TODO(jhurwich) define for production environment\n\tcase TestLocal:\n\t\tdbname = \"trendytestdb\"\n\t\tpassword = \"localpass\"\n\t\thost = \"localhost\"\n\t\tuser = \"localuser\"\n\t\tsuffix = \"?sslmode=disable\"\n\t}\n\tdbSource := fmt.Sprintf(\"postgres://%s:%s@%s/%s%s\", user, password, host, dbname, suffix)\n\n\t// initialize the db, note that it's a global object, it is never closed\n\tdb = &StockDB{*(sqlx.MustConnect(\"postgres\", dbSource))}\n\tdb.CreateIfNotExists()\n\tDB = db // not entirely sure why we need this line with the address assignment two up, but whatever\n\treturn db\n}", "func connectToDB() {\n\tconnectString := fmt.Sprintf(\"dbname=%s sslmode=disable\", viper.GetString(\"DB.Name\"))\n\tif viper.GetString(\"DB.User\") != \"\" {\n\t\tconnectString += fmt.Sprintf(\" user=%s\", viper.GetString(\"DB.User\"))\n\t}\n\tif viper.GetString(\"DB.Password\") != \"\" {\n\t\tconnectString += fmt.Sprintf(\" password=%s\", viper.GetString(\"DB.Password\"))\n\t}\n\tif viper.GetString(\"DB.Host\") != \"\" {\n\t\tconnectString += fmt.Sprintf(\" host=%s\", viper.GetString(\"DB.Host\"))\n\t}\n\tif viper.GetString(\"DB.Port\") != \"5432\" {\n\t\tconnectString += fmt.Sprintf(\" port=%s\", 
viper.GetString(\"DB.Port\"))\n\t}\n\tmodels.DBConnect(viper.GetString(\"DB.Driver\"), connectString)\n}", "func Initialize() {\n\tconnection := os.Getenv(\"DB_HOST\")\n\tusername := os.Getenv(\"MYSQLUSER\")\n\tpassword := os.Getenv(\"MYSQLPASS\")\n\n\tfor {\n\t\tconn, err := net.DialTimeout(\"tcp\", connection, 6*time.Second)\n\t\tif conn != nil {\n\t\t\tconn.Close()\n\t\t\tbreak\n\t\t}\n\n\t\tlogrus.Info(\"Sleeping till mysql be available... \", err)\n\t\ttime.Sleep(5 * time.Second)\n\t}\n\n\tdb, err := sql.Open(\"mysql\", \"\"+username+\":\"+password+\"@tcp(\"+connection+\")/zinnion?charset=utf8\")\n\tif err != nil {\n\t\tlogrus.Info(\"No connection with mysql, \", err)\n\t}\n\tMysqlClient = db\n}", "func InitializeDatabase(c *Config) (*gorm.DB, error) {\n\tvar err error\n\tcon, err := gorm.Open(\"mysql\", c.GetDatasource())\n\n\tif err != nil {\n\t\tfmt.Println(\"[ERROR] Failed to connect to MySQL. Config= \" + Configuration.MySQL.Host)\n\t\treturn nil, err\n\t}\n\n\tcon.LogMode(LogMode)\n\tcon.SingularTable(true)\n\n\tfmt.Println(\"[INFO] Connected to MySQL. 
Config => \" + Configuration.MySQL.Host + \", LogMode => \" + fmt.Sprintf(\"%v\", LogMode))\n\treturn con, nil\n}", "func createConnection() *sql.DB {\n\t// load .env file\n\terr := godotenv.Load(\".env\")\n\n\tif err != nil {\n\t\tlog.Fatalf(\"Error loading .env file\")\n\t}\n\n\t// Open the connection\n\tusername := os.Getenv(\"db_user\")\n\tpassword := os.Getenv(\"db_pass\")\n\tdbName := os.Getenv(\"db_name\")\n\tdbHost := os.Getenv(\"db_host\")\n\tdbURI := fmt.Sprintf(\"host=%s user=%s dbname=%s sslmode=disable password=%s\", dbHost, username, dbName, password) //Build connection string\n\n\tdb, err := sql.Open(\"postgres\", dbURI)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// check the connection\n\terr = db.Ping()\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfmt.Println(\"Successfully connected!\")\n\t// return the connection\n\treturn db\n}", "func EstablishConnection() *sqlx.DB {\n\tif db != nil {\n\t\treturn db\n\t}\n\n\tdb = sqlx.MustConnect(\"sqlite3\", datasourceName)\n\tif err := migrateDB(); err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn db\n}", "func InitializeDB() *gorm.DB {\n\tdb, err := gorm.Open(\"mysql\", \"root:root@tcp(127.0.0.1:3306)/referee?parseTime=true&readTimeout=1s&writeTimeout=1s&timeout=1s\")\n\tCheck(err)\n\n\treturn db\n}", "func InitDB(dataSourceName string) {\n\n\t\tvar err error\n\t\tDB, err = sql.Open(\"postgres\", dataSourceName)\n\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t}\n\n\t\tif err = DB.Ping(); err != nil {\n\t\t\t\tlog.Println(err)\n\t\t} else {\n\t\t\t\tlog.Println(\"Connection to db established!\")\n\t\t}\n\n}", "func InitDatabase() *Database {\n\t// eg. 
\"postgres://postgres:postgres@localhost/postgres?sslmode=disable\"\n\t// TODO: enable SSL on DB\n\tconn, err := sql.Open(\"postgres\", os.Getenv(\"PG_CONNECTION_STRING\"))\n\tif err != nil {\n\t\tlog.Fatal(err) // kill server if we can't use DB on startup\n\t}\n\treturn &Database{\n\t\tconn: conn,\n\t}\n}", "func init() {\n\tdb, db_err = pgx.NewConnPool(pgx.ConnPoolConfig{\n\t\tConnConfig: pgx.ConnConfig{\n\t\t\tHost: \"localhost\",\n\t\t\tDatabase: \"web_portal\",\n\t\t\tUser: \"postgres\",\n\t\t\tPassword: \"anil205474\",\n\t\t\tPort: 5432,\n\t\t},\n\t\tMaxConnections: 10,\n\t})\n\n\tif db_err != nil {\n\t\tfmt.Println(\"Can't connect to database\")\n\t}\n}", "func dbConnect() (*sql.DB, error) {\n\tdb, err := sql.Open(\"mysql\", getDataSource())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = db.Ping()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// use the library_api database\n\t_, err = db.Exec(`USE library_api`)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn db, nil\n}", "func Setup() Modeler {\n\tlog.Println(\"Connecting to Database.\")\n\tdb, err := sql.Open(\"postgres\", buildConnStr())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// Block executing while we attempt to connect to the database\n\tconnectionBackoff(db, 6)\n\n\tlog.Println(\"Initializing Database.\")\n\t// Run necessary db commands e.g. 
migrations\n\tinitDb(db)\n\n\treturn model{db}\n}", "func init() {\n\tvar err error\n\t//db, err = sql.Open(\"postgres\", \"postgres://wookie:[email protected]/wookie?sslmode=disable\")\n\tdb, err = sql.Open(\"postgres\", \"user=wookie dbname=wookie sslmode=disable\")\n\tif err != nil {\n\t\tERROR.Println(\"init db\", err.Error())\n\t\treturn\n\t}\n\n\t//////////////////////////////////////\n\t// drop tables\n\t// DANGER this will empty the db\n\t//\n\t//////////////////////////////////////\n\t_, err = db.Exec(`DROP TABLE classes, users, quiz, attendance CASCADE`)\n\tfmt.Println(err)\n\n\t/////////////////////////////////////////////\n\t//////creating\n\t/////////////////////////////////////////////\n\n\t_, err = db.Exec(`CREATE TABLE users (\n uid serial PRIMARY KEY,\n email text UNIQUE,\n password bytea,\n salt bytea\n )`)\n\tfmt.Println(err)\n\n\t_, err = db.Exec(`CREATE TABLE attendance (\n cid integer PRIMARY KEY,\n students json,\n date_created date\n )`)\n\tfmt.Println(err)\n\n\t_, err = db.Exec(`CREATE TABLE classes (\n cid serial PRIMARY KEY,\n name text,\n students json,\n uid integer REFERENCES users (uid),\n semester text\n )`)\n\tfmt.Println(err)\n\n\t_, err = db.Exec(`CREATE TABLE quiz (\n qid serial PRIMARY KEY,\n info json,\n type integer,\n cid integer REFERENCES classes (cid)\n )`)\n\tfmt.Println(err)\n}", "func init() {\n\t//orm.RegisterDriver(\"mysql\", orm.DRMySQL)\n\n\tmysqlReg := beego.AppConfig.String(\"mysqluser\") + \":\" +\n\t\tbeego.AppConfig.String(\"mysqlpass\") + \"@tcp(127.0.0.1:3306)/\" +\n\t\tbeego.AppConfig.String(\"mysqldb\") + \"?charset=utf8&parseTime=true&loc=Australia%2FSydney\"\n\torm.RegisterDataBase(\"default\", \"mysql\", mysqlReg)\n}", "func (db *Postgres) connect() error {\n\tdbMap, err := gosql.Open(\"postgres\", db.URI)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"Failed to open database with uri: %s\", db.URI)\n\t}\n\n\t// Configure the database mapping object\n\tdb.DbMap = &gorp.DbMap{Db: dbMap, Dialect: 
gorp.PostgresDialect{}}\n\n\t// Verify database\n\terr = db.ping()\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"Failed to ping database with uri: %s\", db.URI)\n\t}\n\n\treturn nil\n}", "func InitDatabase() *Server {\n\tvar err error\n\n\tconnString := getConnString()\n\n\tlog.Printf(\"Setting connection to db with configuration: %s \\n\", connString)\n\n\tserver := &Server{}\n\tserver.db, err = sql.Open(\"sqlserver\", connString)\n\tif err != nil {\n\t\tlog.Fatal(\"Error opening connection: \", err.Error())\n\t}\n\n\tserver.db.SetConnMaxLifetime(time.Minute * 4)\n\n\treturn server\n}", "func InitDatabase() {\n\tvar err error\n\tdsn := \"root:@tcp(127.0.0.1)/test_server?charset=utf8mb4&parseTime=True&loc=Local\"\n\tDB, err = gorm.Open(mysql.Open(dsn), &gorm.Config{})\n\n\tif err != nil {\n\t\tpanic(\"database is error\")\n\t}\n\n\tDB.AutoMigrate(&model.User{})\n\n\tfmt.Println(\"Database Connected\")\n}", "func (p *DatabaseHandler) init(s *Server) error {\n\tdb, err := sql.Open(\"sqlite3\", s.srcDir+\"/database.db\")\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t\treturn StringError{\"ERROR: Some of databases weren't opened!\"}\n\t}\n\tp.db = db\n\n\tp.createTable()\n\treturn nil\n}", "func setupDatabase(db *sqlx.DB) error {\n\t// turn on write-ahead log and store temp tables on disk.\n\t_, err := db.Exec(`\n\t\tPRAGMA journal_mode=WAL;\n\t\tPRAGMA temp_store=1;\n\t\tPRAGMA foreign_keys=ON;\n\t\tPRAGMA encoding='UTF-8';\n\t`)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"execute PRAGMAs: %w\", err)\n\t}\n\n\ttx, err := db.Beginx()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"begin Tx: %w\", err)\n\t}\n\tdefer tx.Rollback()\n\n\tvar version int\n\tif err = tx.Get(&version, `PRAGMA user_version;`); err != nil {\n\t\treturn fmt.Errorf(\"PRAGMA user_version: %w\", err)\n\t}\n\tlog.Printf(\"[repo] current schema: %d\", version)\n\n\tswitch version {\n\tcase 0:\n\t\tlog.Printf(\"initialising database ...\")\n\t\t_, err = tx.Exec(`\n\t\t\t-- data 
table\n\t\t\tCREATE TABLE IF NOT EXISTS codepoints (\n\t\t\t\thex TEXT PRIMARY KEY,\n\t\t\t\tdec INTEGER,\n\t\t\t\tcategory TEXT,\n\t\t\t\tname TEXT,\n\t\t\t\taliases TEXT,\n\t\t\t\tentity TEXT\n\t\t\t);\n\t\t\t-- CREATE INDEX IF NOT EXISTS idx_hex ON codepoints(hex);\n\n\t\t\t-- full-text search\n\t\t\tCREATE VIRTUAL TABLE IF NOT EXISTS search USING FTS5(\n\t\t\t\tname,\n\t\t\t\taliases,\n\t\t\t\tentity,\n\t\t\t\thex,\n\t\t\t\tdec,\n\t\t\t\tcontent = 'codepoints',\n\t\t\t\ttokenize = \"unicode61 remove_diacritics 1 separators '-'\"\n\t\t\t);\n\n\t\t\tCREATE TRIGGER IF NOT EXISTS codepoints_ai AFTER INSERT ON codepoints\n\t\t\tBEGIN\n\t\t\t\tINSERT INTO search (rowid, name, aliases, entity, hex, dec)\n\t\t\t\t\tVALUES (new.rowid, new.name, new.aliases, new.entity, new.hex, new.dec);\n\t\t\tEND;\n\t\t\tCREATE TRIGGER IF NOT EXISTS codepoints_ad AFTER DELETE ON codepoints\n\t\t\tBEGIN\n\t\t\t\tINSERT INTO search (search, rowid, name, aliases, entity, hex, dec)\n\t\t\t\t\tVALUES ('delete', old.rowid, old.name, old.aliases, old.entity, old.hex, old.dec);\n\t\t\tEND;\n\t\t\tCREATE TRIGGER IF NOT EXISTS codepoints_au AFTER UPDATE ON codepoints\n\t\t\tBEGIN\n\t\t\t\tINSERT INTO search (search, rowid, name, aliases, entity, hex, dec)\n\t\t\t\t\tVALUES ('delete', old.rowid, old.name, old.aliases, old.entity, old.hex, old.dec);\n\t\t\t\tINSERT INTO search (rowid, name, aliases, entity, hex, dec)\n\t\t\t\t\tVALUES (new.rowid, new.name, new.aliases, new.entity, new.hex, new.dec);\n\t\t\tEND;\n\n\t\t\tPRAGMA user_version = 1;\n\t\t`)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"migrate to version 1: %w\", err)\n\t\t}\n\t\tfallthrough\n\tcase 1: // future migration\n\t\tbreak\n\t}\n\n\tif err = tx.Commit(); err != nil {\n\t\treturn fmt.Errorf(\"commit schema updates: %w\", err)\n\t}\n\tvar n int\n\tif err = db.Get(&n, `PRAGMA user_version;`); err != nil {\n\t\treturn fmt.Errorf(\"PRAGMA user_version: %w\", err)\n\t}\n\n\tif n > version {\n\t\tif _, err := db.Exec(`VACUUM;`); 
err != nil {\n\t\t\treturn fmt.Errorf(\"VACUUM database: %w\", err)\n\t\t}\n\t}\n\treturn nil\n}", "func connectToDatabase(app *app) error {\n\tlog.Info().Msg(\"connection to database...\")\n\n\tdb, err := sqlx.Connect(\"postgres\", app.config.Database)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Info().Msg(\"successfully connected!\")\n\tapp.repo = repo.New(db)\n\n\treturn nil\n}", "func Setup() {\n\tvar err error\n\tDB, err = gorm.Open(config.DatabaseConfig.Type, fmt.Sprintf(\"%s:%s@tcp(%s)/%s?charset=utf8&parseTime=True&loc=Local\",\n\t\tconfig.DatabaseConfig.User,\n\t\tconfig.DatabaseConfig.Password,\n\t\tconfig.DatabaseConfig.Host,\n\t\tconfig.DatabaseConfig.Name))\n\n\tif err != nil {\n\t\tlog.Fatalf(\"models.Setup err: %v\", err)\n\t}\n\n\tDB.SingularTable(true)\n\tDB.DB().SetMaxIdleConns(10)\n\tDB.DB().SetMaxOpenConns(100)\n\tDB.LogMode(true)\n\n\tmigrate(DB)\n}", "func InitializeDatabase(config string, log debug.Logger) {\n\tdatabaseAccess = databaseConnection{dbconfig: config, log: log}\n\tdatabaseAccess.initialize()\n}", "func (*ConnectDB) InitDB() framework.Mssql {\n\n\tvar user = framework.SA{\n\t\tUsername: \"sdxonestop\",\n\t\tPassword: \"baiyun+=1992\",\n\t}\n\n\tdb := framework.Mssql{\n\t\tDataSource: \"BAIYUN-MOBL1\",\n\t\tDatabase: \"SdxOneStopDB\",\n\t\t// windwos: true 为windows身份验证,false 必须设置sa账号和密码\n\t\tWindows: true,\n\t\tSa: user,\n\t}\n\t// 连接数据库\n\terr := db.Open()\n\tif err != nil {\n\t\tfmt.Println(\"sql open:\", err)\n\t\treturn db\n\t} else {\n\t\tfmt.Println(\"Succeed to open DB...\")\n\t}\n\t//defer db.Close()\n\n\treturn db\n}", "func connectDB() error {\n\t// define login variables\n\thost := config.Get(\"postgresql\", \"host\")\n\tport := config.Get(\"postgresql\", \"port\")\n\tssl := config.Get(\"postgresql\", \"ssl\")\n\tdatabase := config.Get(\"postgresql\", \"database\")\n\tusername := config.Get(\"postgresql\", \"username\")\n\tpassword := config.Get(\"postgresql\", \"password\")\n\n\t// connect and return 
error\n\treturn db.Connect(host, port, ssl, database, username, password)\n}", "func (src *DataSrc) InitDB(conf *viper.Viper) error {\n\tdbEngine := conf.GetString(\"db.engine\")\n\tdbHost := conf.GetString(\"db.host\")\n\tdbPort := conf.GetString(\"db.port\")\n\tdbAddr := dbHost + \":\" + dbPort\n\tdbName := conf.GetString(\"db.name\")\n\tdbUser := conf.GetString(\"db.user\")\n\tdbPassword := conf.GetString(\"db.password\")\n\n\tvar errdb error\n\tvar db *sqlx.DB\n\tif dbEngine == \"postgres\" {\n\t\tdb, errdb = sqlx.Connect(\"postgres\", \"host=\"+dbHost+\" port=\"+dbPort+\" user=\"+dbUser+\" password=\"+dbPassword+\" dbname=\"+dbName+\" sslmode=disable\")\n\t\tif errdb != nil {\n\t\t\tfmt.Println(\"Error connectiing to DB \", errdb)\n\t\t\treturn errdb\n\t\t}\n\t\tsrc.DB = db\n\n\t} else if dbEngine == \"mysql\" {\n\t\tdb, errdb = sqlx.Connect(\"mysql\", dbUser+\":\"+dbPassword+\"@\"+dbAddr+\"/\"+dbName+\"?charset=utf8&parseTime=True&loc=Local\")\n\t\tif errdb != nil {\n\t\t\tfmt.Println(\"Error connectiing to DB \", errdb)\n\t\t\treturn errdb\n\t\t}\n\t\tsrc.DB = db\n\n\t} else if dbEngine == \"sqlite\" {\n\t\tdb, errdb = sqlx.Connect(\"sqlite3\", dbName)\n\t\tif errdb != nil {\n\t\t\tfmt.Println(\"Error connecting to DB \", errdb)\n\t\t\treturn errdb\n\t\t}\n\t\tsrc.DB = db\n\t}\n\treturn nil\n\n}", "func initDatabase() {\n\tif dbPath == \"\" {\n\t\t// No path provided, use the default path\n\t\tdbPath = getDefaultDBPath()\n\t}\n\t// Start the database\n\tdb, err := poddata.New(dbPath)\n\tif err != nil {\n\t\tlogrus.Fatal(err.Error())\n\t}\n\tdata = db\n}", "func init() {\n\trouter = chi.NewRouter()\n\trouter.Use(middleware.Recoverer)\n\n\tdbSource := fmt.Sprintf(\"root:%s@tcp(%s:%s)/%s?charset=utf8\", dbPass, dbHost, dbPort, dbName)\n\n\tvar err error\n\tdb, err = sql.Open(\"mysql\", dbSource)\n\n\tcatch(err)\n}", "func setup() {\n\tvar err error\n\tdb, err = sql.Open(\"postgres\", config.DataBase.Postgres())\n\tif err != nil 
{\n\t\tlog.Fatal(err)\n\t}\n\n\terr = db.Ping()\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tts = httptest.NewServer(handler())\n\n\tclient = &http.Client{}\n}", "func (driver *SQLDriver) Initialize() error {\n\t// Parse the DSN and create a database object\n\tdb, err := sql.Open(env.Get(\"STORAGE_SQL_DRIVER\", \"sqlite3\"), env.Get(\"STORAGE_SQL_DSN\", \"./db\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Ping the database\n\terr = db.Ping()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Migrate the database\n\ttable := env.Get(\"STORAGE_SQL_TABLE\", \"pasty\")\n\t_, err = db.Exec(`\n\t\tCREATE TABLE IF NOT EXISTS ? (\n\t\t\tid varchar NOT NULL PRIMARY KEY,\n\t\t\tcontent varchar NOT NULL,\n\t\t\tsuggestedSyntaxType varchar NOT NULL,\n\t\t\tdeletionToken varchar NOT NULL,\n\t\t\tcreated bigint NOT NULL,\n\t\t\tautoDelete bool NOT NULL\n\t\t);\n `, table)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Set the database object and table name of the SQL driver\n\tdriver.database = db\n\tdriver.table = table\n\treturn nil\n}", "func (db *Postgres) initDB() error {\n\t// Create the schema\n\t// @afiune Can we rename this library?\n\tif err := migrator.Migrate(db.URI, db.SchemaPath); err != nil {\n\t\treturn errors.Wrapf(err, \"Unable to create database schema. [path:%s]\", db.SchemaPath)\n\t}\n\n\t// Add the tables to the database mappings\n\tdb.AddTableWithName(deployment{}, \"deployment\").SetKeys(true, \"id\")\n\tdb.AddTableWithName(supervisor{}, \"supervisor\").SetKeys(true, \"id\")\n\tdb.AddTableWithName(serviceGroup{}, \"service_group\").SetKeys(true, \"id\")\n\tdb.AddTableWithName(service{}, \"service\").SetKeys(true, \"id\")\n\n\t//return db.CreateTablesIfNotExists() // I don't think we can ensure the foreign keys\n\treturn nil\n}" ]
[ "0.76872724", "0.744406", "0.7325846", "0.7274146", "0.72703093", "0.71773756", "0.7104648", "0.7033461", "0.70319754", "0.70185804", "0.7011035", "0.7006", "0.70054144", "0.7000145", "0.699882", "0.69944245", "0.6985961", "0.6961673", "0.6960904", "0.6960904", "0.6959783", "0.69545263", "0.69478697", "0.6941011", "0.69386864", "0.69315964", "0.6920086", "0.6881678", "0.6864009", "0.6860521", "0.68552506", "0.6846254", "0.6830687", "0.6818805", "0.6814689", "0.6790635", "0.67804825", "0.6774236", "0.675496", "0.67332894", "0.67212766", "0.6720151", "0.66956246", "0.6681783", "0.66759247", "0.665403", "0.66510814", "0.66436535", "0.66344327", "0.66337144", "0.6623777", "0.66034424", "0.6595967", "0.6589171", "0.65812695", "0.6577391", "0.6573472", "0.6559818", "0.6555727", "0.6549023", "0.654727", "0.6546286", "0.6542177", "0.6514527", "0.650501", "0.6495702", "0.64920443", "0.64909977", "0.64884114", "0.6473665", "0.6469015", "0.64682657", "0.6456336", "0.6456071", "0.64549696", "0.645494", "0.6451606", "0.64450383", "0.643934", "0.6438287", "0.6436011", "0.64286184", "0.64178103", "0.6402474", "0.64010984", "0.6396598", "0.6396434", "0.6395472", "0.63895106", "0.6384037", "0.6379148", "0.6378929", "0.6375103", "0.63741803", "0.6371264", "0.6369452", "0.6365413", "0.6361857", "0.6360006", "0.63593715" ]
0.7022797
9
middleware to send all http requests to the logger
func logRequest(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { log.Println(r.RequestURI) next.ServeHTTP(w, r) }) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func logAllRequestsMiddleware(next http.Handler) http.Handler {\r\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\r\n\t\tlogger.Info(\"%s %s\", r.Method, r.URL.Path)\r\n\r\n\t\tnext.ServeHTTP(w, r)\r\n\t})\r\n}", "func Logger(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tlog.Infof(\"logging: request middleware\")\n\t\tnext.ServeHTTP(w, r)\n\t\tlog.Infof(\"logging: response middleware\")\n\t})\n}", "func logger(next http.HandlerFunc) http.HandlerFunc {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\twi := &interceptor{delegate: w}\n\t\tdefer func() {\n\t\t\tlog.Println(r.RemoteAddr, \" \", r.Method, \" \", r.RequestURI, \" \", r.Proto, \" \", wi.Status, \" \", wi.Bytes)\n\t\t}()\n\t\tnext.ServeHTTP(wi, r)\n\t})\n}", "func RequestLoggerMiddleware(next http.Handler) http.Handler {\r\n\treturn http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\r\n\t\tstart := time.Now()\r\n\r\n\t\tif config.LogLevel == \"error\" {\r\n\t\t\tlog.Printf(\r\n\t\t\t\t\"%s\\t%#v\\t%s\\t%s\",\r\n\t\t\t\treq.Method,\r\n\t\t\t\treq.RequestURI,\r\n\t\t\t\ttime.Since(start),\r\n\t\t\t)\r\n\t\t}\r\n\t\tif config.LogLevel == \"debug\" {\r\n\t\t\tlog.Printf(\r\n\t\t\t\t\"%s\\t%#v\\t%s\\t%dB\\t%s\\tHeaders: %s\\tPayload: %s\",\r\n\t\t\t\treq.Method,\r\n\t\t\t\treq.RequestURI,\r\n\t\t\t\ttime.Since(start),\r\n\t\t\t\treq.ContentLength,\r\n\t\t\t\treq.TransferEncoding,\r\n\t\t\t\treq.Header,\r\n\t\t\t\treq.Body,\r\n\t\t\t)\r\n\t\t}\r\n\t\tnext.ServeHTTP(w, req)\r\n\t})\r\n}", "func loggerMiddleware(next http.Handler) http.Handler {\n\n\tlevel := Level()\n\n\tfn := func(w http.ResponseWriter, r *http.Request) {\n\t\tctx := r.Context()\n\t\tt1 := time.Now()\n\t\treqId := middleware.GetReqID(ctx)\n\t\tpreReqContent := log.Fields{\n\t\t\t\"time\": t1,\n\t\t\t\"requestId\": reqId,\n\t\t\t\"method\": r.Method,\n\t\t\t\"endpoint\": r.RequestURI,\n\t\t\t\"protocol\": 
r.Proto,\n\t\t}\n\n\t\tif r.RemoteAddr != \"\" {\n\t\t\tpreReqContent[\"ip\"] = r.RemoteAddr\n\t\t}\n\n\t\ttid := r.Header.Get(\"X-TID\")\n\t\tif tid != \"\" {\n\t\t\tpreReqContent[\"tid\"] = tid\n\t\t}\n\n\t\tlogger := log.FromContext(ctx).WithFields(preReqContent)\n\t\tctx = logger.ToContext(ctx)\n\t\tr = r.WithContext(ctx)\n\t\tlogger.Info(\"request started\")\n\n\t\tdefer func() {\n\t\t\tif err := recover(); err != nil {\n\t\t\t\tlog.WithFields(\n\t\t\t\t\tlog.Fields{\n\t\t\t\t\t\t\"requestId\": reqId,\n\t\t\t\t\t\t\"duration\": time.Since(t1),\n\t\t\t\t\t\t\"status\": 500,\n\t\t\t\t\t\t\"stacktrace\": string(debug.Stack()),\n\t\t\t\t\t},\n\t\t\t\t).Error(\"request finished with panic\")\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t}()\n\n\t\tww := middleware.NewWrapResponseWriter(w, r.ProtoMajor)\n\t\tnext.ServeHTTP(ww, r)\n\n\t\tstatus := ww.Status()\n\t\tpostReqContent := log.Fields{\n\t\t\t\"requestId\": reqId,\n\t\t\t\"duration\": time.Since(t1),\n\t\t\t\"contentLength\": ww.BytesWritten(),\n\t\t\t\"status\": status,\n\t\t}\n\n\t\tif cache := ww.Header().Get(\"x-cache\"); cache != \"\" {\n\t\t\tpostReqContent[\"cache\"] = cache\n\t\t}\n\n\t\tlogger = log.FromContext(ctx).WithFields(postReqContent)\n\t\tif status >= 100 && status < 500 {\n\n\t\t\tvar method func(format string, args ...interface{})\n\n\t\t\tswitch level {\n\t\t\tcase \"TRACE\":\n\t\t\t\tmethod = logger.Tracef\n\t\t\tcase \"DEBUG\":\n\t\t\t\tmethod = logger.Debugf\n\t\t\tdefault:\n\t\t\t\tmethod = logger.Infof\n\t\t\t}\n\n\t\t\tmethod(\"request finished\")\n\t\t} else if status == 500 {\n\t\t\tlogger.WithField(\"stacktrace\",\n\t\t\t\tstring(debug.Stack())).Error(\"internal error during request\")\n\t\t} else {\n\t\t\tmessage := \"request finished\"\n\n\t\t\t// FIX: For some reason, the 'context.deadlineExceededError{}' isn't getting into here, we\n\t\t\t// did a quick fix checking the status code and returing the same message as the error., but\n\t\t\t// something is wrong and we need fix it.\n\t\t\tif 
status == 504 {\n\t\t\t\tmessage += \": context deadline exceeded\"\n\t\t\t} else {\n\t\t\t\tif err := ctx.Err(); err != nil {\n\t\t\t\t\tmessage += fmt.Sprintf(\": %s\", err.Error())\n\t\t\t\t}\n\t\t\t}\n\t\t\tlogger.Error(message)\n\t\t}\n\t}\n\n\treturn http.HandlerFunc(fn)\n}", "func RequestLogger() wago.MiddleWareHandler {\n\treturn func(c *wago.Context) {\n\t\tlogger.WithFields(logger.Fields{\n\t\t\twago.REQUEST_ID: c.GetString(wago.REQUEST_ID),\n\t\t\t\"path\": c.Request.URL.Path,\n\t\t\t\"host\": c.Request.Host,\n\t\t\t\"header\": c.Request.Header,\n\t\t}).Debug(\"before-handle\")\n\n\t\tc.Next()\n\n\t\tlogger.WithFields(logger.Fields{\n\t\t\twago.REQUEST_ID: c.GetString(wago.REQUEST_ID),\n\t\t\t\"path\": c.Request.URL.Path,\n\t\t\t\"host\": c.Request.Host,\n\t\t\t\"header\": c.Request.Header,\n\t\t}).Debug(\"after-handle\")\n\t}\n}", "func Logger(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tstart := time.Now()\n\t\tww := middleware.NewWrapResponseWriter(w, r.ProtoMajor)\n\t\thost, port, _ := net.SplitHostPort(r.RemoteAddr)\n\t\tdefer func() {\n\t\t\tvar event *zerolog.Event\n\t\t\tif ww.Status() < 500 {\n\t\t\t\tevent = log.Info()\n\t\t\t} else {\n\t\t\t\tevent = log.Error()\n\t\t\t}\n\t\t\tevent.\n\t\t\t\tFields(map[string]interface{}{\n\t\t\t\t\t\"host\": host,\n\t\t\t\t\t\"port\": port,\n\t\t\t\t\t\"method\": r.Method,\n\t\t\t\t\t\"status\": ww.Status(),\n\t\t\t\t\t\"took\": float64(time.Since(start)) / 1e6,\n\t\t\t\t\t\"bytes_in\": r.Header.Get(\"Content-Length\"),\n\t\t\t\t\t\"bytes_out\": ww.BytesWritten(),\n\t\t\t\t}).\n\t\t\t\tTimestamp().\n\t\t\t\tMsg(r.URL.Path)\n\t\t}()\n\t\tnext.ServeHTTP(ww, r)\n\t})\n}", "func loggerMiddleware() martini.Handler {\n\treturn func(res http.ResponseWriter, req *http.Request, c martini.Context) {\n\t\tstart := time.Now()\n\t\taddr := req.Header.Get(\"X-Real-IP\")\n\t\tif addr == \"\" {\n\t\t\taddr = req.Header.Get(\"X-Forwarded-For\")\n\t\t\tif 
addr == \"\" {\n\t\t\t\taddr = req.RemoteAddr\n\t\t\t}\n\t\t}\n\t\trw := res.(martini.ResponseWriter)\n\t\tc.Next()\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"method\": req.Method,\n\t\t\t\"path\": req.URL.Path,\n\t\t\t\"addr\": addr,\n\t\t\t\"status\": rw.Status(),\n\t\t\t\"status_text\": http.StatusText(rw.Status()),\n\t\t\t\"duration\": time.Since(start),\n\t\t}).Info(\"Completed\")\n\t}\n}", "func (s *server) loggingMiddleware(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tctx := r.Context()\n\t\tlogger := logging.GetLogger(ctx)\n\t\trouteName := mux.CurrentRoute(r).GetName()\n\t\tmethod := r.Method\n\t\trequestLogger := logger.With(\n\t\t\tzap.String(\"route\", routeName),\n\t\t\tzap.String(\"method\", method),\n\t\t)\n\n\t\tctx = logging.ContextWithLogger(ctx, requestLogger)\n\t\tctx = context.WithValue(ctx, \"route\", routeName)\n\t\tctx = context.WithValue(ctx, \"method\", method)\n\n\t\tlogging.GetLogger(ctx).Info(\"request started\")\n\t\tnext.ServeHTTP(w, r.WithContext(ctx))\n\t\tlogging.GetLogger(ctx).Info(\"request completed\")\n\t})\n}", "func (s *Service) LoggingMiddleware(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t// Do stuff here\n\t\tl := log.WithFields(log.Fields{\n\t\t\t\"environment\": s.environment,\n\t\t\t\"request-path\": r.RequestURI,\n\t\t\t\"request-method\": r.Method,\n\t\t})\n\t\tl.Infoln()\n\t\t// Call the next handler, which can be another middleware in the chain, or the final handler.\n\t\tm := httpsnoop.CaptureMetrics(next, w, r)\n\t\tl.WithFields(log.Fields{\n\t\t\t\"request-duration\": m.Duration,\n\t\t\t\"response-code\": m.Code,\n\t\t}).Infoln(\"handler response\")\n\t})\n}", "func LoggerMiddleware() echo.MiddlewareFunc {\n\treturn func(next echo.HandlerFunc) echo.HandlerFunc {\n\t\treturn func(c echo.Context) (err error) {\n\n\t\t\tif logRequests {\n\n\t\t\t\treq := c.Request()\n\t\t\t\tres := 
c.Response()\n\t\t\t\tstart := time.Now()\n\t\t\t\tif err := next(c); err != nil {\n\t\t\t\t\tc.Error(err)\n\t\t\t\t}\n\t\t\t\tstop := time.Now()\n\n\t\t\t\tp := req.URL.Path\n\t\t\t\tif p == \"\" {\n\t\t\t\t\tp = \"/\"\n\t\t\t\t}\n\n\t\t\t\tbytesIn := req.Header.Get(echo.HeaderContentLength)\n\t\t\t\tif bytesIn == \"\" {\n\t\t\t\t\tbytesIn = \"0\"\n\t\t\t\t}\n\n\t\t\t\tlogContext := logrus.WithFields(map[string]interface{}{\n\t\t\t\t\t\"time_rfc3339\": time.Now().Format(time.RFC3339),\n\t\t\t\t\t\"remote_ip\": c.RealIP(),\n\t\t\t\t\t\"host\": req.Host,\n\t\t\t\t\t\"uri\": req.RequestURI,\n\t\t\t\t\t\"method\": req.Method,\n\t\t\t\t\t\"path\": p,\n\t\t\t\t\t\"referer\": req.Referer(),\n\t\t\t\t\t\"user_agent\": req.UserAgent(),\n\t\t\t\t\t\"status\": res.Status,\n\t\t\t\t\t\"latency\": strconv.FormatInt(stop.Sub(start).Nanoseconds()/1000, 10),\n\t\t\t\t\t\"latency_human\": stop.Sub(start).String(),\n\t\t\t\t\t\"bytes_in\": bytesIn,\n\t\t\t\t\t\"bytes_out\": strconv.FormatInt(res.Size, 10),\n\t\t\t\t})\n\n\t\t\t\tmsg := fmt.Sprintf(\"%s %s [ %d ]\", req.Method, p, res.Status)\n\t\t\t\tif res.Status > 499 {\n\t\t\t\t\tlogContext.Error(msg)\n\t\t\t\t} else if res.Status > 399 {\n\t\t\t\t\tlogContext.Warn(msg)\n\t\t\t\t} else {\n\t\t\t\t\tlogContext.Info(msg)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t}\n}", "func (h *HomeHandlers) Logger(next http.HandlerFunc) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tstartTime := time.Now()\n\t\tdefer h.logger.Printf(\"request processed in %d ms \\n\", time.Now().Sub(startTime))\n\t\tnext(w, r)\n\t}\n}", "func (s Server) logger(handler http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\trw := newAppResponseWriter(w)\n\n\t\tstart := time.Now()\n\t\tdefer func() {\n\t\t\ti := log.Info()\n\n\t\t\thost, _, err := net.SplitHostPort(r.RemoteAddr)\n\t\t\tif err != nil {\n\t\t\t\ti.Err(err)\n\t\t\t}\n\n\t\t\ti.Str(\"ip\", 
host).\n\t\t\t\tStr(\"startAt\", start.String()).\n\t\t\t\tStr(\"method\", r.Method).\n\t\t\t\tStr(\"path\", r.URL.Path).\n\t\t\t\tStr(\"proto\", r.Proto).\n\t\t\t\tInt(\"status\", rw.statusCode).\n\t\t\t\tTimeDiff(\"latency\", time.Now(), start)\n\n\t\t\tif len(r.UserAgent()) > 0 {\n\t\t\t\ti.Str(\"userAgent\", r.UserAgent())\n\t\t\t}\n\n\t\t\ti.Msg(\"http-request\")\n\t\t}()\n\n\t\thandler.ServeHTTP(rw, r)\n\t})\n}", "func Logger(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) {\n\t\tfmt.Printf(\"[ %s ] :: %s :: %v\\n\", req.Method, req.URL, time.Now())\n\t\tnext.ServeHTTP(res, req)\n\t})\n}", "func Logger(next http.Handler) http.Handler {\n\tfn := func(w http.ResponseWriter, r *http.Request) {\n\t\tstart := time.Now()\n\t\twrw := middleware.NewWrapResponseWriter(w, r.ProtoMajor)\n\t\tnext.ServeHTTP(wrw, r)\n\n\t\tscheme := \"http\"\n\t\tif r.TLS != nil {\n\t\t\tscheme = \"https\"\n\t\t}\n\n\t\tfields := []zapcore.Field{\n\t\t\tzap.String(\"requestScheme\", scheme),\n\t\t\tzap.String(\"requestProto\", r.Proto),\n\t\t\tzap.String(\"requestMethod\", r.Method),\n\t\t\tzap.String(\"requestAddr\", r.RemoteAddr),\n\t\t\tzap.String(\"requestUserAgent\", r.UserAgent()),\n\t\t\tzap.String(\"requestURI\", fmt.Sprintf(\"%s://%s%s\", scheme, r.Host, r.RequestURI)),\n\t\t\tzap.Int(\"responseStatus\", wrw.Status()),\n\t\t\tzap.Int(\"responseBytes\", wrw.BytesWritten()),\n\t\t\tzap.Float64(\"requestLatency\", float64(time.Since(start).Nanoseconds())/1000000),\n\t\t}\n\n\t\tlog.Info(r.Context(), \"Request completed\", fields...)\n\t}\n\n\treturn http.HandlerFunc(fn)\n}", "func logRequests(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tvar source string\n\t\tif isExternal(r) {\n\t\t\tsource = \"external\"\n\t\t} else {\n\t\t\tsource = \"internal\"\n\t\t}\n\t\tlog.Println(source, r.Method, r.RequestURI)\n\t\t// Call the next handler, which can be another 
middleware in the chain, or the final handler.\n\t\tnext.ServeHTTP(w, r)\n\t})\n}", "func loggerMiddleware(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tstart := time.Now()\n\n\t\tnext.ServeHTTP(w, r)\n\n\t\tlog.Printf(\"[%s]\\t%s\\t%s\", r.Method, r.URL.String(), time.Since(start))\n\t})\n}", "func LoggingMw(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tlogger.Logger.Infow(\"incoming request\", \"method\", r.Method, \"path\", r.URL.Path)\n\t\tnext.ServeHTTP(w, r)\n\t})\n}", "func LoggerMiddleware(skippers ...SkipperFunc) gin.HandlerFunc {\n\treturn func (c *gin.Context) {\n\t\tif skipHandler(c, skippers...) {\n\t\t\tc.Next()\n\t\t\treturn\n\t\t}\n\n\t\tmethod := c.Request.Method\n\n\t\tfields := make(logger.Fields)\n\n\t\tfields[\"ip\"] = c.ClientIP()\n\t\tfields[\"method\"] = method\n\t\tfields[\"url\"] = c.Request.URL.String()\n\t\tfields[\"proto\"] = c.Request.Proto\n\n\t\tif method == http.MethodPost || method == http.MethodPut {\n\t\t\tmediaType, _, _ := mime.ParseMediaType(c.GetHeader(\"Content-Type\"))\n\t\t\tif mediaType == \"application/json\" {\n\t\t\t\tbody, err := ioutil.ReadAll(c.Request.Body)\n\t\t\t\tc.Request.Body.Close()\n\t\t\t\tif err == nil {\n\t\t\t\t\tbuf := bytes.NewBuffer(body)\n\t\t\t\t\tc.Request.Body = ioutil.NopCloser(buf)\n\t\t\t\t\tfields[\"request_body\"] = string(body)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tstart := time.Now()\n\t\tc.Next()\n\t\ttimeConsuming := time.Since(start).Nanoseconds() / 1e6\n\t\tfields[\"time_consuming(ms)\"] = timeConsuming\n\n\t\tfields[\"res_status\"] = c.Writer.Status()\n\t\tif id := ginhelper.GetUserID(c); id != \"\" {\n\t\t\tfields[\"user_id\"] = id\n\t\t}\n\t\tif r := ginhelper.GetResponseBody(c); r != \"\" {\n\t\t\tfields[\"response_body\"] = r\n\t\t}\n\n\t\tlogger.InfoWithFields(\"API Log\", fields)\n\t}\n}", "func Logger(next http.Handler) http.Handler {\n\treturn 
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tlog.Printf(\"%s \\\"%s %s %s\\\" %d\\n\", r.RemoteAddr, r.Method, r.RequestURI, r.Proto, r.ContentLength)\n\n\t\tnext.ServeHTTP(w, r)\n\t})\n}", "func (s *Setup) LoggerMiddleware(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tstart := time.Now()\n\n\t\ts.logger.Infow(\n\t\t\t\"request_logging\",\n\t\t\t\"method\", r.Method,\n\t\t\t\"url\", r.URL.String(),\n\t\t\t\"agent\", r.UserAgent(),\n\t\t\t\"referer\", r.Referer(),\n\t\t\t\"proto\", r.Proto,\n\t\t\t\"remote_address\", r.RemoteAddr,\n\t\t\t\"latency\", time.Since(start),\n\t\t)\n\n\t\tnext.ServeHTTP(w, r)\n\t})\n}", "func (h *Handlers) Logger(next http.HandlerFunc) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tstartTime := time.Now()\n\t\tnext(w, r)\n\t\th.logger.Printf(\"request processed in %vs\\n\", time.Now().Sub(startTime))\n\t}\n}", "func LoggingMiddleware(next http.Handler) http.HandlerFunc {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tstart := time.Now()\n\t\twrapped := wrapResponseWriter(w)\n\t\tnext.ServeHTTP(wrapped, r)\n\t\tLog.Info().Int(\"status\", wrapped.status).Str(\"method\", r.Method).Str(\"path\", r.URL.EscapedPath()).Str(\"duration\", time.Since(start).String()).Msg(\"Request processed\")\n\t})\n}", "func (app *application) logRequest(next http.Handler) http.Handler {\r\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\r\n\t\tapp.infoLog.Printf(\"%s - %s %s %s\", r.RemoteAddr, r.Proto, r.Method, r.URL.RequestURI())\r\n\r\n\t\tnext.ServeHTTP(w, r)\r\n\t})\r\n}", "func LoggingMiddleware(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tlog.Infof(\"request [url = %v] [method = %v], [remote = %v], [protocol = 
%v]\",\n\t\t\tr.RequestURI,\n\t\t\tr.Method,\n\t\t\tr.RemoteAddr,\n\t\t\tr.Proto)\n\t\tnext.ServeHTTP(w, r)\n\t})\n}", "func Logger(nextFunc http.HandlerFunc) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tlog.Printf(\" %s %s %s\", r.Method, r.RequestURI, r.Host)\n\n\t\tnextFunc(w, r)\n\t}\n}", "func RequestLogger(next http.Handler) http.Handler {\n\tfn := func(w http.ResponseWriter, r *http.Request) {\n\t\tstart := time.Now()\n\t\tctx := r.Context()\n\t\t// Parse request information\n\t\trequestURIparts := append(strings.SplitN(r.RequestURI, \"?\", 2), \"\")\n\n\t\t// Instantiate verbose logger\n\t\tlogger := logrus.\n\t\t\tWithField(\"request\", uuid.New().String()).\n\t\t\tWithField(\"route\", r.Method+\" \"+requestURIparts[0]).\n\t\t\tWithField(\"query\", requestURIparts[1]).\n\t\t\tWithField(\"instance\", instanceID).\n\t\t\tWithField(\"ip\", r.RemoteAddr).\n\t\t\tWithField(\"referer\", r.Referer()).\n\t\t\tWithField(\"agent\", r.UserAgent())\n\n\t\tctx = loglib.SetLogger(ctx, logger.Logger)\n\t\tlogger.Infof(\"START\")\n\n\t\tr = r.WithContext(ctx)\n\t\tnext.ServeHTTP(w, r)\n\n\t\tlogger.\n\t\t\tWithField(\"duration\", time.Since(start)).\n\t\t\tInfof(\"END\")\n\n\t}\n\treturn http.HandlerFunc(fn)\n}", "func SetMiddleWareLogger(next http.HandlerFunc) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\t// fmt.Println(\"\")\n\t\tlog.Printf(\"%s %s%s %s\", r.Method, r.Host, r.RequestURI, r.Proto)\n\t\tnext(w, r)\n\t}\n}", "func logger(h http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tlog.Println(\"request: \" + r.RequestURI)\n\t\th.ServeHTTP(w, r)\n\t})\n}", "func RequestLoggingHandler(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {\n\tres := w.(negroni.ResponseWriter)\n\tnext(w, r)\n\tlogrus.Info(fmt.Sprintf(\"%s %s %d\", r.Method, r.RequestURI, res.Status()))\n}", "func logger(next http.Handler) http.Handler {\n\treturn 
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tformat := \"[%s] User agent => %s Remote addr => %s\"\n\t\tlog.Printf(format, r.Method, r.UserAgent(), r.RemoteAddr)\n\t\tnext.ServeHTTP(w, r)\n\t})\n}", "func LoggerMiddleware(logger *logrus.Logger) gin.HandlerFunc {\n\tlogger.SetLevel(log.ErrorLevel)\n\tlogger.SetFormatter(&log.TextFormatter{})\n\n\treturn func(c *gin.Context) {\n\t\t// Start timer\n\t\tstart := time.Now().UTC()\n\n\t\t// log.Printf(\"%s %s %s %s\", c.Request.Method, c.Request.Host, c.Request.RequestURI, c.Request.Proto)\n\t\t// Process Request\n\t\tc.Next()\n\n\t\tif status := c.Writer.Status(); status != 200 {\n\t\t\tentry := logger.WithFields(log.Fields{\n\t\t\t\t\"client_ip\": https.GetClientIP(c),\n\t\t\t\t\"duration\": start,\n\t\t\t\t\"method\": c.Request.Method,\n\t\t\t\t\"path\": c.Request.RequestURI,\n\t\t\t\t\"status\": c.Writer.Status(),\n\t\t\t\t\"referrer\": c.Request.Referer(),\n\t\t\t\t\"request_id\": c.Writer.Header().Get(\"X-Request-Id\"),\n\t\t\t\t\"user_id\": https.GetUserID(c),\n\t\t\t})\n\n\t\t\tentry.Error(c.Errors.String())\n\t\t}\n\t}\n}", "func accessLogMiddleware(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\tlogFields := logrus.Fields{\n\t\t\t\"method\": req.Method,\n\t\t\t\"url\": req.URL,\n\t\t\t\"remote-addr\": req.RemoteAddr,\n\t\t\t\"user-agent\": req.UserAgent(),\n\t\t}\n\n\t\t//Create a spy with which we can see what was sent back to the client\n\t\tspy := &httpResponseSpy{Writer: w, ResponseCode: 200}\n\n\t\t//Start a timer\n\t\ttimeBefore := time.Now()\n\n\t\tnext.ServeHTTP(spy, req)\n\n\t\t//Get the time the request took\n\t\tlogFields[\"elasped-time\"] = time.Since(timeBefore)\n\n\t\tlogFields[\"bytes-sent\"] = spy.BytesSent\n\t\tlogFields[\"reponse-code\"] = spy.ResponseCode\n\n\t\tlogrus.WithFields(logFields).Info(\"Access log\")\n\t})\n}", "func (s *GoHomeServer) logMiddleware(next http.Handler) http.Handler {\n\treturn 
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\ts.Logger.Debug(\"REST invocation\", \"url\", r.RequestURI)\n\t\tnext.ServeHTTP(w, r)\n\t})\n}", "func LogMiddleware(h http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tdefer log.Printf(\"[HTTP] %s %s\", r.Method, r.RequestURI)\n\t\th.ServeHTTP(w, r)\n\t})\n}", "func Logging(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tlog.Printf(\"[%v] : %v\\n\", r.Method, r.URL.Path)\n\t\tnext.ServeHTTP(w, r)\n\t})\n}", "func LoggerMiddleware(c *web.C, h http.Handler) http.Handler {\n\tfn := func(w http.ResponseWriter, r *http.Request) {\n\t\tctx := gctx.FromC(*c)\n\t\tmw := mutil.WrapWriter(w)\n\n\t\tlogger := log.WithField(\"req\", middleware.GetReqID(*c))\n\n\t\tctx = log.Set(ctx, logger)\n\t\tgctx.Set(c, ctx)\n\n\t\tlogStartOfRequest(ctx, r)\n\n\t\tthen := time.Now()\n\t\th.ServeHTTP(mw, r)\n\t\tduration := time.Now().Sub(then)\n\t\t// Checking `Accept` header from user request because if the streaming connection\n\t\t// is reset before sending the first event no Content-Type header is sent in a response.\n\t\tacceptHeader := r.Header.Get(\"Accept\")\n\t\tstreaming := strings.Contains(acceptHeader, render.MimeEventStream)\n\t\tlogEndOfRequest(ctx, r, duration, mw, streaming)\n\t}\n\n\treturn http.HandlerFunc(fn)\n}", "func Logger(next http.Handler) http.HandlerFunc {\n\tstdlogger := log.New(os.Stdout, \"\", 0)\n\t//errlogger := log.New(os.Stderr, \"\", 0)\n\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\t// Start timer\n\t\tstart := time.Now()\n\n\t\t// Process request\n\t\twriter := statusWriter{w, 0}\n\t\tnext.ServeHTTP(&writer, r)\n\n\t\t// Stop timer\n\t\tend := time.Now()\n\t\tlatency := end.Sub(start)\n\n\t\tclientIP := r.RemoteAddr\n\t\tmethod := r.Method\n\t\tstatusCode := writer.status\n\t\tstatusColor := colorForStatus(statusCode)\n\t\tmethodColor := 
colorForMethod(method)\n\n\t\tstdlogger.Printf(\"[HTTP] %v |%s %3d %s| %12v | %s |%s %s %-7s %s\\n\",\n\t\t\tend.Format(\"2006/01/02 - 15:04:05\"),\n\t\t\tstatusColor, statusCode, reset,\n\t\t\tlatency,\n\t\t\tclientIP,\n\t\t\tmethodColor, reset, method,\n\t\t\tr.URL.Path,\n\t\t)\n\t}\n}", "func httpLogger(handler http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tlog.Printf(\"%s %s %s\", r.RemoteAddr, r.Method, r.URL)\n\t\thandler.ServeHTTP(w, r)\n\t})\n}", "func logMiddleware(handler http.HandlerFunc) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tlog.Printf(\"%s - %s - %s\", r.RemoteAddr, r.Method, r.URL.Path)\n\t\tif debug {\n\t\t\tlog.Printf(\"Request: %+v\", r)\n\t\t}\n\t\t// Execute the original handler\n\t\thandler(w, r)\n\t}\n}", "func Logging(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tm := httpsnoop.CaptureMetrics(next, w, r)\n\t\tlog.Info().\n\t\t\tStr(\"method\", r.Method).\n\t\t\tStr(\"url\", r.URL.String()).\n\t\t\tInt(\"code\", m.Code).\n\t\t\tDur(\"duration\", m.Duration).\n\t\t\tSend()\n\t})\n}", "func logRequest(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tlog.Printf(\"requested %s\", r.URL.Path)\n\t\tnext.ServeHTTP(w, r)\n\t})\n}", "func Logging(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tlogrus.WithField(\"path\", r.RequestURI).\n\t\t\tWithField(\"method\", r.Method).\n\t\t\tWithField(\"request_id\", GetRequestID(r.Context())).\n\t\t\tDebug(\"Request arrived\")\n\t\tnext.ServeHTTP(w, r)\n\t})\n}", "func (srv *WebServer) logRequest(next http.HandlerFunc) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Printf(\"%s %s\\n\", r.Method, r.RequestURI)\n\t\tnext(w, r)\n\t}\n}", "func LogMiddleware(logger logrus.FieldLogger) 
gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tlog := &logReq{\n\t\t\tURI: c.Request.URL.Path,\n\t\t\tMethod: c.Request.Method,\n\t\t\tIP: c.ClientIP(),\n\t\t\tContentType: c.ContentType(),\n\t\t\tAgent: c.Request.Header.Get(\"User-Agent\"),\n\t\t}\n\n\t\t// format is string\n\t\toutput := fmt.Sprintf(\"%s %s %s %s %s\",\n\t\t\tlog.Method,\n\t\t\tlog.URI,\n\t\t\tlog.IP,\n\t\t\tlog.ContentType,\n\t\t\tlog.Agent,\n\t\t)\n\n\t\t// TODO: Use logger\n\t\tlogger.Debug(output)\n\n\t\tc.Next()\n\t}\n}", "func Logger() gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tstart := time.Now()\n\t\tpath := c.Request.URL.Path\n\t\trequestID := GetRequestID(c)\n\t\trequestLog.Info(fmt.Sprintf(\"Incomming request: %s %s\", c.Request.Method, path), zap.String(\"requestId\", requestID))\n\n\t\tc.Next()\n\n\t\tend := time.Now()\n\t\tlatency := end.Sub(start)\n\n\t\trequestLog.Info(fmt.Sprintf(\"Outgoing request: %s %s\", c.Request.Method, path),\n\t\t\tzap.Int(\"status\", c.Writer.Status()),\n\t\t\tzap.String(\"requestId\", requestID),\n\t\t\tzap.Duration(\"latency\", latency))\n\t}\n}", "func Logger(next http.Handler, name string) http.Handler {\n\t// returns handler function\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t// start time\n\t\tstart := time.Now()\n\n\t\t// run the handler\n\t\tnext.ServeHTTP(w, r)\n\n\t\t// log the request details\n\t\tlog.Printf(\n\t\t\t\"%s\\t%s\\t%s\\t%s\",\n\t\t\tr.Method,\n\t\t\tr.RequestURI,\n\t\t\tname,\n\t\t\ttime.Since(start),\n\t\t)\n\t})\n}", "func Logging(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tlog.Printf(\"%s%s %s\", r.Host, r.RequestURI, r.UserAgent())\n\t\tnext.ServeHTTP(w, r)\n\t})\n}", "func Logger(route routing.Route) Adapter {\n\treturn func(h http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tstart := time.Now()\n\n\t\t\t// server http\n\t\t\th.ServeHTTP(w, 
r)\n\n\t\t\t// by doing this the log happen after all http request\n\t\t\tlog.Printf(\n\t\t\t\t\"%s\\t%s\\t%s\\t%s\",\n\t\t\t\troute.Method,\n\t\t\t\troute.Pattern,\n\t\t\t\troute.Name,\n\t\t\t\ttime.Since(start),\n\t\t\t)\n\t\t})\n\t}\n}", "func RequestLogger(h http.Handler) http.Handler {\n\tf := func(w http.ResponseWriter, r *http.Request) {\n\t\trw := &responseWriter{w, 0}\n\t\th.ServeHTTP(rw, r)\n\t\tlog.EntryFromContext(r.Context()).WithFields(logrus.Fields{\n\t\t\t\"http-status\": fmtResponseCode(rw.statusCode),\n\t\t}).Infof(\"incoming request\")\n\t}\n\treturn http.HandlerFunc(f)\n}", "func Logger(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tinfo := newLogInfo(r)\n\t\tww := middleware.NewWrapResponseWriter(w, r.ProtoMajor)\n\t\tt1 := time.Now()\n\n\t\tdefer func() {\n\t\t\tinfo.Write(ww.Status(), ww.BytesWritten(), time.Since(t1))\n\t\t}()\n\n\t\tnext.ServeHTTP(ww, withLogInfo(r, info))\n\t})\n}", "func HTTPLogger(inner http.Handler, name string) http.Handler {\n return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n start := time.Now()\n\n inner.ServeHTTP(w, r)\n\n Logs.Infof(\n \"%s\\t%s\\t%s\\t%s\",\n r.Method,\n r.RequestURI,\n name,\n time.Since(start),\n )\n })\n}", "func Middleware() func(next echo.HandlerFunc) echo.HandlerFunc {\n\tl := logger.New()\n\n\treturn func(next echo.HandlerFunc) echo.HandlerFunc {\n\t\treturn func(c echo.Context) error {\n\t\t\t// Record the time start time of the middleware invocation\n\t\t\tt1 := time.Now()\n\n\t\t\t// Generate a new UUID that will be used to recognize this particular\n\t\t\t// request\n\t\t\tid, err := uuid.NewV4()\n\t\t\tif err != nil {\n\t\t\t\treturn errors.WithStack(err)\n\t\t\t}\n\n\t\t\t// Create a child logger with the unique UUID created and attach it to\n\t\t\t// the echo.Context. 
By attaching it to the context it can be fetched by\n\t\t\t// later middleware or handler functions to emit events with a logger\n\t\t\t// that contains this ID. This is useful as it allows us to emit all\n\t\t\t// events with the same request UUID.\n\t\t\tlog := l.ID(id.String())\n\t\t\tc.Set(key, log)\n\n\t\t\t// Execute the next middleware/handler function in the stack.\n\t\t\tif err := next(c); err != nil {\n\t\t\t\tc.Error(err)\n\t\t\t}\n\n\t\t\t// We have now succeeded executing all later middlewares in the stack and\n\t\t\t// have come back to the logger middleware. Record the time at which we\n\t\t\t// came back to this middleware. We can use the difference between t2 and\n\t\t\t// t1 to calculate the request duration.\n\t\t\tt2 := time.Now()\n\n\t\t\t// Get the request IP address.\n\t\t\tvar ipAddress string\n\t\t\tif xff := c.Request().Header.Get(\"x-forwarded-for\"); xff != \"\" {\n\t\t\t\tsplit := strings.Split(xff, \",\")\n\t\t\t\tipAddress = strings.TrimSpace(split[len(split)-1])\n\t\t\t} else {\n\t\t\t\tipAddress = c.Request().RemoteAddr\n\t\t\t}\n\n\t\t\t// Emit a log event with as much metadata as we can.\n\t\t\tlog.Root(logger.Data{\n\t\t\t\t\"status_code\": c.Response().Status,\n\t\t\t\t\"method\": c.Request().Method,\n\t\t\t\t\"path\": c.Request().URL.Path,\n\t\t\t\t\"route\": c.Path(),\n\t\t\t\t\"response_time\": t2.Sub(t1).Seconds() * 1000,\n\t\t\t\t\"referer\": c.Request().Referer(),\n\t\t\t\t\"user_agent\": c.Request().UserAgent(),\n\t\t\t\t\"ip_address\": ipAddress,\n\t\t\t\t\"trace_id\": c.Request().Header.Get(\"x-amzn-trace-id\"),\n\t\t\t}).Info(\"request handled\")\n\n\t\t\t// Succeeded executing the middleware invocation. 
A nil response\n\t\t\t// represents no errors happened.\n\t\t\treturn nil\n\t\t}\n\t}\n}", "func RequestLogging(n janice.HandlerFunc) janice.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) error {\n\t\tp := r.URL.String()\n\n\t\tvar err error\n\t\tm := httpsnoop.CaptureMetricsFn(w, func(ww http.ResponseWriter) {\n\t\t\terr = n(ww, r)\n\t\t})\n\n\t\tle := Logger.WithFields(logrus.Fields{\n\t\t\t\"type\": \"request\",\n\t\t\t\"host\": r.Host,\n\t\t\t\"method\": r.Method,\n\t\t\t\"path\": p,\n\t\t\t\"code\": m.Code,\n\t\t\t\"duration\": m.Duration.String(),\n\t\t\t\"written\": m.Written,\n\t\t})\n\n\t\tif rid, ok := GetRequestID(r); ok {\n\t\t\tle = le.WithField(\"request\", rid)\n\t\t}\n\n\t\tle.Info()\n\n\t\treturn err\n\t}\n}", "func Logging() Middleware {\n\n // Create the middleware\n return func(f http.HandlerFunc) http.HandlerFunc {\n\n // Define the middleware's behavior\n return func(w http.ResponseWriter, r *http.Request) {\n start := time.Now()\n defer func() { log.Println(r.URL.Path, time.Since(start)) }()\n\n // Call the next middleware in the chain\n f(w, r)\n }\n }\n}", "func RequestLogger(f httprouter.Handle) httprouter.Handle {\n\treturn httprouter.Handle(func(w http.ResponseWriter, r *http.Request, p httprouter.Params) {\n\t\tt := time.Now()\n\t\tf(w, r, p)\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"method\": r.Method,\n\t\t\t\"resource\": r.URL.Path,\n\t\t\t\"took\": fmt.Sprintf(\"%d%s\", time.Since(t).Nanoseconds()/1000000, \"ms\"),\n\t\t}).Info(fmt.Sprintf(\"%s %s\", r.Method, r.URL.Path))\n\t})\n}", "func Logging(rw http.ResponseWriter, req *http.Request, next http.HandlerFunc) {\n\tcurrent := time.Now()\n\n\ttempQueryStr, err := json.Marshal(req.URL.Query())\n\tqueryStr := cleanRequestStr(tempQueryStr, err)\n\n\tbody, err := ioutil.ReadAll(req.Body)\n\tbodyStr := cleanRequestStr(body, err)\n\n\tnext(rw, req)\n\n\tres := rw.(negroni.ResponseWriter)\n\n\tfinished := time.Since(current)\n\n\tlog.Printf(\"{ \\\"User-Agent\\\": 
\\\"%s\\\", \\\"Url\\\": \\\"%s\\\", \\\"Host\\\": \\\"%s\\\", \\\"Uri\\\": \\\"%s\\\", \\\"Method\\\": \\\"%s\\\", \\\"Status Code\\\": %d, \\\"Query\\\": \\\"%s\\\", \\\"Body\\\": \\\"%s\\\", \\\"Response Time\\\": \\\"%s\\\" \\\"Content-Type\\\": \\\"%s\\\", \\\"Content-Length\\\": %d }\",\n\t\treq.Header.Get(\"User-Agent\"), req.Host+req.RequestURI, req.Host, req.RequestURI, req.Method, res.Status(), queryStr, bodyStr, finished, req.Header.Get(\"Content-Type\"), req.ContentLength)\n\n}", "func Logger(next http.HandlerFunc) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\trec := statusRecorder{w, 200, time.Now()}\n\t\tnext(&rec, r)\n\t\tlogger.HTTPLogs(logger.PaintStatus(rec.status), fmt.Sprint(time.Since(rec.elapsed)), r.Host, logger.PaintMethod(r.Method), r.URL.Path)\n\t}\n}", "func RequestLogger(f LogFormatter) func(next http.Handler) http.Handler {\n\treturn func(next http.Handler) http.Handler {\n\t\tfn := func(w http.ResponseWriter, r *http.Request) {\n\t\t\tentry := f.NewLogEntry(r)\n\t\t\tww := NewWrapResponseWriter(w, r.ProtoMajor)\n\n\t\t\tt1 := time.Now()\n\t\t\tdefer func() {\n\t\t\t\tentry.Write(ww.Status(), ww.BytesWritten(), ww.Header(), time.Since(t1), nil)\n\t\t\t}()\n\n\t\t\tnext.ServeHTTP(ww, WithLogEntry(r, entry))\n\t\t}\n\t\treturn http.HandlerFunc(fn)\n\t}\n}", "func LogMiddleware(typ string, opts LogOptions) func(next http.Handler) http.Handler {\n\treturn func(next http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tvar message []byte\n\t\t\tvar err error\n\n\t\t\t// Read r.Body, and then replace with a fresh ReadCloser for the next handler\n\t\t\tif message, err = ioutil.ReadAll(r.Body); err != nil {\n\t\t\t\tmessage = []byte(\"<failed to read body: \" + err.Error() + \">\")\n\t\t\t}\n\t\t\t_ = r.Body.Close()\n\t\t\tr.Body = ioutil.NopCloser(bytes.NewBuffer(message))\n\n\t\t\tvar headers http.Header\n\t\t\tvar from string\n\t\t\tif opts.Headers 
{\n\t\t\t\theaders = r.Header\n\t\t\t}\n\t\t\tif opts.From {\n\t\t\t\tfrom = r.RemoteAddr\n\t\t\t}\n\t\t\tLogRequest(typ, r.Proto, r.Method, r.URL.String(), from, headers, message)\n\n\t\t\t// copy output of HTTP handler to our buffer for later logging\n\t\t\tww := middleware.NewWrapResponseWriter(w, r.ProtoMajor)\n\t\t\tvar buf *bytes.Buffer\n\t\t\tif opts.Response {\n\t\t\t\tbuf = new(bytes.Buffer)\n\t\t\t\tww.Tee(buf)\n\t\t\t}\n\n\t\t\t// print response afterwards\n\t\t\tvar resp []byte\n\t\t\tvar start time.Time\n\t\t\tdefer func() {\n\t\t\t\tif ww.Header().Get(\"Content-Type\") == \"text/event-stream\" {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif opts.Response && ww.BytesWritten() > 0 {\n\t\t\t\t\tresp = buf.Bytes()\n\t\t\t\t}\n\t\t\t\tif ww.Status() >= 400 {\n\t\t\t\t\tresp = nil // avoid printing stacktraces and SSE in response\n\t\t\t\t}\n\t\t\t\tvar hexencode bool\n\t\t\t\tif opts.EncodeBinary && ww.Header().Get(\"Content-Type\") != \"application/json\" {\n\t\t\t\t\thexencode = true\n\t\t\t\t}\n\t\t\t\tLogResponse(ww.Status(), time.Since(start), hexencode, resp)\n\t\t\t}()\n\n\t\t\t// start timer and preform request\n\t\t\tstart = time.Now()\n\t\t\tnext.ServeHTTP(ww, r)\n\t\t})\n\t}\n}", "func SetMiddlewareLogger(next http.HandlerFunc) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tlog.Printf(\"\\n%s %s%s %s\", r.Method, r.Host, r.RequestURI, r.Proto)\n\t\tnext(w, r)\n\t}\n}", "func (mw *MiddleWareContext) MLog(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tbegin := time.Now()\n\n\t\tdelegate := &responseWriterDelegator{ResponseWriter: w}\n\t\trw := delegate\n\n\t\tnext.ServeHTTP(rw, r) // call original\n\n\t\tcode := sanitizeCode(delegate.status)\n\t\tmethod := sanitizeMethod(r.Method)\n\n\t\t// Throw into a go routine so it does not block, but probably is alreayd in a go routine...have to check\n\t\tgo log.Info().Str(\"uri\", r.RequestURI).Str(\"type\", 
\"request\").Str(\"method\", method).Str(\"response_time\", time.Since(begin).String()).Str(\"status\", code).Msg(\"\")\n\t})\n}", "func logger(inner http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t// Mark time at which request was received.\n\t\tvar start = time.Now()\n\t\t// Handle request.\n\t\tinner.ServeHTTP(w, r)\n\t\t// Log request with time elapsed.\n\t\tlog.Printf(\"%s\\t%s\\t%s\", r.Method, r.RequestURI, time.Since(start))\n\t})\n}", "func Logger(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tstart := time.Now()\n\t\t//wrap the response writer to get the status code\n\t\t//cant access status code from http.ResponseWriter\n\t\tnrw := NewResponseWriter(w)\n\t\t//call the next handler\n\t\tnext.ServeHTTP(nrw, r)\n\t\t//response := w.(ResponseWriter)\n\n\t\tls := &loggerStruct{\n\t\t\tStartTime: start.Format(time.RFC3339),\n\t\t\tStatus: nrw.Status(),\n\t\t\tDuration: time.Since(start),\n\t\t\tHostName: r.Host,\n\t\t\tMethod: r.Method,\n\t\t\tPath: r.URL.Path,\n\t\t}\n\n\t\tt := template.New(\"logger_template\")\n\t\ttem := template.Must(t.Parse(loggerTemplate))\n\n\t\tbuf := &bytes.Buffer{}\n\t\ttem.Execute(buf, ls)\n\n\t\tlog.Println(html.UnescapeString(buf.String()))\n\t})\n}", "func Logger(router *chi.Mux) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Println(time.Now(), r.Method, r.URL) //example: 2020-07-06 09:32:44.634333 +0200 CEST m=+22.456178180 GET /posts\n\t\trouter.ServeHTTP(w, r) // dispatch the request, we are dispatching the request to the router, which is who has the handlers to serve the request\n\t})\n}", "func Logging(opts ...Option) gin.HandlerFunc {\n\to := defaultOptions()\n\to.apply(opts...)\n\n\treturn func(c *gin.Context) {\n\t\tstart := time.Now()\n\n\t\t// 忽略打印指定的路由\n\t\tif _, ok := o.ignoreRoutes[c.Request.URL.Path]; ok 
{\n\t\t\tc.Next()\n\t\t\treturn\n\t\t}\n\n\t\t// 处理前打印输入信息\n\t\tbuf := bytes.Buffer{}\n\t\t_, _ = buf.ReadFrom(c.Request.Body)\n\n\t\tfields := []zap.Field{\n\t\t\tzap.String(\"method\", c.Request.Method),\n\t\t\tzap.String(\"url\", c.Request.URL.String()),\n\t\t}\n\t\tif c.Request.Method == http.MethodPost || c.Request.Method == http.MethodPut || c.Request.Method == http.MethodPatch || c.Request.Method == http.MethodDelete {\n\t\t\tfields = append(fields,\n\t\t\t\tzap.Int(\"size\", buf.Len()),\n\t\t\t\tzap.String(\"body\", getBodyData(&buf, o.maxLength)),\n\t\t\t)\n\t\t}\n\t\treqID := \"\"\n\t\tif o.requestIDFrom == 1 {\n\t\t\treqID = c.Request.Header.Get(o.requestIDName)\n\t\t\tfields = append(fields, zap.String(o.requestIDName, reqID))\n\t\t} else if o.requestIDFrom == 2 {\n\t\t\tif v, isExist := c.Get(o.requestIDName); isExist {\n\t\t\t\tif requestID, ok := v.(string); ok {\n\t\t\t\t\treqID = requestID\n\t\t\t\t\tfields = append(fields, zap.String(o.requestIDName, reqID))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\to.log.Info(\"<<<<\", fields...)\n\n\t\tc.Request.Body = io.NopCloser(&buf)\n\n\t\t// 替换writer\n\t\tnewWriter := &bodyLogWriter{body: &bytes.Buffer{}, ResponseWriter: c.Writer}\n\t\tc.Writer = newWriter\n\n\t\t// 处理请求\n\t\tc.Next()\n\n\t\t// 处理后打印返回信息\n\t\tfields = []zap.Field{\n\t\t\tzap.Int(\"code\", c.Writer.Status()),\n\t\t\tzap.String(\"method\", c.Request.Method),\n\t\t\tzap.String(\"url\", c.Request.URL.Path),\n\t\t\tzap.Int64(\"time_us\", time.Since(start).Nanoseconds()/1000),\n\t\t\tzap.Int(\"size\", newWriter.body.Len()),\n\t\t\tzap.String(\"response\", strings.TrimRight(getBodyData(newWriter.body, o.maxLength), \"\\n\")),\n\t\t}\n\t\tif o.requestIDName != \"\" {\n\t\t\tfields = append(fields, zap.String(o.requestIDName, reqID))\n\t\t}\n\t\to.log.Info(\">>>>\", fields...)\n\t}\n}", "func LogRequests(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tstart := 
time.Now()\n\n\t\tnext.ServeHTTP(w, r)\n\n\t\tlog.Printf(\n\t\t\t\"[%s]\\t%s\\t%s\",\n\t\t\tr.Method,\n\t\t\tr.URL.String(),\n\t\t\ttime.Since(start),\n\t\t)\n\t})\n}", "func LoggingMiddleware(log *logrus.Logger) func(http.Handler) http.Handler {\n\treturn func(handler http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tr = r.WithContext(context.New(r.Context()))\n\n\t\t\t// https://cloud.google.com/logging/docs/reference/v2/rest/v2/LogEntry#HttpRequest\n\t\t\trequest := &HTTPRequest{\n\t\t\t\tRequestMethod: r.Method,\n\t\t\t\tRequestURL: r.RequestURI,\n\t\t\t\tRemoteIP: r.RemoteAddr,\n\t\t\t\tReferer: r.Referer(),\n\t\t\t\tUserAgent: r.UserAgent(),\n\t\t\t\tRequestSize: strconv.FormatInt(r.ContentLength, 10),\n\t\t\t}\n\n\t\t\tm := httpsnoop.CaptureMetrics(handler, w, r)\n\n\t\t\trequest.Status = strconv.Itoa(m.Code)\n\t\t\trequest.Latency = fmt.Sprintf(\"%.9fs\", m.Duration.Seconds())\n\t\t\trequest.ResponseSize = strconv.FormatInt(m.Written, 10)\n\n\t\t\tfields := logrus.Fields{\"httpRequest\": request}\n\n\t\t\t// No idea if this works\n\t\t\ttraceHeader := r.Header.Get(\"X-Cloud-Trace-Context\")\n\t\t\tif traceHeader != \"\" {\n\t\t\t\tfields[\"trace\"] = traceHeader\n\t\t\t}\n\n\t\t\tlog.WithFields(fields).Info(\"Completed request\")\n\t\t})\n\t}\n}", "func (h Handlers) LoggingHandler(next http.Handler) http.Handler {\r\n\tfn := func(w http.ResponseWriter, r *http.Request) {\r\n\t\tvar t1, t2 time.Time\r\n\t\tif h.debugmode {\r\n\t\t\tlog.Println(\"############ SERVING REQUEST: \" + r.Method + \" \" + r.URL.String())\r\n\t\t\tlog.Println(\"FULL REQUEST: \")\r\n\t\t\t// Save a copy of this request for debugging.\r\n\t\t\trequestDump, err := httputil.DumpRequest(r, false)\r\n\t\t\tif err != nil {\r\n\t\t\t\tlog.Println(err)\r\n\t\t\t}\r\n\t\t\tlog.Println(string(requestDump))\r\n\t\t\tt1 = time.Now()\r\n\t\t}\r\n\t\tnext.ServeHTTP(w, r)\r\n\t\tif h.debugmode {\r\n\t\t\tt2 = 
time.Now()\r\n\t\t\tlog.Printf(\"############ DONE SERVING [%s] %q in %v\\n\", r.Method, r.URL.String(), t2.Sub(t1))\r\n\t\t}\r\n\t}\r\n\treturn http.HandlerFunc(fn)\r\n}", "func GinLogger(log *logrus.Logger) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\n\t\t// other handler can change c.Path so:\n\t\tpath := c.Request.URL.Path\n\t\tmethod := c.Request.Method\n\t\tstart := time.Now()\n\t\tc.Next()\n\t\tstop := time.Since(start)\n\t\tlatency := int(math.Ceil(float64(stop.Nanoseconds()) / 1000000.0))\n\t\tstatusCode := c.Writer.Status()\n\t\tclientIP := c.ClientIP()\n\t\tclientUserAgent := c.Request.UserAgent()\n\t\treferer := c.Request.Referer()\n\t\trequestS := c.Request.ContentLength\n\t\tresponseS := c.Writer.Size()\n\t\tif requestS < 0 {\n\t\t\trequestS = 0\n\t\t}\n\t\trequest := &HTTPRequest{\n\t\t\tRequestMethod: method,\n\t\t\tRequestURL: path,\n\t\t\tRemoteIP: clientIP,\n\t\t\tReferer: referer,\n\t\t\tUserAgent: clientUserAgent,\n\t\t\tResponseSize: strconv.Itoa(responseS),\n\t\t\tLatency: strconv.Itoa(latency),\n\t\t\tStatus: strconv.Itoa(statusCode),\n\t\t\tRequestSize: strconv.FormatInt(requestS, 10),\n\t\t}\n\n\t\tfields := logrus.Fields{\"httpRequest\": request}\n\n\t\ttraceHeader := c.GetHeader(\"X-Request-ID\")\n\t\tif traceHeader != \"\" {\n\t\t\tfields[\"trace\"] = traceHeader\n\t\t}\n\n\t\tentry := log.WithFields(fields)\n\n\t\tif len(c.Errors) > 0 {\n\t\t\tentry.Error(c.Errors.ByType(gin.ErrorTypePrivate).String())\n\t\t} else {\n\t\t\tmsg := fmt.Sprintf(\"[%s - %s] %d\", c.Request.Method, path, statusCode)\n\t\t\tif statusCode > 399 {\n\t\t\t\tentry.Error(msg)\n\t\t\t} else {\n\t\t\t\tentry.Info(msg)\n\t\t\t}\n\t\t}\n\t}\n}", "func LoggingMiddleware() Middleware {\n\treturn func(h http.Handler) http.Handler {\n\n\t\tfn := func(w http.ResponseWriter, r *http.Request) {\n\t\t\tfmt.Println(\"...Before LoggingMiddleware\")\n\t\t\tstart := time.Now().UnixNano()\n\t\t\th.ServeHTTP(w, r)\n\t\t\tend := 
time.Now().UnixNano()\n\t\t\tfmt.Printf(\"start-time(ns): %d end-time(ns): %d duration(ns):%d \\\"%s %s\\\"\\n\", start, end,(end-start),r.Method, r.RequestURI)\n\t\t\tfmt.Println(\"...After LoggingMiddleware\")\n\t\t}\n\n\t\treturn http.HandlerFunc(fn)\n\t}\n}", "func Log(h httprouter.Handle) httprouter.Handle {\n\treturn func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"host\": r.Host,\n\t\t\t\"address\": r.RemoteAddr,\n\t\t\t\"method\": r.Method,\n\t\t\t\"requestURI\": r.RequestURI,\n\t\t\t\"proto\": r.Proto,\n\t\t\t\"useragent\": r.UserAgent(),\n\t\t\t\"x-request-id\": r.Header.Get(\"X-Request-ID\"),\n\t\t}).Info(\"HTTP request\")\n\n\t\th(w, r, ps)\n\t}\n}", "func withRequestLog(next http.HandlerFunc) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\treqDump, err := httputil.DumpRequest(r, true)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t} else {\n\t\t\tlog.Println(string(reqDump))\n\t\t}\n\n\t\tnext.ServeHTTP(w, r)\n\t}\n}", "func responseLogger(handler http.HandlerFunc) http.HandlerFunc {\n\treturn func (w http.ResponseWriter, r *http.Request){\n\t\tlog.Printf(\"\\n%s %s%s %s\",r.Method, r.Host, r.RequestURI, r.Proto )\n\t\thandler(w,r)\n\t}\n}", "func (handler *Handler) RequestLogger(targetMux http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tstart := time.Now()\n\n\t\ttargetMux.ServeHTTP(w, r)\n\n\t\t// log request by who(IP address)\n\t\trequesterIP := r.RemoteAddr\n\n\t\tlog.Printf(\n\t\t\t\"%s\\t%s\\t%s\\t%v\",\n\t\t\tr.Method,\n\t\t\tr.RequestURI,\n\t\t\trequesterIP,\n\t\t\ttime.Since(start),\n\t\t)\n\t})\n}", "func Log(handler http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) {\n\t\tfmt.Println(req)\n\t\thandler.ServeHTTP(res, req)\n\t})\n}", "func (m Middleware) Log(r *http.Request, err error) {\n\tif r == nil {\n\t\tr = 
&http.Request{}\n\t}\n\tentry := logEntry{\n\t\tTime: m.Now(),\n\t\tPackage: pkg,\n\t\tMethod: r.Method,\n\t\tUserAgent: r.UserAgent(),\n\t\tRemoteAddr: firstNonEmpty(r.Header.Get(\"X-Forwarded-For\"), r.RemoteAddr),\n\t}\n\tif r.URL != nil {\n\t\tentry.URL = r.URL.String()\n\t}\n\tif err != nil {\n\t\tentry.Error = err.Error()\n\t}\n\tbytes, err := json.Marshal(entry)\n\tif err != nil {\n\t\tm.Logger(`{ \"pkg\": \"watchman\", \"err\": \"json logger error\" }` + \"\\n\")\n\t\treturn\n\t}\n\tm.Logger(string(bytes) + \"\\n\")\n}", "func Logging() Middleware {\n\n\t// Create a new Middleware\n\treturn func(f http.HandlerFunc) http.HandlerFunc {\n\n\t\t// Define the http.HandlerFunc\n\t\treturn func(w http.ResponseWriter, r *http.Request) {\n\n\t\t\t// Do middleware things\n\t\t\tstart := time.Now()\n\t\t\tdefer func() { log.Println(r.URL.Path, time.Since(start)) }()\n\n\t\t\t// Call the next middleware/handler in chain\n\t\t\tf(w, r)\n\t\t}\n\t}\n}", "func Logger(inner http.HandlerFunc, name string) http.HandlerFunc {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tstart := time.Now()\n\n\t\tinner.ServeHTTP(w, r)\n\n\t\tlog.Printf(\n\t\t\t\"%s\\t%s\\t%s\\t%s\",\n\t\t\tr.Method,\n\t\t\tr.RequestURI,\n\t\t\tname,\n\t\t\ttime.Since(start),\n\t\t)\n\t})\n}", "func Logger(h http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tlog.Printf(\"[%s] [%s] %s\", r.RemoteAddr, r.Method, r.URL.String())\n\t\th.ServeHTTP(w, r)\n\t})\n}", "func (l *loggerMiddleware) ServeHTTP(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {\n\tstart := time.Now()\n\tmethod := r.Method\n\turl := r.URL.String()\n\twn := w.(negroni.ResponseWriter)\n\tl.logger.WithFields(logrus.Fields{\n\t\t\"Client\": r.RemoteAddr,\n\t\t\"Method\": method,\n\t\t\"URL\": url,\n\t\t\"Referrer\": r.Referer(),\n\t\t\"User-Agent\": r.UserAgent(),\n\t}).Infof(\"Request\")\n\n\tnext(w, 
r)\n\n\tl.logger.WithFields(logrus.Fields{\n\t\t\"Method\": method,\n\t\t\"URL\": url,\n\t\t\"StatusCode\": wn.Status(),\n\t\t\"Size\": wn.Size(),\n\t\t\"Duration\": int64(time.Since(start) / time.Millisecond),\n\t}).Infof(\"Response\")\n}", "func RequestLogger(targetMux http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tstart := time.Now()\n\n\t\ttargetMux.ServeHTTP(w, r)\n\n\t\t//log request by who(IP address)\n\t\trequesterIP := r.RemoteAddr\n\n\t\tlogger.Infow(\"Loaded page\",\n\t\t\t\"Method\", r.Method,\n\t\t\t\"RequestURI\", r.RequestURI,\n\t\t\t\"RequesterIP\", requesterIP,\n\t\t\t\"Time\", time.Since(start),\n\t\t)\n\t})\n}", "func requestLogger(l *log.Logger) Wrapper {\n\treturn func(fn http.HandlerFunc) http.HandlerFunc {\n\t\treturn func(w http.ResponseWriter, req *http.Request) {\n\t\t\trid, ok := req.Context().Value(\"rid\").(string)\n\t\t\tif !ok {\n\t\t\t\trid = \"none\"\n\t\t\t}\n\n\t\t\tl.Printf(\"[%s] Before\", rid)\n\n\t\t\tstart := time.Now()\n\t\t\tfn(w, req)\n\t\t\tl.Printf(\"[%s] Finished in %s\", rid, time.Since(start))\n\t\t}\n\t}\n}", "func WithLogger(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tzap.L().Info(\n\t\t\t\"request\",\n\t\t\tzap.String(\"method\", r.Method),\n\t\t\tzap.String(\"path\", r.URL.Path),\n\t\t\tzap.Any(\"query\", r.URL.Query()),\n\t\t\tzap.Any(\"headers\", r.Header),\n\t\t\tzap.Int64(\"body\", r.ContentLength),\n\t\t)\n\t\tnext.ServeHTTP(w, r)\n\t})\n}", "func Logger(log *logger.Logger) gin.HandlerFunc {\n\thostname, err := os.Hostname()\n\tif err != nil {\n\t\thostname = \"unknow\"\n\t}\n\n\tgin.Logger()\n\n\treturn func(c *gin.Context) {\n\t\tpath := c.Request.URL.Path\n\t\tstart := time.Now()\n\t\trequestTime := start.Format(timeFormat)\n\n\t\tc.Next()\n\n\t\tstop := time.Now()\n\t\tlatency := stop.Sub(start)\n\t\tstatusCode := c.Writer.Status()\n\t\tclientIP := 
c.ClientIP()\n\t\tclientUserAgent := c.Request.UserAgent()\n\t\treferer := c.Request.Referer()\n\n\t\tdataLength := c.Writer.Size()\n\t\tif dataLength < 0 {\n\t\t\tdataLength = 0\n\t\t}\n\n\t\treqID := c.GetString(\"Request-ID\")\n\t\tmethod := c.Request.Method\n\n\t\tentry := log.WithFields(logrus.Fields{\n\t\t\t\"Hostname\": hostname,\n\t\t\t\"Path\": path,\n\t\t\t\"Date\": requestTime,\n\t\t\t\"Latency\": latency,\n\t\t\t\"Code\": statusCode,\n\t\t\t\"IP\": clientIP,\n\t\t\t\"User-Agent\": clientUserAgent,\n\t\t\t\"Referer\": referer,\n\t\t\t\"Data-Length\": dataLength,\n\t\t\t\"Request-ID\": reqID,\n\t\t\t\"Method\": method,\n\t\t})\n\n\t\tif len(c.Errors) > 0 {\n\t\t\tentry.Error(c.Errors.ByType(gin.ErrorTypePrivate).String())\n\t\t} else {\n\t\t\tmsg := \"HTTP Request\"\n\t\t\tif statusCode >= 500 {\n\t\t\t\tentry.Error(msg)\n\t\t\t} else if statusCode >= 400 {\n\t\t\t\tentry.Warn(msg)\n\t\t\t} else {\n\t\t\t\tentry.Info(msg)\n\t\t\t}\n\t\t}\n\t}\n}", "func Logger() gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\t// before request\n\t\tt := time.Now()\n\t\tlog.Println(\"on before request\")\n\t\tc.Next()\n\t\tlog.Println(\"on after request\")\n\t\t// after request\n\t\tlatency := time.Since(t)\n\t\tlog.Println(\"*** Latency :\", latency.Milliseconds())\n\t}\n}", "func Logger(log *logger.Logger, h http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\trespMetrics := httpsnoop.CaptureMetricsFn(w, func(ww http.ResponseWriter) {\n\t\t\th.ServeHTTP(ww, r)\n\t\t})\n\n\t\tctx := r.Context()\n\t\ttraceID, spanID := tracing.TraceInfo(ctx)\n\n\t\tlogFields := []interface{}{\n\t\t\t\"trace-id\", traceID,\n\t\t\t\"span-id\", spanID,\n\t\t\t\"http-method\", r.Method,\n\t\t\t\"remote-addr\", r.RemoteAddr,\n\t\t\t\"user-agent\", r.UserAgent(),\n\t\t\t\"uri\", r.URL.String(),\n\t\t\t\"status-code\", respMetrics.Code,\n\t\t\t\"elapsed\", fmt.Sprintf(\"%.9fs\", respMetrics.Duration.Seconds()),\n\t\t}\n\n\t\tswitch 
{\n\t\tcase respMetrics.Code >= 500:\n\t\t\tlog.Errorw(\"Request completed\", logFields...)\n\t\tdefault:\n\t\t\tlog.Debugw(\"Request completed\", logFields...)\n\t\t}\n\t})\n}", "func LogReq(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tnext.ServeHTTP(w, r)\n\t\turi := r.URL.String()\n\t\tmethod := r.Method\n\t\tlog.Println(\"^^\", method, uri)\n\t})\n}", "func AccessLogMiddleware(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tstart := timeutil.Now()\n\t\twriter := &loggingWriter{\n\t\t\tResponseWriter: w,\n\t\t\taccessStats: accessStats{},\n\t\t}\n\t\tdefer func() {\n\t\t\t// add access log\n\t\t\tlog := writer.accessStats\n\t\t\tpath := r.RequestURI\n\t\t\tunescapedPath, err := pathUnescapeFunc(path)\n\t\t\tif err != nil {\n\t\t\t\tunescapedPath = path\n\t\t\t}\n\t\t\t// http://httpd.apache.org/docs/1.3/logs.html?PHPSESSID=026558d61a93eafd6da3438bb9605d4d#common\n\t\t\tlogger.AccessLog.Info(realIP(r) + \" \" + strconv.Itoa(int(timeutil.Now()-start)) + \"ms\" +\n\t\t\t\t\" \\\"\" + r.Method + \" \" + unescapedPath + \" \" + r.Proto + \"\\\" \" +\n\t\t\t\tstrconv.Itoa(log.status) + \" \" + strconv.Itoa(log.size))\n\t\t\tpaths := strings.Split(unescapedPath, \"?\")\n\t\t\tif len(paths) > 0 {\n\t\t\t\tpath = paths[0]\n\t\t\t}\n\t\t\thttpHandleTimer.WithLabelValues(path, strconv.Itoa(log.status)).Observe(float64(timeutil.Now() - start))\n\t\t}()\n\t\tnext.ServeHTTP(writer, r)\n\t})\n}", "func Logger(l log.Logger) gin.HandlerFunc {\n return func(c *gin.Context) {\n // Start timer\n start := time.Now()\n path := c.Request.URL.Path\n\n // Process request\n c.Next()\n\n // Stop timer\n end := time.Now()\n latency := end.Sub(start)\n\n // clientIP := c.ClientIP()\n method := c.Request.Method\n statusCode := c.Writer.Status()\n comment := c.Errors.ByType(gin.ErrorTypePrivate).String()\n\n l.Info(\"Request\", \"method\", method, \"status\", 
statusCode, \"latency\", latency, \"path\", path, \"comment\", comment)\n }\n}", "func Logger(inner http.Handler, name string) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tstart := time.Now()\n\n\t\tlog.Printf(\n\t\t\t\"START %s\\t%s\\t%s\\n%s\\n\\n\",\n\t\t\tr.Method,\n\t\t\tr.RequestURI,\n\t\t\tname,\n\t\t\tdumpRequest(r),\n\t\t)\n\n\t\tinner.ServeHTTP(w, r)\n\n\t\tlog.Printf(\n\t\t\t\"STOP %s\\t%s\\t%s\\t%s\\n\",\n\t\t\tr.Method,\n\t\t\tr.RequestURI,\n\t\t\tname,\n\t\t\ttime.Since(start),\n\t\t)\n\t})\n}", "func Logging(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tnw := new(ResWrtrCapturer)\n\t\tnw.StatusCode = 200\n\t\tnw.internal = w\n\t\tutil.VerboseRequest(r, \"Recieved: %+v\", r.Header)\n\t\tnext.ServeHTTP(nw, r)\n\t\tutil.VerboseRequest(r, \"Complete(%d): %+v\", nw.StatusCode, nw.Header())\n\t})\n}", "func Logger() echo.MiddlewareFunc {\n\treturn func(next echo.HandlerFunc) echo.HandlerFunc {\n\t\treturn func(c echo.Context) error {\n\t\t\tstart := time.Now()\n\t\t\tvar err error\n\t\t\tif err = next(c); err != nil {\n\t\t\t\tc.Error(err)\n\t\t\t}\n\t\t\tstop := time.Now()\n\n\t\t\treq := c.Request()\n\t\t\tres := c.Response()\n\t\t\treqSize := req.Header.Get(echo.HeaderContentLength)\n\t\t\tif reqSize == \"\" {\n\t\t\t\treqSize = \"0\"\n\t\t\t}\n\n\t\t\tfields := log.Fields{\n\t\t\t\t\"path\": req.RequestURI,\n\t\t\t\t\"method\": req.Method,\n\t\t\t\t\"status\": res.Status,\n\t\t\t\t\"request_size\": reqSize,\n\t\t\t\t\"response_size\": res.Size,\n\t\t\t\t\"duration\": stop.Sub(start).String(),\n\t\t\t\t\"error\": err,\n\t\t\t}\n\n\t\t\tif err == nil {\n\t\t\t\tfields[\"error\"] = \"\"\n\t\t\t}\n\t\t\tlogger := c.Get(\"logger\").(*log.Entry)\n\t\t\tlogger.WithFields(fields).Info(\"request\")\n\n\t\t\treturn err\n\t\t}\n\t}\n}", "func AccessLogMiddleware() gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tstart := time.Now()\n\t\tr := 
c.Request\n\t\tdefer func() {\n\t\t\t// add access log\n\t\t\tpath := r.RequestURI\n\t\t\tunescapedPath, err := pathUnescapeFunc(path)\n\t\t\tif err != nil {\n\t\t\t\tunescapedPath = path\n\t\t\t}\n\t\t\t// http://httpd.apache.org/docs/1.3/logs.html?PHPSESSID=026558d61a93eafd6da3438bb9605d4d#common\n\t\t\trequestInfo := realIP(r) + \" \" + strconv.Itoa(int(time.Since(start).Milliseconds())) + \"ms\" +\n\t\t\t\t\" \\\"\" + r.Method + \" \" + unescapedPath + \" \" + r.Proto + \"\\\" \" +\n\t\t\t\tstrconv.Itoa(c.Writer.Status()) + \" \" + strconv.Itoa(c.Writer.Size())\n\n\t\t\tr := recover()\n\t\t\tswitch {\n\t\t\tcase r != nil:\n\t\t\t\tlogger.AccessLog.Error(requestInfo, logger.Stack())\n\t\t\tcase len(c.Errors) > 0:\n\t\t\t\tlogger.AccessLog.Error(requestInfo, logger.Error(c.Errors[0].Err))\n\t\t\tdefault:\n\t\t\t\tlogger.AccessLog.Info(requestInfo)\n\t\t\t}\n\n\t\t\tpaths := strings.Split(unescapedPath, \"?\")\n\t\t\tif len(paths) > 0 {\n\t\t\t\tpath = paths[0]\n\t\t\t}\n\t\t\t// ignore admin web static js, css files\n\t\t\tif strings.HasPrefix(path, \"/api/\") {\n\t\t\t\thttHandlerTimerVec.\n\t\t\t\t\tWithTagValues(path, strconv.Itoa(c.Writer.Status())).\n\t\t\t\t\tUpdateSince(start)\n\t\t\t}\n\t\t}()\n\t\tc.Next()\n\t}\n}", "func LoggerMiddleware(handle func(w http.ResponseWriter, r *http.Request)) func(w http.ResponseWriter, r *http.Request) {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\thandle(w, r)\n\t}\n}", "func logger(fn http.HandlerFunc) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tdefer log.Printf(\"%s - %s\", r.Method, r.URL)\n\t\tfn(w, r)\n\t}\n}", "func (c *Admission) requestLoggingMiddlerware() echo.MiddlewareFunc {\n\treturn func(next echo.HandlerFunc) echo.HandlerFunc {\n\t\treturn func(ctx echo.Context) error {\n\t\t\trequest := ctx.Request()\n\t\t\tadmission, err := httputil.DumpRequest(request, true)\n\t\t\tif err != nil {\n\t\t\t\tlog.WithFields(log.Fields{\"error\": 
err.Error()}).Error(\"unable to read the request body\")\n\t\t\t}\n\t\t\tstart := time.Now()\n\t\t\tif err = next(ctx); err != nil {\n\t\t\t\tctx.Error(err)\n\t\t\t}\n\t\t\tstop := time.Now()\n\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"code\": ctx.Response().Status,\n\t\t\t\t\"host\": request.Host,\n\t\t\t\t\"method\": request.Method,\n\t\t\t\t\"request\": admission,\n\t\t\t\t\"time\": stop.Sub(start).String(),\n\t\t\t\t\"uri\": request.RequestURI,\n\t\t\t}).Info(\"http request\")\n\n\t\t\treturn nil\n\t\t}\n\t}\n}", "func LogRequest(next http.Handler) http.Handler {\n\tfn := func(w http.ResponseWriter, r *http.Request) {\n\t\t// Start timer\n\t\tstart := time.Now()\n\n\t\t// Add a requestID field to logger\n\t\tuid, _ := ulid.NewFromTime(start)\n\t\tl := log.With(zap.String(\"requestID\", uid.String()))\n\t\t// Add logger to context\n\t\tctx := context.WithValue(r.Context(), requestIDKey, l)\n\t\t// Request with this new context.\n\t\tr = r.WithContext(ctx)\n\n\t\t// wrap the ResponseWriter\n\t\tlw := &basicWriter{ResponseWriter: w}\n\n\t\t// Get the real IP even behind a proxy\n\t\trealIP := r.Header.Get(http.CanonicalHeaderKey(\"X-Forwarded-For\"))\n\t\tif realIP == \"\" {\n\t\t\t// if no content in header \"X-Forwarded-For\", get \"X-Real-IP\"\n\t\t\tif xrip := r.Header.Get(http.CanonicalHeaderKey(\"X-Real-IP\")); xrip != \"\" {\n\t\t\t\trealIP = xrip\n\t\t\t} else {\n\t\t\t\trealIP = r.RemoteAddr\n\t\t\t}\n\t\t}\n\n\t\t// Process request\n\t\tnext.ServeHTTP(lw, r)\n\t\tlw.maybeWriteHeader()\n\n\t\t// Stop timer\n\t\tend := time.Now()\n\t\tlatency := end.Sub(start)\n\t\tstatusCode := lw.Status()\n\n\t\tl.Info(\"request\",\n\t\t\tzap.String(\"method\", r.Method),\n\t\t\tzap.String(\"url\", r.RequestURI),\n\t\t\tzap.Int(\"code\", statusCode),\n\t\t\tzap.String(\"clientIP\", realIP),\n\t\t\tzap.Int(\"bytes\", lw.bytes),\n\t\t\tzap.Int64(\"duration\", int64(latency)/int64(time.Microsecond)),\n\t\t)\n\t}\n\n\treturn http.HandlerFunc(fn)\n}", "func (ws *Server) 
addLogger(next http.Handler) http.Handler {\n\tfn := func(w http.ResponseWriter, r *http.Request) {\n\t\tgo func() {\n\t\t\tws.Logger.TMsg(\"client %s %s %s\", r.RemoteAddr, r.Method, r.RequestURI)\n\t\t}()\n\t\tnext.ServeHTTP(w, r)\n\t}\n\treturn http.HandlerFunc(fn)\n}", "func ReqLogger(rw *http.ResponseWriter, responseStatus *int, URL *url.URL, Method string, start *time.Time) {\n\tif *responseStatus != 200 && *responseStatus != 308 {\n\t\t(*rw).WriteHeader(*responseStatus)\n\t}\n\tlog.Printf(\"%s %s %d %s\\n\", Method, URL, *responseStatus, time.Since(*start))\n}" ]
[ "0.7804974", "0.7682003", "0.74303913", "0.7404096", "0.73647726", "0.7348642", "0.7313029", "0.73093975", "0.7300574", "0.7281842", "0.72493076", "0.7230931", "0.72171444", "0.720737", "0.7204911", "0.7202087", "0.7200994", "0.71565276", "0.7132212", "0.712089", "0.7096644", "0.70901865", "0.70870316", "0.70853025", "0.7073343", "0.7049731", "0.70346963", "0.7023409", "0.70223564", "0.7017762", "0.69961673", "0.6994703", "0.69891083", "0.6960618", "0.69389856", "0.693322", "0.6926344", "0.6918546", "0.6893594", "0.68859684", "0.68848395", "0.68654495", "0.6861975", "0.6860598", "0.68558615", "0.6837197", "0.68322664", "0.6820903", "0.6817531", "0.6797521", "0.6786301", "0.67838", "0.67736554", "0.67715514", "0.67658544", "0.6747795", "0.6704098", "0.66953105", "0.6684576", "0.66840225", "0.6683503", "0.66705835", "0.66647625", "0.6661499", "0.6652963", "0.66527736", "0.6643757", "0.6641439", "0.66390336", "0.6634291", "0.6633255", "0.6626395", "0.6623355", "0.6621085", "0.66209483", "0.6611518", "0.66072196", "0.6606332", "0.6568006", "0.6567539", "0.65656126", "0.6563678", "0.6550092", "0.6543429", "0.65429807", "0.65427923", "0.65397906", "0.65361315", "0.6534326", "0.6530943", "0.6530012", "0.6528568", "0.6522043", "0.65180063", "0.65168715", "0.65133774", "0.6512329", "0.64982283", "0.64979345", "0.6486205" ]
0.672749
56
TrustAnchorString convert a TrustAnchor to a string encoded as XML.
func TrustAnchorString(t []*TrustAnchor) string { xta := new(XMLTrustAnchor) xta.KeyDigest = make([]*XMLKeyDigest, 0) for _, ta := range t { xta.Id = ta.Id // Sets the everytime, but that is OK. xta.Source = ta.Source xta.Zone = ta.Anchor.Hdr.Name xkd := new(XMLKeyDigest) xkd.Id = ta.AnchorId xkd.ValidFrom = ta.ValidFrom.Format("2006-01-02T15:04:05-07:00") if !ta.ValidUntil.IsZero() { xkd.ValidUntil = ta.ValidUntil.Format("2006-01-02T15:04:05-07:00") } xkd.KeyTag = ta.Anchor.KeyTag xkd.Algorithm = ta.Anchor.Algorithm xkd.DigestType = ta.Anchor.DigestType xkd.Digest = ta.Anchor.Digest xta.KeyDigest = append(xta.KeyDigest, xkd) } b, _ := xml.MarshalIndent(xta, "", "\t") return string(b) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (me TxsdPresentationAttributesTextContentElementsTextAnchor) ToXsdtString() xsdt.String {\n\treturn xsdt.String(me)\n}", "func (me TxsdPresentationAttributesTextContentElementsTextAnchor) String() string {\n\treturn xsdt.String(me).String()\n}", "func (s TlsValidationContextAcmTrust) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s VirtualGatewayTlsValidationContextAcmTrust) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s TlsValidationContextTrust) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s ListenerTlsAcmCertificate) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o LookupCustomKeyStoreResultOutput) TrustAnchorCertificate() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupCustomKeyStoreResult) string { return v.TrustAnchorCertificate }).(pulumi.StringOutput)\n}", "func (s AssertionRule) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s ListenerTlsCertificate) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s VirtualGatewayTlsValidationContextTrust) String() string {\n\treturn awsutil.Prettify(s)\n}", "func getTrustAnchor() (*trustAnchor, error) {\n\tresp, err := http.Get(trustAnchorURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"bad http response: %d\", resp.StatusCode)\n\t}\n\tbyteValue, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar anchor trustAnchor\n\tif err := xml.Unmarshal(byteValue, &anchor); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &anchor, nil\n}", "func (s NewAssertionRule) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (c X509AttributeCertificate) String() string {\n\treturn base64.StdEncoding.EncodeToString(c.Encoded)\n}", "func (s VirtualGatewayListenerTlsAcmCertificate) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (c certificate) String() string {\n\tb, err := asn1.Marshal(c)\n\tif err != nil {\n\t\treturn 
fmt.Sprintf(\"Failed marshaling cert: %v\", err)\n\t}\n\tblock := &pem.Block{\n\t\tBytes: b,\n\t\tType: \"CERTIFICATE\",\n\t}\n\tb = pem.EncodeToMemory(block)\n\treturn string(b)\n}", "func (n *Netconf) ToXMLString() (string, error) {\n\tdoc := etree.NewDocument()\n\toperation := doc.CreateElement(n.Operation)\n\tswitch n.Operation {\n\tcase \"get-config\":\n\t\tsource := operation.CreateElement(\"source\")\n\t\tif n.Source != nil {\n\t\t\tsource.CreateElement(*n.Source)\n\t\t} else {\n\t\t\tsource.CreateElement(\"running\")\n\t\t}\n\t\taddFilterIfPresent(n, operation)\n\tcase \"get\":\n\t\taddFilterIfPresent(n, operation)\n\tcase \"edit-config\":\n\t\tsource := operation.CreateElement(\"target\")\n\t\tif n.Target != nil {\n\t\t\tsource.CreateElement(*n.Target)\n\t\t} else {\n\t\t\tsource.CreateElement(\"running\")\n\t\t}\n\t\tconfig := operation.CreateElement(\"config\")\n\t\tif n.Config != nil {\n\t\t\tinner := etree.NewDocument()\n\t\t\terr := inner.ReadFromString(*n.Config)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"Config data is not valid xml\")\n\t\t\t}\n\t\t\tconfig.AddChild(inner.Root().Copy())\n\t\t}\n\tdefault:\n\t\treturn \"\", errors.New(n.Operation + \" is not a supported operation\")\n\n\t}\n\treturn doc.WriteToString()\n}", "func (s AssociateTrustStoreOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CACertificate) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s TlsValidationContextFileTrust) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s ListenerTls) String() string {\n\treturn awsutil.Prettify(s)\n}", "func EncodedString(data interface{}) (string, error) {\n\tsaml, err := String(data)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tb64XML := EncodeString([]byte(saml))\n\treturn b64XML, nil\n}", "func (s AssociateTrustStoreInput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s GetTrustStoreCertificateOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s 
DescribeTrustsOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s ListenerTlsFileCertificate) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateTrustStoreOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (attr *EdgeAttribute) convertToString() error {\n\tswitch attr.Value.(type) {\n\tcase string:\n\t\treturn nil\n\t}\n\treturn attr.errInvalidEdgeAttribute()\n}", "func (s AutoMLResolvedAttributes) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s TopicRule) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s ActivatedRule) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s ResolverRuleConfig) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s ResolverRuleConfig) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s ResolverRuleConfig) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s ResolverRuleAssociation) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s ResolverRuleAssociation) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s ResolverRuleAssociation) String() string {\n\treturn awsutil.Prettify(s)\n}", "func AstToString(a *Ast) (string, error) {\n\texpr := a.Expr()\n\tinfo := a.SourceInfo()\n\treturn parser.Unparse(expr, info)\n}", "func Anchorize(text string) string {\n\treturn string(anchorize([]byte(text), false))\n}", "func (s VirtualGatewayListenerTlsCertificate) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s ResolverRule) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s ResolverRule) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s ResolverRule) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s PublishFindingToSnsParams) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s GetTrustStoreOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s UpdateTrustStoreOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s TopicRulePayload) 
String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s VirtualGatewayTlsValidationContextFileTrust) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateLoadBalancerTlsCertificateOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s Certificate) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s Certificate) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s Certificate) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CertificateSummary) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s AwsWafWebAclRule) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s AutoMLSecurityConfig) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s GetTrustStoreCertificateInput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (n *Node) toxstring() string {\n\treturn strings.Replace(n.Nodestr, \"/\", \"+\", -1)\n}", "func (s DescribeTrustsInput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s AssociateResolverRuleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s AssociateResolverRuleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s AssociateResolverRuleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func ReadTrustAnchor(q io.Reader) ([]*TrustAnchor, error) {\n\td := xml.NewDecoder(q)\n\tt := new(XMLTrustAnchor)\n\tif e := d.Decode(t); e != nil {\n\t\treturn nil, e\n\t}\n\tta := make([]*TrustAnchor, 0)\n\tvar err error\n\tfor _, digest := range t.KeyDigest {\n\t\tt1 := new(TrustAnchor)\n\t\tt1.Id = t.Id\n\t\tt1.Source = t.Source\n\t\tt1.AnchorId = digest.Id\n\t\tif t1.ValidFrom, err = time.Parse(\"2006-01-02T15:04:05-07:00\", digest.ValidFrom); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif digest.ValidUntil != \"\" {\n\t\t\tif t1.ValidUntil, err = time.Parse(\"2006-01-02T15:04:05-07:00\", digest.ValidUntil); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\td := new(RR_DS)\n\t\td.Hdr = 
RR_Header{Name: t.Zone, Class: ClassINET, Rrtype: TypeDS}\n\t\td.KeyTag = digest.KeyTag\n\t\td.Algorithm = digest.Algorithm\n\t\td.DigestType = digest.DigestType\n\t\td.Digest = digest.Digest\n\t\tt1.Anchor = d\n\t\t// Some checks here too?\n\t\tta = append(ta, t1)\n\t}\n\treturn ta, nil\n}", "func (me TLinkTargetType) ToXsdtString() xsdt.String { return xsdt.String(me) }", "func (s LookupAttribute) String() string {\n\treturn awsutil.Prettify(s)\n}", "func EncodedSignedString(data interface{}, privateKeyPath string) (string, error) {\n\tsigned, err := SignedString(data, privateKeyPath)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tb64XML := EncodeString([]byte(signed))\n\treturn b64XML, nil\n}", "func (s RuleTriggerEventSource) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (a Address) ToString() string {\n\treturn base64.StdEncoding.EncodeToString(a)\n}", "func (s UpdateCACertificateParams) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (a AppliedConstraint) String() string {\n\treturn a.Constraint.String(a.Installable.Identifier())\n}", "func (s AssociationSummary) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s AssertionRuleUpdate) String() string {\n\treturn awsutil.Prettify(s)\n}", "func ConvertAclToXml(input AccessControlPolicy, returnMd5 bool, isObs bool) (data string, md5 string) {\n\txml := make([]string, 0, 4+len(input.Grants))\n\townerID := XmlTranscoding(input.Owner.ID)\n\txml = append(xml, fmt.Sprintf(\"<AccessControlPolicy><Owner><ID>%s</ID>\", ownerID))\n\tif !isObs && input.Owner.DisplayName != \"\" {\n\t\townerDisplayName := XmlTranscoding(input.Owner.DisplayName)\n\t\txml = append(xml, fmt.Sprintf(\"<DisplayName>%s</DisplayName>\", ownerDisplayName))\n\t}\n\tif isObs && input.Delivered != \"\" {\n\t\tobjectDelivered := XmlTranscoding(input.Delivered)\n\t\txml = append(xml, fmt.Sprintf(\"</Owner><Delivered>%s</Delivered><AccessControlList>\", objectDelivered))\n\t} else {\n\t\txml = append(xml, 
\"</Owner><AccessControlList>\")\n\t}\n\tfor _, grant := range input.Grants {\n\t\txml = append(xml, convertGrantToXML(grant, isObs, false))\n\t}\n\txml = append(xml, \"</AccessControlList></AccessControlPolicy>\")\n\tdata = strings.Join(xml, \"\")\n\tif returnMd5 {\n\t\tmd5 = Base64Md5([]byte(data))\n\t}\n\treturn\n}", "func (s CreateLoadBalancerTlsCertificateInput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (c Certificate) String() string {\n\treturn fmt.Sprintf(\"Cert(id=%v,trustId=%v,issuerId=%v,trusteeId=%v,lvl=%v): %v\",\n\t\tformatUUID(c.Id), formatUUID(c.TrustId), formatUUID(c.IssuerId), formatUUID(c.TrusteeId), c.Level, c.ExpiresAt.Sub(c.IssuedAt))\n}", "func toYamlString(resource interface{}) string {\n\tdata, err := yaml.Marshal(resource)\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\treturn string(data)\n}", "func dnsEncodeToString(input []byte) string {\n\tencoded := sliverBase32.EncodeToString(input)\n\t// {{if .Debug}}\n\tlog.Printf(\"[base32] %#v\", encoded)\n\t// {{end}}\n\treturn strings.TrimRight(encoded, \"=\")\n}", "func (c Certificate) String() string {\n\treturn fmt.Sprintf(\"ID: %d\\nUserId: %d\\nCreated: %s\\nLink: %s\\n Link: %t\\n\",\n\t\tc.ID, c.UserID, c.Created.Format(\"02-01-2006 15:04:05\"), c.Link, c.IsDeleted)\n}", "func (s IpRule) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s UpdateTrustStoreInput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateSafetyRuleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s AssociateResolverRuleInput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s AssociateResolverRuleInput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s AssociateResolverRuleInput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s IamIdentity) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s RuleConfig) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (id *HashChainLinkIdentity) String() string 
{\n\tif id == nil {\n\t\treturn \"\"\n\t}\n\n\tswitch id.Type() {\n\tcase IdentityTypeLegacyID:\n\t\treturn fmt.Sprintf(\"'%s' (legacy)\", id.clientID)\n\tcase IdentityTypeMetadata:\n\t\treturn fmt.Sprintf(\"Client ID: '%s'; Machine ID: '%s'; Sequence number: %d; Request time: %d\",\n\t\t\tid.clientID, id.machineID, id.sequenceNr, id.requestTime)\n\tdefault:\n\t\treturn \"Unknown\"\n\t}\n}", "func (tm *TuringMachine) ToXMLString() string {\n\t// marshal (returns []byte)\n\tvar xmlBuf, err = xml.MarshalIndent(tm, \"\", \" \")\n\tif err != nil {\n\t\tlog.Fatalf(\"Error: Turing Machine XML Marshal err: %v\\n\", err)\n\t}\n\treturn string(xmlBuf)\n}", "func (s RuleSummary) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s RuleSummary) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s AuditCheckConfiguration) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s GetTrustStoreInput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateTrustStoreInput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s AssociationOverview) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s ServerCertificateSummary) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (a Address) String() string {\n\tbech32Addr, err := bech32.Encode(AddressBech32HRP.String(), a[:])\n\tif err != nil {\n\t\treturn \"[malformed]\"\n\t}\n\treturn bech32Addr\n}", "func (p PhonebookContainer) ToXMLString() string {\n\treturn p.toXML(false)\n}", "func (s AutoMLProblemTypeResolvedAttributes) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CertificateValidity) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s AgentContactReference) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (cs *CheckStyle) String() string {\n\tcheckStyleXML, err := xml.Marshal(cs)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn string(checkStyleXML)\n}", "func (s ProfilerRuleConfiguration) String() string {\n\treturn 
awsutil.Prettify(s)\n}" ]
[ "0.57817805", "0.5648022", "0.5617549", "0.54700416", "0.5343101", "0.52947736", "0.52883303", "0.51470983", "0.5059527", "0.5048072", "0.49670568", "0.4913151", "0.4862687", "0.4851099", "0.4848319", "0.48402482", "0.4776784", "0.47474617", "0.47324777", "0.47219345", "0.47202057", "0.469182", "0.46671033", "0.4653128", "0.4638494", "0.46319738", "0.46076667", "0.459263", "0.45918342", "0.45892724", "0.45817915", "0.45817915", "0.45817915", "0.45816046", "0.45816046", "0.45816046", "0.4576036", "0.45711535", "0.45593405", "0.45561984", "0.45561984", "0.45561984", "0.45350096", "0.45301843", "0.4522172", "0.4520765", "0.4488514", "0.44836777", "0.448367", "0.448367", "0.448367", "0.4467528", "0.44616166", "0.44549683", "0.44489917", "0.44408798", "0.44301823", "0.44292194", "0.44292194", "0.44292194", "0.44185755", "0.44131696", "0.44078857", "0.4407691", "0.4407138", "0.43990877", "0.43931094", "0.43921962", "0.4391711", "0.43914878", "0.43818736", "0.43717933", "0.4369083", "0.4366074", "0.43654692", "0.43596593", "0.43426374", "0.43349388", "0.43320262", "0.43199593", "0.43199593", "0.43199593", "0.4313953", "0.43110833", "0.43054116", "0.4304297", "0.43040714", "0.43037036", "0.43025967", "0.4294723", "0.4291606", "0.42872435", "0.42746732", "0.42687345", "0.4268664", "0.42645854", "0.42640933", "0.4260762", "0.42590603", "0.42500907" ]
0.7828417
0
ReadTrustAnchor reads a root trust anchor from: and returns the data or an error.
func ReadTrustAnchor(q io.Reader) ([]*TrustAnchor, error) { d := xml.NewDecoder(q) t := new(XMLTrustAnchor) if e := d.Decode(t); e != nil { return nil, e } ta := make([]*TrustAnchor, 0) var err error for _, digest := range t.KeyDigest { t1 := new(TrustAnchor) t1.Id = t.Id t1.Source = t.Source t1.AnchorId = digest.Id if t1.ValidFrom, err = time.Parse("2006-01-02T15:04:05-07:00", digest.ValidFrom); err != nil { return nil, err } if digest.ValidUntil != "" { if t1.ValidUntil, err = time.Parse("2006-01-02T15:04:05-07:00", digest.ValidUntil); err != nil { return nil, err } } d := new(RR_DS) d.Hdr = RR_Header{Name: t.Zone, Class: ClassINET, Rrtype: TypeDS} d.KeyTag = digest.KeyTag d.Algorithm = digest.Algorithm d.DigestType = digest.DigestType d.Digest = digest.Digest t1.Anchor = d // Some checks here too? ta = append(ta, t1) } return ta, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func getTrustAnchor() (*trustAnchor, error) {\n\tresp, err := http.Get(trustAnchorURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"bad http response: %d\", resp.StatusCode)\n\t}\n\tbyteValue, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar anchor trustAnchor\n\tif err := xml.Unmarshal(byteValue, &anchor); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &anchor, nil\n}", "func (c *ca) TrustAnchors() []byte {\n\treturn c.bundle.TrustAnchors\n}", "func (o LookupCustomKeyStoreResultOutput) TrustAnchorCertificate() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupCustomKeyStoreResult) string { return v.TrustAnchorCertificate }).(pulumi.StringOutput)\n}", "func (s Signer) ReadCA() (string, error) {\n\treturn string(ssh.MarshalAuthorizedKey(s.CACert)), nil\n}", "func ReadRootCA(RootCACertFile string, RootCAKeyFile string) (rootCA []byte, rootKey []byte, err error) {\n\n // Check if files exist\n rootCAExists, err := FileExists(RootCACertFile)\n if err != nil {\n return nil, nil, err\n }\n\n rootKeyExists, err := FileExists(RootCAKeyFile)\n if err != nil {\n return nil, nil, err\n }\n\n // We need both key and cert to exist\n if (rootCAExists && rootKeyExists) {\n\n // If files exist, read rootCA first\n rootCA, err = ioutil.ReadFile(RootCACertFile)\n if err != nil {\n return nil, nil, errors.New(fmt.Sprintf(\"Error reading %s file\", RootCACertFile))\n }\n\n // Now check if rootCA is a valid DER certificate\n if _, err = x509.ParseCertificate(rootCA); err != nil {\n return nil, nil, err\n }\n\n // Read rootKey\n rootKey, err = ioutil.ReadFile(RootCAKeyFile)\n if err != nil {\n return nil, nil, errors.New(fmt.Sprintf(\"Error reading %s file\", RootCAKeyFile))\n }\n\n // Check if rootKey is a valid key - we already have tlsdump.ParsePrivateKey that does this\n if _, _, err = ParsePrivateKey(rootKey); err != nil {\n return nil, nil, err\n }\n\n return rootCA, 
rootKey, nil\n\n } else {\n // Custom error text\n var customError = \"\"\n\n if !rootCAExists {\n customError += fmt.Sprintf(\"%s does not exist\", RootCACertFile)\n }\n\n if !rootKeyExists {\n customError += fmt.Sprintf(\"\\n%s does not exist\", RootCAKeyFile)\n }\n\n return nil, nil, errors.New(customError)\n }\n\n // We should not get there (because both if and else have returns) but just in case\n return nil, nil, err\n\n}", "func TrustAnchorString(t []*TrustAnchor) string {\n\txta := new(XMLTrustAnchor)\n\txta.KeyDigest = make([]*XMLKeyDigest, 0)\n\tfor _, ta := range t {\n\t\txta.Id = ta.Id // Sets the everytime, but that is OK.\n\t\txta.Source = ta.Source\n\t\txta.Zone = ta.Anchor.Hdr.Name\n\t\txkd := new(XMLKeyDigest)\n\t\txkd.Id = ta.AnchorId\n\t\txkd.ValidFrom = ta.ValidFrom.Format(\"2006-01-02T15:04:05-07:00\")\n\t\tif !ta.ValidUntil.IsZero() {\n\t\t\txkd.ValidUntil = ta.ValidUntil.Format(\"2006-01-02T15:04:05-07:00\")\n\t\t}\n\t\txkd.KeyTag = ta.Anchor.KeyTag\n\t\txkd.Algorithm = ta.Anchor.Algorithm\n\t\txkd.DigestType = ta.Anchor.DigestType\n\t\txkd.Digest = ta.Anchor.Digest\n\t\txta.KeyDigest = append(xta.KeyDigest, xkd)\n\t}\n\tb, _ := xml.MarshalIndent(xta, \"\", \"\\t\")\n\treturn string(b)\n}", "func trustAnchorToDS(anchor *trustAnchor) []*dns.DS {\n\tvar res []*dns.DS\n\tfor _, key := range anchor.KeyDigests {\n\t\tds := &dns.DS{\n\t\t\tHdr: dns.RR_Header{\n\t\t\t\tName: \".\",\n\t\t\t\tRrtype: dns.TypeDS,\n\t\t\t\tClass: dns.ClassINET,\n\t\t\t},\n\t\t\tKeyTag: key.KeyTag,\n\t\t\tAlgorithm: key.Algorithm,\n\t\t\tDigestType: key.DigestType,\n\t\t\tDigest: strings.ToLower(key.Digest),\n\t\t}\n\t\tres = append(res, ds)\n\t}\n\treturn res\n}", "func readCert(t *testing.T) []byte {\n\tcert, err := ioutil.ReadFile(\"testdata/root.pem\")\n\tif err != nil {\n\t\tt.Fatalf(\"Error reading cert: %s\", err.Error())\n\t}\n\treturn cert\n}", "func readCert(t *testing.T) []byte {\n\tcert, err := ioutil.ReadFile(\"testdata/root.pem\")\n\tif err != nil 
{\n\t\tt.Fatalf(\"Error reading cert: %s\", err.Error())\n\t}\n\treturn cert\n}", "func readCACert(caCertPath string) ([]byte, error) {\n\tcaCert, err := os.ReadFile(caCertPath)\n\tif err != nil {\n\t\tlog.Errorf(\"failed to read CA cert, cert. path: %v, error: %v\", caCertPath, err)\n\t\treturn nil, fmt.Errorf(\"failed to read CA cert, cert. path: %v, error: %v\", caCertPath, err)\n\t}\n\n\tb, _ := pem.Decode(caCert)\n\tif b == nil {\n\t\treturn nil, fmt.Errorf(\"could not decode pem\")\n\t}\n\tif b.Type != \"CERTIFICATE\" {\n\t\treturn nil, fmt.Errorf(\"ca certificate contains wrong type: %v\", b.Type)\n\t}\n\tif _, err := x509.ParseCertificate(b.Bytes); err != nil {\n\t\treturn nil, fmt.Errorf(\"ca certificate parsing returns an error: %v\", err)\n\t}\n\n\treturn caCert, nil\n}", "func GetTrustConfig(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *TrustConfigState, opts ...pulumi.ResourceOption) (*TrustConfig, error) {\n\tvar resource TrustConfig\n\terr := ctx.ReadResource(\"google-native:certificatemanager/v1:TrustConfig\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (e ENS) handleEthLinkSOA(name string, domain string) ([]dns.RR, error) {\n\tresults := make([]dns.RR, 0)\n\tif name == e.EthLinkRoot {\n\t\t// Create a synthetic SOA record\n\t\tnow := time.Now()\n\t\tser := ((now.Hour()*3600 + now.Minute()) * 100) / 86400\n\t\tdateStr := fmt.Sprintf(\"%04d%02d%02d%02d\", now.Year(), now.Month(), now.Day(), ser)\n\t\tresult, err := dns.NewRR(fmt.Sprintf(\"%s 10800 IN SOA ns3.ethdns.xyz. 
hostmaster.%s %s 3600 600 1209600 300\", name, name, dateStr))\n\t\tif err != nil {\n\t\t\treturn results, err\n\t\t}\n\t\tresults = append(results, result)\n\t}\n\treturn results, nil\n}", "func ReadCertificateAuthority(publicKeyFile, privateKeyFile string) (*KeyPair, error) {\n\troot := new(KeyPair)\n\n\trootKey, errRead := ioutil.ReadFile(privateKeyFile)\n\tif errRead != nil {\n\t\treturn nil, fmt.Errorf(\"failed to read private key: %w\", errRead)\n\t}\n\n\tprivPemBlock, _ := pem.Decode(rootKey)\n\n\t// Note that we use PKCS8 to parse the private key here.\n\trootPrivKey, errParse := x509.ParsePKCS8PrivateKey(privPemBlock.Bytes)\n\tif errParse != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse private key: %w\", errParse)\n\t}\n\n\troot.PrivateKey = rootPrivKey.(*rsa.PrivateKey)\n\n\trootCert, errRead := ioutil.ReadFile(publicKeyFile)\n\tif errRead != nil {\n\t\treturn nil, fmt.Errorf(\"failed to read public key: %w\", errRead)\n\t}\n\n\tpublicPemBlock, _ := pem.Decode(rootCert)\n\n\trootPubCrt, errParse := x509.ParseCertificate(publicPemBlock.Bytes)\n\tif errParse != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse public key: %w\", errParse)\n\t}\n\n\troot.PublicKey = rootPubCrt\n\n\treturn root, nil\n}", "func ReadSymlink(path string) (string, error) {\n\tvar realPath string\n\tvar err error\n\tif realPath, err = filepath.Abs(path); err != nil {\n\t\treturn \"\", fmt.Errorf(\"unable to get absolute path for %s: %s\", path, err)\n\t}\n\tif realPath, err = filepath.EvalSymlinks(realPath); err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to canonicalise path for %s: %s\", path, err)\n\t}\n\treturn realPath, nil\n}", "func GetTrustDomainFromURISAN(uriSan string) (string, error) {\n\tparsed, err := ParseIdentity(uriSan)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to parse URI SAN %s. 
Error: %v\", uriSan, err)\n\t}\n\treturn parsed.TrustDomain, nil\n}", "func (s *SmartContract) ReadSignature(ctx contractapi.TransactionContextInterface, id string) (string, error) {\n\tctcJSON, err := ctx.GetStub().GetState(id)\n\tfmt.Print(string(ctcJSON))\n\tif err != nil {\n\t\treturn string(ctcJSON), fmt.Errorf(\"Unexpected error : %q\", err)\n\t}\n\tctc := new(Contract)\n\t_ = json.Unmarshal(ctcJSON, ctc)\n\treturn ctc.Signature, nil\n}", "func (s *scanner) getAnchor() error {\n\t// Check out if a file exists, if not, create one.\n\tvar _, err = os.Stat(s.iguFilePath)\n\n\tvar f *os.File\n\tif os.IsNotExist(err) {\n\t\tf, err = os.Create(s.iguFilePath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer f.Close()\n\n\t\tw := bufio.NewWriter(f)\n\t\t_, err := w.WriteString(\"last:0\\n\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Read out the anchor save earlier.\n\tf, err = os.Open(s.iguFilePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\tr := bufio.NewReader(f)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Create a file scanner for reading\n\tfScanner := bufio.NewScanner(r)\n\n\tvar anchorLine string\n\tfor fScanner.Scan() {\n\t\t// We assume only one line. 
So we break after read one line.\n\t\tanchorLine = fScanner.Text()\n\t\tbreak\n\t}\n\n\tif err := fScanner.Err(); err != nil {\n\t\treturn err\n\t}\n\n\t// If the line is empty, we assume log file for a given service has never been read by Iguana before.\n\tif anchorLine == \"\" {\n\t\treturn nil\n\t}\n\n\tanc, err := extract(anchorLine)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.anchor = anc\n\n\treturn nil\n}", "func (o KubernetesClusterHttpProxyConfigPtrOutput) TrustedCa() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *KubernetesClusterHttpProxyConfig) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.TrustedCa\n\t}).(pulumi.StringPtrOutput)\n}", "func (o KubernetesClusterHttpProxyConfigOutput) TrustedCa() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v KubernetesClusterHttpProxyConfig) *string { return v.TrustedCa }).(pulumi.StringPtrOutput)\n}", "func (t *TLSConfig) ReadCertificate() (*tls.Certificate, error) {\n\tif !t.Enabled() {\n\t\treturn nil, nil\n\t}\n\tcert, err := tls.LoadX509KeyPair(t.Cert, t.Key)\n\treturn &cert, err\n}", "func ReadCertificate(data []byte) (certificate *Certificate, remainder []byte, err error) {\n\tcertificate, err = NewCertificate(data)\n\tif err != nil && err.Error() == \"certificate parsing warning: certificate data is longer than specified by length\" {\n\t\tremainder = certificate.ExcessBytes()\n\t\terr = nil\n\t}\n\treturn\n}", "func (t *SimpleChaincode) readCert(APIstub shim.ChaincodeStubInterface, args []string) pb.Response {\n\tvar name, jsonResp string\n\tvar err error\n\tif len(args) != 1 {\n\t\treturn shim.Error(\"Incorrect number of arguments. 
Expecting 1\")\n\t}\n\tname = args[0]\n\tvalAsbytes, err := APIstub.GetState(name)\n\tif err != nil {\n\t\tjsonResp = \"{\\\"Error\\\":\\\"Failed to get state for \" + name + \"\\\"}\"\n\t\treturn shim.Error(jsonResp)\n\t} else if valAsbytes == nil {\n\t\tjsonResp = \"{\\\"Error\\\":\\\"Certificate does not exist: \" + name + \"\\\"}\"\n\t\treturn shim.Error(jsonResp)\n\t}\n\treturn shim.Success(valAsbytes)\n}", "func readCertificate(path string) (*x509.Certificate, error) {\n\tcertBytes, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn nil, trace.ConvertSystemError(err)\n\t}\n\tblock, _ := pem.Decode(certBytes)\n\tif block == nil {\n\t\treturn nil, trace.BadParameter(\"failed to decode certificate at %v\", path)\n\t}\n\tcert, err := x509.ParseCertificate(block.Bytes)\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\treturn cert, nil\n}", "func (s *SecurityConfig) RootCA() *RootCA {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\treturn s.rootCA\n}", "func ReadCertAuthorityPackage(packages pack.PackageService, clusterName string) (utils.TLSArchive, error) {\n\tcaPackage := PlanetCertAuthorityPackage(clusterName)\n\t_, reader, err := packages.ReadPackage(caPackage)\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\tdefer reader.Close()\n\treturn utils.ReadTLSArchive(reader)\n}", "func getCACert(configURL string, certHash string, timeout int64) ([]byte, error) {\n\tc := &http.Client{Timeout: time.Second * time.Duration(timeout)}\n\tresp, err := c.Get(fixURL(configURL) + \"cacert\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tp, err := readBody(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttestHash, err := cahash.Hash(p)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcertHashb, err := hex.DecodeString(certHash)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !bytes.Equal(testHash, certHashb) {\n\t\treturn nil, ErrHashWrong\n\t}\n\treturn p, nil\n}", "func DecodeRawAddress(s []byte) (Address, error) {\n\tif len(s) == 0 
{\n\t\treturn nil, errors.New(\"empty address\")\n\t}\n\n\theader := s[0]\n\tnetwork := Network(header & 0x0f)\n\n\treadAddrCred := func(bit byte, pos int) StakeCredential {\n\t\thashBytes := s[pos : pos+Hash28Size]\n\t\tif header&(1<<bit) == 0 {\n\t\t\treturn StakeCredential{Kind: KeyStakeCredentialType, Data: hashBytes}\n\t\t}\n\t\treturn StakeCredential{Kind: ScriptStakeCredentialype, Data: hashBytes}\n\t}\n\n\tswitch (header & 0xf0) >> 4 {\n\t// Base type\n\tcase 0b0000, 0b0001, 0b0010, 0b0011:\n\t\t// header + keyhash\n\t\tif len(s) != 57 {\n\t\t\treturn nil, errors.New(\"Invalid length for base address\")\n\t\t}\n\t\treturn &BaseAddress{Network: network, Payment: readAddrCred(4, 1),\n\t\t\tStake: readAddrCred(5, Hash28Size+1)}, nil\n\t// Pointer type\n\tcase 0b0100, 0b0101:\n\t\t// header + keyhash + 3 natural numbers (min 1 byte each)\n\t\tif len(s) < 32 {\n\t\t\treturn nil, errors.New(\"Invalid length for pointer address\")\n\t\t}\n\t\tbyteIndex := 1\n\t\tpaymentCred := readAddrCred(4, 1)\n\t\tslot, slotBytes, ok := VariableNatDecode(s[byteIndex:])\n\t\tif !ok {\n\t\t\treturn nil, errors.New(\"slot variable decode failed\")\n\t\t}\n\t\tbyteIndex += slotBytes\n\n\t\ttxIndex, txBytes, ok := VariableNatDecode(s[byteIndex:])\n\t\tif !ok {\n\t\t\treturn nil, errors.New(\"txIndex variable decode failed\")\n\t\t}\n\t\tbyteIndex += txBytes\n\n\t\tcertIndex, certBytes, ok := VariableNatDecode(s[byteIndex:])\n\t\tif !ok {\n\t\t\treturn nil, errors.New(\"certIndex variable decode failed\")\n\t\t}\n\t\tbyteIndex += certBytes\n\n\t\tif byteIndex > len(s) {\n\t\t\treturn nil, errors.New(\"byte index is out range of pointer lenght\")\n\t\t}\n\n\t\treturn &PointerAddress{\n\t\t\tNetwork: network, Payment: paymentCred,\n\t\t\tStake: StakePoint{Slot: slot, TxIndex: txIndex, CertIndex: certIndex},\n\t\t}, nil\n\t// Enterprise type\n\tcase 0b0110, 0b0111:\n\t\t// header + keyhash\n\t\tif len(s) != 29 {\n\t\t\treturn nil, errors.New(\"invalid length for enterprise 
address\")\n\t\t}\n\t\treturn &EnterpriseAddress{Network: network, Payment: readAddrCred(4, 1)}, nil\n\t// Reward type\n\tcase 0b1110, 0b1111:\n\t\tif len(s) != 29 {\n\t\t\treturn nil, errors.New(\"invalid length for reward address\")\n\t\t}\n\t\treturn &Reward{Network: network, Payment: readAddrCred(4, 1)}, nil\n\t// Legacy byron type\n\tcase 0b1000:\n\t\tvar byron LegacyAddress\n\t\tif err := cbor.Unmarshal(s, &byron); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn &byron, nil\n\t}\n\treturn nil, errors.New(\"unsupports address type\")\n}", "func ReadCertificate(certPath string) (interface{}, error) {\n\tbytes, err := ioutil.ReadFile(certPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tblock, _ := pem.Decode(bytes)\n\tvar cert *x509.Certificate\n\tcert, err = x509.ParseCertificate(block.Bytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn cert.PublicKey, nil\n}", "func Readlink(name string) (string, error)", "func readSignedCertificate(client clientset.Interface, csrName string,\n\twatchTimeout, readInterval time.Duration,\n\tmaxNumRead int, caCertPath string, appendCaCert bool, usev1 bool,\n) ([]byte, []byte, error) {\n\t// First try to read the signed CSR through a watching mechanism\n\tcertPEM := readSignedCsr(client, csrName, watchTimeout, readInterval, maxNumRead, usev1)\n\n\tif len(certPEM) == 0 {\n\t\treturn []byte{}, []byte{}, fmt.Errorf(\"no certificate returned for the CSR: %q\", csrName)\n\t}\n\tcertsParsed, _, err := util.ParsePemEncodedCertificateChain(certPEM)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"decoding certificate failed\")\n\t}\n\tcaCert := []byte{}\n\tcertChain := []byte{}\n\tcertChain = append(certChain, certPEM...)\n\tif appendCaCert && caCertPath != \"\" {\n\t\tcaCert, err = readCACert(caCertPath)\n\t\tif err != nil {\n\t\t\treturn nil, nil, fmt.Errorf(\"error when retrieving CA cert: (%v)\", err)\n\t\t}\n\t\t// Verify the certificate chain before returning the certificate\n\t\troots := 
x509.NewCertPool()\n\t\tif roots == nil {\n\t\t\treturn nil, nil, fmt.Errorf(\"failed to create cert pool\")\n\t\t}\n\t\tif ok := roots.AppendCertsFromPEM(caCert); !ok {\n\t\t\treturn nil, nil, fmt.Errorf(\"failed to append CA certificate\")\n\t\t}\n\t\tintermediates := x509.NewCertPool()\n\t\tif len(certsParsed) > 1 {\n\t\t\tfor _, cert := range certsParsed[1:] {\n\t\t\t\tintermediates.AddCert(cert)\n\t\t\t}\n\t\t}\n\t\t_, err = certsParsed[0].Verify(x509.VerifyOptions{\n\t\t\tRoots: roots,\n\t\t\tIntermediates: intermediates,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, nil, fmt.Errorf(\"failed to verify the certificate chain: %v\", err)\n\t\t}\n\t\tcertChain = append(certChain, caCert...)\n\t}\n\treturn certChain, caCert, nil\n}", "func readCAFile(f string) ([]byte, error) {\n\tdata, err := ioutil.ReadFile(f)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to load specified CA cert %s: %s\", f, err)\n\t}\n\treturn data, nil\n}", "func parseSpecialAnchor(p RuleProcessor, s string) (tail string, err error) {\n\ti := strings.IndexByte(s, ']')\n\tif i == -1 {\n\t\treturn \"\", errors.New(\"unmatched bracket\")\n\t}\n\ta := strings.TrimSpace(s[:i])\n\ts = s[i+1:]\n\tif strings.HasPrefix(a, \"before \") {\n\t\tl, err := strconv.ParseUint(skipSpace(a[len(\"before \"):]), 10, 3)\n\t\tif err != nil {\n\t\t\treturn s, err\n\t\t}\n\t\treturn parseAnchor(p, int(l), s)\n\t}\n\treturn s, p.Reset(fmt.Sprintf(specialAnchor, a), 0)\n}", "func (w *Wallet) calcNextBlake3DiffFromAnchor(prevNode, blake3Anchor *wire.BlockHeader) uint32 {\n\t// Calculate the time and height deltas as the difference between the\n\t// provided block and the blake3 anchor block.\n\t//\n\t// Notice that if the difficulty prior to the activation point were being\n\t// maintained, this would need to be the timestamp and height of the parent\n\t// of the blake3 anchor block (except when the anchor is the genesis block)\n\t// in order for the absolute calculations to exactly match the behavior 
of\n\t// relative calculations.\n\t//\n\t// However, since the initial difficulty is reset with the agenda, no\n\t// additional offsets are needed.\n\ttimeDelta := prevNode.Timestamp.Unix() - blake3Anchor.Timestamp.Unix()\n\theightDelta := int64(prevNode.Height) - int64(blake3Anchor.Height)\n\n\t// Calculate the next target difficulty using the ASERT algorithm.\n\t//\n\t// Note that the difficulty of the anchor block is NOT used for the initial\n\t// difficulty because the difficulty must be reset due to the change to\n\t// blake3 for proof of work. The initial difficulty comes from the chain\n\t// parameters instead.\n\tparams := w.chainParams\n\tnextDiff := blockchain.CalcASERTDiff(params.WorkDiffV2Blake3StartBits,\n\t\tparams.PowLimit, int64(params.TargetTimePerBlock.Seconds()), timeDelta,\n\t\theightDelta, params.WorkDiffV2HalfLifeSecs)\n\n\t// Prevent the difficulty from going higher than a maximum allowed\n\t// difficulty on the test network. This is to prevent runaway difficulty on\n\t// testnet by ASICs and GPUs since it's not reasonable to require\n\t// high-powered hardware to keep the test network running smoothly.\n\t//\n\t// Smaller numbers result in a higher difficulty, so imposing a maximum\n\t// difficulty equates to limiting the minimum target value.\n\tif w.minTestNetTarget != nil && nextDiff < w.minTestNetDiffBits {\n\t\tnextDiff = w.minTestNetDiffBits\n\t}\n\n\treturn nextDiff\n}", "func msanread(addr unsafe.Pointer, sz uintptr) { throw(\"msan\") }", "func (f *FakeFileSystem) Readlink(name string) (string, error) {\n\treturn f.ReadlinkName, f.ReadlinkError\n}", "func loadCaCertPem(in io.Reader) ([]byte, error) {\n\tcaCertPemBytes, err := ioutil.ReadAll(in)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tblock, _ := pem.Decode(caCertPemBytes)\n\tif block == nil {\n\t\treturn nil, errors.New(\"could not decode pem\")\n\t}\n\tif block.Type != \"CERTIFICATE\" {\n\t\treturn nil, fmt.Errorf(\"ca bundle contains wrong pem type: %q\", 
block.Type)\n\t}\n\tif _, err := x509.ParseCertificate(block.Bytes); err != nil {\n\t\treturn nil, fmt.Errorf(\"ca bundle contains invalid x509 certificate: %v\", err)\n\t}\n\treturn caCertPemBytes, nil\n}", "func TestParseAnchor(t *testing.T) {\n\tgot, err := ParseString(sampleYamlAnchors)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\twant := &Config{\n\t\tPipeline: []map[string]*yaml.Container{\n\t\t\tmap[string]*yaml.Container{\n\t\t\t\t\"notify_fail\": &yaml.Container{\n\t\t\t\t\tImage: \"plugins/slack\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tmap[string]*yaml.Container{\n\t\t\t\t\"notify_success\": &yaml.Container{\n\t\t\t\t\tImage: \"plugins/slack\",\n\t\t\t\t\tConstraints: yaml.Constraints{\n\t\t\t\t\t\tStatus: yaml.Constraint{\n\t\t\t\t\t\t\tInclude: []string{\"success\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tdiff := pretty.Diff(got, want)\n\tif len(diff) != 0 {\n\t\tt.Errorf(\"Failed to parse yaml with anchors. Diff %s\", diff)\n\t}\n}", "func ReadCert(filename string) (*x509.Certificate, error) {\n\tblock, err := ReadBlock(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := IsType(block, certPEMType); err != nil {\n\t\treturn nil, err\n\t}\n\n\tcert, err := x509.ParseCertificate(block.Bytes)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse certificate: %w\", err)\n\t}\n\n\treturn cert, nil\n}", "func (asc *AsenaSmartContract) Read(ctx contractapi.TransactionContextInterface, key string) (ResultAsBytes []byte, err error) {\n\n\tfmt.Println(\"GetState: called with key:\", key)\n\n\tResultAsBytes, err = ctx.GetStub().GetState(key)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to read from world state. 
%s\", err.Error())\n\t}\n\n\treturn ResultAsBytes, nil\n}", "func parseOpenSSL(line string) (hash, filename string, sum []byte, err error) {\n\tif !strings.Contains(line, \"(\") {\n\t\treturn \"\", \"\", nil, ErrNotOpenSSLLine\n\t}\n\tchunks := strings.SplitN(strings.TrimRight(line, \"\\n\"), \")= \", 2)\n\tif len(chunks) != 2 {\n\t\treturn \"\", \"\", nil, ErrNotOpenSSLLine\n\t}\n\tchunksprime := strings.SplitN(chunks[0], \"(\", 2)\n\tif len(chunks) != 2 {\n\t\treturn \"\", \"\", nil, ErrNotOpenSSLLine\n\t}\n\tsum, err = hex.DecodeString(chunks[1])\n\tif err != nil {\n\t\treturn \"\", \"\", nil, err\n\t}\n\treturn chunksprime[0], chunksprime[1], sum, nil\n}", "func (repo *Repository) Readlink(id SHA1) (string, error) {\n\n\tb, err := repo.OpenObject(id)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif b.Type() != ObjBlob {\n\t\treturn \"\", fmt.Errorf(\"id must point to a blob\")\n\t}\n\n\tblob := b.(*Blob)\n\n\t//TODO: check size and don't read unreasonable large blobs\n\tdata, err := ioutil.ReadAll(blob)\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn string(data), nil\n}", "func httpClientForRootCAs(certificateAuthorityData, clientCertificateData, clientKeyData string, insecureSkipTLSVerify bool) (*tls.Config, error) {\n\ttlsConfig := tls.Config{}\n\n\tif certificateAuthorityData != \"\" {\n\t\ttlsConfig = tls.Config{RootCAs: x509.NewCertPool()}\n\t\trootCA := []byte(certificateAuthorityData)\n\n\t\tif !tlsConfig.RootCAs.AppendCertsFromPEM(rootCA) {\n\t\t\treturn nil, fmt.Errorf(\"no certs found in root CA file\")\n\t\t}\n\t}\n\n\tif clientCertificateData != \"\" && clientKeyData != \"\" {\n\t\tcert, err := tls.X509KeyPair([]byte(clientCertificateData), []byte(clientKeyData))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\ttlsConfig.Certificates = []tls.Certificate{cert}\n\t}\n\n\ttlsConfig.InsecureSkipVerify = insecureSkipTLSVerify\n\n\treturn &tlsConfig, nil\n}", "func (imsl *Symlink) Readlink(ctx context.Context) (string, error) 
{\n\treturn imsl.target, nil\n}", "func (p *Predicate) TimeAnchor() (*time.Time, error) {\n\tif p.anchor == nil {\n\t\treturn nil, fmt.Errorf(\"predicate.TimeAnchor cannot return anchor for immutable predicate %v\", p)\n\t}\n\treturn p.anchor, nil\n}", "func (_Bfs *BfsSession) Readlink(absolutePath string) (common.Address, error) {\n\treturn _Bfs.Contract.Readlink(&_Bfs.CallOpts, absolutePath)\n}", "func (c *CertificatesRPCServer) GetTrustRoots(ctx context.Context, empty *ckmproto.Empty) (*ckmproto.TrustRoots, error) {\n\tcerts := make([]*ckmproto.Certificate, 0)\n\tfor _, c := range c.certificateMgr.Ca().TrustRoots() {\n\t\tcerts = append(certs, &ckmproto.Certificate{Certificate: c.Raw})\n\t}\n\treturn &ckmproto.TrustRoots{TrustRoots: certs}, nil\n}", "func (_Bfs *BfsCaller) Readlink(opts *bind.CallOpts, absolutePath string) (common.Address, error) {\n\tvar (\n\t\tret0 = new(common.Address)\n\t)\n\tout := ret0\n\terr := _Bfs.contract.Call(opts, out, \"readlink\", absolutePath)\n\treturn *ret0, err\n}", "func parse(s string) (anchor string, file, recurse bool, err error) {\n\ts = strings.TrimSpace(s)\n\tif len(s) == 0 || s[0] != '/' {\n\t\treturn \"\", false, false, circuit.NewError(\"invalid anchor\")\n\t}\n\tif len(s) > 3 && s[len(s)-3:] == \"...\" {\n\t\trecurse = true\n\t\ts = s[:len(s)-3]\n\t}\n\t_, leaf := path.Split(s)\n\tif _, err := circuit.ParseWorkerID(leaf); err == nil {\n\t\treturn s, true, false, nil\n\t}\n\treturn s, false, recurse, nil\n}", "func (_Bfs *BfsCallerSession) Readlink(absolutePath string) (common.Address, error) {\n\treturn _Bfs.Contract.Readlink(&_Bfs.CallOpts, absolutePath)\n}", "func (s *Storage) ReadCanonicalHash(n *big.Int) common.Hash {\n\tdata := s.get(CANONICAL, n.Bytes())\n\tif len(data) == 0 {\n\t\treturn common.Hash{}\n\t}\n\treturn common.BytesToHash(data)\n}", "func (c *Client) GetArtworkForRootID(rootID string) ([]Artwork, error) {\n\turl := fmt.Sprint(c.BaseURL, APIVersion, \"/metadata/programs/\", rootID)\n\n\t// setup 
the request\n\treq, httpErr := http.NewRequest(\"GET\", url, nil)\n\tif httpErr != nil {\n\t\treturn nil, httpErr\n\t}\n\n\t_, data, err := c.SendRequest(req, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tprogramArtwork := make([]Artwork, 0)\n\n\tif err = json.Unmarshal(data, &programArtwork); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn programArtwork, err\n}", "func (hg *HostGroup) SSRead(ctx context.Context, params SectionParams) ([]byte, error) {\n\treturn hg.client.PostInOut(ctx, \"/api/v1.0/HostGroup.SS_Read\", params, nil)\n}", "func (c *CertificatesRPCServer) GetCaTrustChain(ctx context.Context, empty *ckmproto.Empty) (*ckmproto.CaTrustChain, error) {\n\tcerts := make([]*ckmproto.Certificate, 0)\n\tfor _, c := range c.certificateMgr.Ca().TrustChain() {\n\t\tcerts = append(certs, &ckmproto.Certificate{Certificate: c.Raw})\n\t}\n\treturn &ckmproto.CaTrustChain{Certificates: certs}, nil\n}", "func (this *school) ReadFrom(r io.Reader) (int64, error) {\n\t// var addrs = map[uint64]uintptr{}\n\tvar header [2]uint64\n\tn, err := io.ReadAtLeast(r, ((*[16]byte)(unsafe.Pointer(&header[0])))[:], 16)\n\tif err == nil {\n\t\tif header[0] != 11738600557355911093 {\n\t\t\terr = errors.New(\"school: incompatible signature header\")\n\t\t} else {\n\t\t\tdata := make([]byte, header[1])\n\t\t\tif n, err = io.ReadAtLeast(r, data, len(data)); err == nil {\n\t\t\t\tvar pos0 int\n\t\t\t\terr = this.unmarshalFrom(&pos0, data /*, addrs*/)\n\t\t\t}\n\t\t\tn += 16\n\t\t}\n\t}\n\treturn int64(n), err\n}", "func CheckTrust(host string) bool {\n\tif i, ok := trustList[host]; ok {\n\t\treturn i\n\t}\n\ttrustList[host], _ = RunCmd(\"ssh %s \\\"hostname\\\" &>/dev/null\", host)\n\treturn trustList[host]\n}", "func (us *DelegationService) ReadDelegation(id string) (*models.Delegation, error) {\n\t// No much to do in this case, except forwarding to the storage layer\n\treturn us.storeInterface.ReadDelegation(id)\n}", "func readCiphertext(filename string) ([]byte, int, 
*secret.SealedBytes, error) {\n\tplaintextBytes, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\treturn nil, 0, nil, err\n\t}\n\n\tvar ec EncodedCiphertext\n\terr = json.Unmarshal(plaintextBytes, &ec)\n\tif err != nil {\n\t\treturn nil, 0, nil, err\n\t}\n\n\tsealedBytes := &secret.SealedBytes{\n\t\tCiphertext: ec.Ciphertext,\n\t\tNonce: ec.CiphertextNonce,\n\t}\n\n\treturn ec.KeySalt, ec.KeyIter, sealedBytes, nil\n}", "func findTrustedCerts(cfg *Config, objects []*Object) ([]*x509.Certificate, error) {\n\tvar out []*x509.Certificate\n\n\tcerts := filterObjectsByClass(objects, \"CKO_CERTIFICATE\")\n\ttrusts := filterObjectsByClass(objects, \"CKO_NSS_TRUST\")\n\n\tfor _, cert := range certs {\n\t\tderBytes := cert.attrs[\"CKA_VALUE\"].value\n\t\thash := sha1.New()\n\t\thash.Write(derBytes)\n\t\tdigest := hash.Sum(nil)\n\n\t\tx509, err := x509.ParseCertificate(derBytes)\n\t\tif err != nil {\n\t\t\t// This is known to occur because of a broken certificate in NSS.\n\t\t\t// https://bugzilla.mozilla.org/show_bug.cgi?id=707995\n\t\t\tcontinue\n\t\t}\n\n\t\t// TODO(agl): wtc tells me that Mozilla might get rid of the\n\t\t// SHA1 records in the future and use issuer and serial number\n\t\t// to match trust records to certificates (which is what NSS\n\t\t// currently uses). 
This needs some changes to the crypto/x509\n\t\t// package to keep the raw names around.\n\n\t\tvar trust *Object\n\t\tfor _, possibleTrust := range trusts {\n\t\t\tif bytes.Equal(digest, possibleTrust.attrs[\"CKA_CERT_SHA1_HASH\"].value) {\n\t\t\t\ttrust = possibleTrust\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\ttrustType := trust.attrs[\"CKA_TRUST_SERVER_AUTH\"].value\n\n\t\tvar trusted bool\n\t\tswitch string(trustType) {\n\t\tcase \"CKT_NSS_NOT_TRUSTED\":\n\t\t\t// An explicitly distrusted cert\n\t\t\ttrusted = false\n\t\tcase \"CKT_NSS_TRUSTED_DELEGATOR\":\n\t\t\t// A cert trusted for issuing SSL server certs.\n\t\t\ttrusted = true\n\t\tcase \"CKT_NSS_TRUST_UNKNOWN\", \"CKT_NSS_MUST_VERIFY_TRUST\":\n\t\t\t// A cert not trusted for issuing SSL server certs, but is trusted for other purposes.\n\t\t\ttrusted = false\n\t\t}\n\n\t\tif !trusted && !cfg.IncludedUntrustedFlag {\n\t\t\tcontinue\n\t\t}\n\n\t\tout = append(out, x509)\n\t}\n\n\treturn out, nil\n}", "func TstAddressSAddrP2PKECDSA(addr string) []byte {\n\t_, decoded, _, _ := bech32.Decode(addr)\n\treturn decoded[:PublicKeySizeECDSA]\n}", "func parseCaCert(cc []byte) ([]byte, error) {\n\n\t// decode Pem from certificate into block\n\tblock, rest := pem.Decode([]byte(cc))\n\tif block == nil {\n\t\tif klog.V(3) {\n\t\t\ts := string(rest)\n\t\t\tklog.Infof(\"tried to decode pem: %v\", s)\n\t\t}\n\t\treturn nil, errors.New(\"error decoding the pem block\")\n\t}\n\n\t// parse the decoded pem block to x509 encoded block\n\tb, err := tryParseX509(block)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// encodes the x509 encoded block to a valid x509 certificate encoded pem.\n\tpem := pem.EncodeToMemory(&pem.Block{\n\t\tType: \"CERTIFICATE\",\n\t\tBytes: b,\n\t})\n\n\treturn pem, nil\n}", "func ReadTxSignAndBroadcast(txe *exec.TxExecution, err error) error {\n\t// if there's an error just return.\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// if there is nothing to unpack then just return.\n\tif txe == nil 
{\n\t\treturn nil\n\t}\n\n\t// Unpack and display for the user.\n\thash := fmt.Sprintf(\"%X\", txe.Receipt.TxHash)\n\theight := fmt.Sprintf(\"%d\", txe.Height)\n\n\tif txe.Receipt.CreatesContract {\n\t\tlog.WithField(\"addr\", txe.Receipt.ContractAddress).Warn()\n\t\tlog.WithField(\"txHash\", hash).Info()\n\t} else {\n\t\tlog.WithField(\"=>\", hash).Warn(\"Transaction Hash\")\n\t\tlog.WithField(\"=>\", height).Debug(\"Block height\")\n\t\tret := txe.GetResult().GetReturn()\n\t\tif len(ret) != 0 {\n\t\t\tlog.WithField(\"=>\", hex.EncodeUpperToString(ret)).Warn(\"Return Value\")\n\t\t\tlog.WithField(\"=>\", txe.Exception).Debug(\"Exception\")\n\t\t}\n\t}\n\n\treturn nil\n}", "func (cs *ChainService) getAnchorsFromHash(blockHash hash) ChainAnchor {\n\t/* TODO: use config */\n\tanchors := make(ChainAnchor, 0, 1000)\n\tanchors = append(anchors, blockHash)\n\n\t// collect 10 latest hashes\n\tlatestNo := cs.getBestBlockNo()\n\tfor i := 0; i < 10; i++ {\n\t\tlogger.Infof(\"Latest %d\", latestNo)\n\t\tblockHash, err := cs.getHashByNo(latestNo)\n\t\tif err != nil {\n\t\t\tlogger.Infof(\"assertion - hash get failed\")\n\t\t\t// assertion!\n\t\t\treturn nil\n\t\t}\n\t\tanchors = append(anchors, blockHash)\n\t\tif latestNo == 0 {\n\t\t\treturn anchors\n\t\t}\n\t\tlatestNo--\n\t}\n\n\t// collect exponential\n\tvar dec types.BlockNo = 1\n\tfor i := 0; i < 10; i++ {\n\t\tblockHash, err := cs.getHashByNo(latestNo)\n\t\tif err != nil {\n\t\t\t// assertion!\n\t\t\treturn nil\n\t\t}\n\t\tanchors = append(anchors, blockHash)\n\t\tif latestNo <= dec {\n\t\t\tif latestNo == 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tlatestNo = 0\n\t\t} else {\n\t\t\tlatestNo -= dec\n\t\t\tdec *= 2\n\t\t}\n\t}\n\n\treturn anchors\n}", "func readCertFile(filename string) []byte {\n\tdataBytes, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\tzap.S().Fatalf(\"Failed to read certificate or key file `%s` : `%s`\", filename, err)\n\t}\n\n\treturn dataBytes\n}", "func (idopts *installIdentityOptions) 
readValues() (*identityWithAnchorsAndTrustDomain, error) {\n\tissuerData, err := issuercerts.LoadIssuerDataFromFiles(idopts.keyPEMFile, idopts.crtPEMFile, idopts.trustPEMFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcreds, err := issuerData.VerifyAndBuildCreds(idopts.issuerName())\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to verify issuer certs stored on disk: %s\", err)\n\t}\n\n\treturn &identityWithAnchorsAndTrustDomain{\n\t\tTrustDomain: idopts.trustDomain,\n\t\tTrustAnchorsPEM: issuerData.TrustAnchors,\n\t\tIdentity: &l5dcharts.Identity{\n\t\t\tIssuer: &l5dcharts.Issuer{\n\t\t\t\tScheme: consts.IdentityIssuerSchemeLinkerd,\n\t\t\t\tClockSkewAllowance: idopts.clockSkewAllowance.String(),\n\t\t\t\tIssuanceLifetime: idopts.issuanceLifetime.String(),\n\t\t\t\tCrtExpiry: creds.Crt.Certificate.NotAfter,\n\t\t\t\tCrtExpiryAnnotation: k8s.IdentityIssuerExpiryAnnotation,\n\t\t\t\tTLS: &l5dcharts.TLS{\n\t\t\t\t\tKeyPEM: creds.EncodePrivateKeyPEM(),\n\t\t\t\t\tCrtPEM: creds.EncodeCertificatePEM(),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}, nil\n}", "func (s *Store) Get(suffix string) (string, error) {\n\tanchorBytes, err := s.store.Get(suffix)\n\tif err != nil {\n\t\tif errors.Is(err, storage.ErrDataNotFound) {\n\t\t\treturn \"\", didanchor.ErrDataNotFound\n\t\t}\n\n\t\treturn \"\", orberrors.NewTransient(fmt.Errorf(\"failed to get content from the underlying storage provider: %w\", err))\n\t}\n\n\tanchor := string(anchorBytes)\n\n\tlogger.Debugf(\"retrieved latest anchor[%s] for suffix[%s]\", anchor, suffix)\n\n\treturn anchor, nil\n}", "func (cert *Certificate) GetRootDomainReference() *insolar.Reference {\n\tref, err := insolar.NewReferenceFromBase58(cert.RootDomainReference)\n\tif err != nil {\n\t\tlog.Errorf(\"Invalid domain reference in cert: %s\\n\", cert.Reference)\n\t\treturn nil\n\t}\n\treturn ref\n}", "func (_Casper *CasperSession) Trusted() (common.Address, error) {\n\treturn _Casper.Contract.Trusted(&_Casper.CallOpts)\n}", "func (c 
*converter) readCertAuthFileIfNecessary(cfg api.Config) error {\n\tcurrentCluster := cfg.Clusters[cfg.Contexts[cfg.CurrentContext].Cluster]\n\tif len(currentCluster.CertificateAuthority) > 0 {\n\t\tfileContent, err := c.fileReader.Read(currentCluster.CertificateAuthority)\n\t\tif err != nil {\n\t\t\treturn FailedToReadCAFile(err, currentCluster.CertificateAuthority)\n\t\t}\n\n\t\tcurrentCluster.CertificateAuthorityData = fileContent\n\t\tcurrentCluster.CertificateAuthority = \"\" // dont need to record the filename in the config; we have the data present\n\t}\n\n\treturn nil\n}", "func (f *UserSettableSymlink) Readlink() (string, error) {\n\treturn \"\", status.Error(codes.InvalidArgument, \"Target of user settable symlinks can only be obtained through the virtual file system\")\n}", "func (c *LogVerifier) VerifyRoot(trusted *types.LogRootV1, newRoot *trillian.SignedLogRoot, consistency [][]byte) (*types.LogRootV1, error) {\n\tif trusted == nil {\n\t\treturn nil, fmt.Errorf(\"VerifyRoot() error: trusted == nil\")\n\t}\n\tif newRoot == nil {\n\t\treturn nil, fmt.Errorf(\"VerifyRoot() error: newRoot == nil\")\n\t}\n\n\tvar r types.LogRootV1\n\tif err := r.UnmarshalBinary(newRoot.LogRoot); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Implicitly trust the first root we get.\n\tif trusted.TreeSize != 0 {\n\t\t// Verify consistency proof.\n\t\tif err := proof.VerifyConsistency(c.hasher, trusted.TreeSize, r.TreeSize, consistency, trusted.RootHash, r.RootHash); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to verify consistency proof from %d->%d %x->%x: %v\", trusted.TreeSize, r.TreeSize, trusted.RootHash, r.RootHash, err)\n\t\t}\n\t}\n\treturn &r, nil\n}", "func (fs osFsEval) Readlink(path string) (string, error) {\n\treturn os.Readlink(path)\n}", "func (w *wrapper) Readlink(path string) (int, string) {\n\tif sfs, ok := w.underlying.(billy.Symlink); ok {\n\t\tfn, err := sfs.Readlink(path)\n\t\tif err != nil {\n\t\t\treturn convertError(err), 
\"\"\n\t\t}\n\t\treturn 0, fn\n\t}\n\treturn -fuse.ENOSYS, \"\"\n}", "func getTLScerts(c, k, ca string) ([]byte, []byte, []byte, error) {\n\tres := [][]byte{}\n\tvar err error\n\tvar a []byte\n\tfor _, l := range []string{c, k, ca} {\n\t\ta, err = ioutil.ReadFile(l)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"getTLScerts failed to load file %s: %s\", l, err)\n\t\t\tbreak\n\t\t}\n\t\tres = append(res, a)\n\t}\n\tif err != nil {\n\t\tisX := false\n\t\thost := \"host\"\n\t\trsaBits := 2048\n\t\tecdsaCurve := \"\"\n\t\tvalidFor := 365 * 24 * time.Hour\n\t\tvalidFrom := \"\"\n\t\tisCA := true\n\t\tlog.Println(\"creating CA\")\n\t\tcacert, cakey, err := internal.Ca(host, rsaBits, ecdsaCurve, validFrom, validFor)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"failed to create certificate: %s\", err)\n\t\t}\n\t\tca_key_pair, err := tls.X509KeyPair(pem.EncodeToMemory(&cacert), pem.EncodeToMemory(&cakey))\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"failed to make ca key pair: %s\", err)\n\t\t}\n\t\tlog.Println(\"creating certificate\")\n\t\tisCA = false\n\t\tcert, priv, err := internal.CaSignedCert(cert_common_name, host, rsaBits, ecdsaCurve, validFrom, validFor, isCA, isX, &ca_key_pair)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"failed to make signed cert %s\", err)\n\t\t}\n\t\treturn pem.EncodeToMemory(&cert), pem.EncodeToMemory(&priv), pem.EncodeToMemory(&cacert), nil\n\t}\n\treturn res[0], res[1], res[2], nil\n}", "func bindAnchorChain(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(AnchorChainABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}", "func lookupTrustInfo(cli command.Cli, remote string) ([]trustTagRow, []client.RoleWithSignatures, []data.Role, error) {\n\tctx := context.Background()\n\timgRefAndAuth, err := 
trust.GetImageReferencesAndAuth(ctx, image.AuthResolver(cli), remote)\n\tif err != nil {\n\t\treturn []trustTagRow{}, []client.RoleWithSignatures{}, []data.Role{}, err\n\t}\n\ttag := imgRefAndAuth.Tag()\n\tnotaryRepo, err := cli.NotaryClient(imgRefAndAuth, trust.ActionsPullOnly)\n\tif err != nil {\n\t\treturn []trustTagRow{}, []client.RoleWithSignatures{}, []data.Role{}, trust.NotaryError(imgRefAndAuth.Reference().Name(), err)\n\t}\n\n\tif err = clearChangeList(notaryRepo); err != nil {\n\t\treturn []trustTagRow{}, []client.RoleWithSignatures{}, []data.Role{}, err\n\t}\n\tdefer clearChangeList(notaryRepo)\n\n\t// Retrieve all released signatures, match them, and pretty print them\n\tallSignedTargets, err := notaryRepo.GetAllTargetMetadataByName(tag)\n\tif err != nil {\n\t\tlogrus.Debug(trust.NotaryError(remote, err))\n\t\t// print an empty table if we don't have signed targets, but have an initialized notary repo\n\t\tif _, ok := err.(client.ErrNoSuchTarget); !ok {\n\t\t\treturn []trustTagRow{}, []client.RoleWithSignatures{}, []data.Role{}, fmt.Errorf(\"no signatures or cannot access %s\", remote)\n\t\t}\n\t}\n\tsignatureRows := matchReleasedSignatures(allSignedTargets)\n\n\t// get the administrative roles\n\tadminRolesWithSigs, err := notaryRepo.ListRoles()\n\tif err != nil {\n\t\treturn []trustTagRow{}, []client.RoleWithSignatures{}, []data.Role{}, fmt.Errorf(\"no signers for %s\", remote)\n\t}\n\n\t// get delegation roles with the canonical key IDs\n\tdelegationRoles, err := notaryRepo.GetDelegationRoles()\n\tif err != nil {\n\t\tlogrus.Debugf(\"no delegation roles found, or error fetching them for %s: %v\", remote, err)\n\t}\n\n\treturn signatureRows, adminRolesWithSigs, delegationRoles, nil\n}", "func (f *lightFetcher) lastTrustedTreeNode(p *peer) (*types.Header, []common.Hash) {\n\tunapprovedHashes := make([]common.Hash, 0)\n\tcurrent := f.chain.CurrentHeader()\n\n\tif f.lastTrustedHeader == nil {\n\t\treturn current, unapprovedHashes\n\t}\n\n\tcanonical := 
f.chain.CurrentHeader()\n\tif canonical.Number.Uint64() > f.lastTrustedHeader.Number.Uint64() {\n\t\tcanonical = f.chain.GetHeaderByNumber(f.lastTrustedHeader.Number.Uint64())\n\t}\n\tcommonAncestor := rawdb.FindCommonAncestor(f.handler.backend.chainDb, canonical, f.lastTrustedHeader)\n\tif commonAncestor == nil {\n\t\tlog.Error(\"Common ancestor of last trusted header and canonical header is nil\", \"canonical hash\", canonical.Hash(), \"trusted hash\", f.lastTrustedHeader.Hash())\n\t\treturn current, unapprovedHashes\n\t}\n\n\tfor current.Hash() == commonAncestor.Hash() {\n\t\tif f.isTrustedHash(current.Hash()) {\n\t\t\tbreak\n\t\t}\n\t\tunapprovedHashes = append(unapprovedHashes, current.Hash())\n\t\tcurrent = f.chain.GetHeader(current.ParentHash, current.Number.Uint64()-1)\n\t}\n\treturn current, unapprovedHashes\n}", "func ReadTargetHeadingTrackIndicator(data []byte) TargetHeadingTrackIndicator {\n\tbits := (data[4] & 0x08) >> 3\n\treturn TargetHeadingTrackIndicator(bits)\n}", "func getSaml(cfg *OktaConfig, sessionToken string) (*OktaSamlResponse, error) {\n\tres, err := http.Get(\n\t\tfmt.Sprintf(\n\t\t\t\"%s?%s=%s\",\n\t\t\tcfg.AppURL,\n\t\t\t\"onetimetoken\",\n\t\t\tsessionToken,\n\t\t),\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn processSamlResponse(res)\n\n}", "func ParseCert(binCert []byte) (common.Address, common.Address, string, error) {\n\tvar contrAddr, parentAddr common.Address\n\tvar retDesc string\n\tca, err := x509.ParseCertificate(binCert)\n\tif err!=nil {\n\t\treturn common.Address{}, common.Address{}, \"\", err\n\t}\n\n\tfor i:=0; i<len(ca.Subject.Names); i++ {\n\t\tretDesc += fmt.Sprint(ca.Subject.Names[i].Value) + \" \";\n\t}\n\t// iterate in the extension to get the information\n\tfor _, element := range ca.Extensions {\n\t\tif element.Id.String() == \"1.2.752.115.33.2\" { // CA Address\n\t\t\tfmt.Printf(\"\\tCaContractIdentifier: %+#+x\\n\", element.Value)\n\t\t\tval:=element.Value[2:]\n\t\t\tif( len(val) != 
len(common.Address{}.Bytes()) ) {\n\t\t\t\treturn common.Address{}, common.Address{}, \"\",\n\t\t\t\t\tGeneralError{\"ParseCert: wrong length of CA addr\"}\n\t\t\t}\n\t\t\tcontrAddr = common.BytesToAddress(val)\n\t\t}\n\t\tif element.Id.String() == \"1.2.752.115.33.1\" { //Parent Address\n\t\t\tfmt.Printf(\"\\tIssuerCaContractIdentifier: %+#+x\\n\", element.Value)\n\t\t\tval:=element.Value[2:]\n\t\t\tif( len(val) != len(common.Address{}.Bytes()) ) {\n\t\t\t\treturn common.Address{}, common.Address{}, \"\",\n\t\t\t\t\tGeneralError{\"ParseCert: wrong length of CA addr\"}\n\t\t\t}\n\t\t\tparentAddr = common.BytesToAddress(val)\n\t\t}\n\t}\n\treturn contrAddr, parentAddr, retDesc, nil\n}", "func (c *ConsulStore) Read(root string, key Key, tag string) ([]byte, error) {\n\n\t//Convert to string as Consul only supports string based keys\n\tk := key.String()\n\tif k == \"\" {\n\t\treturn nil, pkgerrors.New(\"Key.String() returned an empty string\")\n\t}\n\n\tk = root + \"/\" + k + \"/\" + tag\n\tpair, _, err := c.client.Get(k, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif pair == nil {\n\t\treturn nil, nil\n\t}\n\treturn pair.Value, nil\n}", "func (server *SingleInstance) TLSCertificate() *x509.Certificate {\n\tbytes, err := ioutil.ReadFile(path.Join(server.path, \"neo4jhome\", \"certificates\", \"neo4j.cert\"))\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\tder, _ := pem.Decode(bytes)\n\tif der == nil {\n\t\treturn nil\n\t}\n\n\tcert, err := x509.ParseCertificate(der.Bytes)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\treturn cert\n}", "func (ks *Keystore) StoreTrustedKeyRoot(r io.Reader) (string, error) {\n\treturn storeTrustedKey(ks.LocalRootPath, r)\n}", "func ReadAccessKey() string {\n\tfile, err := os.Open(\"./access\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer file.Close()\n\n\tb, err := ioutil.ReadAll(file)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn string(b)\n}", "func GetAnalysis(RID string) (types.Analysis, error) {\n\n\tanalysis := 
types.Analysis{}\n\tgetAnalysisURL := config.HuskyAPI + \"/analysis/\" + RID\n\n\thttpClient, err := util.NewClient(config.HuskyUseTLS)\n\tif err != nil {\n\t\treturn analysis, err\n\t}\n\n\treq, err := http.NewRequest(\"GET\", getAnalysisURL, nil)\n\tif err != nil {\n\t\treturn analysis, err\n\t}\n\n\treq.Header.Add(\"Husky-Token\", config.HuskyToken)\n\n\tresp, err := httpClient.Do(req)\n\tif err != nil {\n\t\treturn analysis, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn analysis, err\n\t}\n\n\terr = json.Unmarshal(body, &analysis)\n\tif err != nil {\n\t\treturn analysis, err\n\t}\n\n\treturn analysis, nil\n}", "func tryParseX509(block *pem.Block) ([]byte, error) {\n\t// if certificate is already x509 encoded, return the certificate, otherwise continue and parse.\n\t_, err := x509.ParseCertificate(block.Bytes)\n\tif err == nil {\n\t\treturn block.Bytes, nil\n\t}\n\n\tb, err := pkcs7.Parse(block.Bytes)\n\tif err == nil {\n\t\tif len(b.Certificates) == 0 {\n\t\t\treturn nil, fmt.Errorf(\"expected one or more certificates\")\n\t\t}\n\t\treturn b.Certificates[0].Raw, nil\n\t}\n\n\terr = fmt.Errorf(\"parsing PKCS7: %w\", err)\n\treturn nil, err\n}", "func ReadSymlinkedFile(path string) (string, error) {\n\trealPath, err := ReadSymlink(path)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\trealPathInfo, err := os.Stat(realPath)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to stat target '%s' of '%s': %s\", realPath, path, err)\n\t}\n\tif !realPathInfo.Mode().IsRegular() {\n\t\treturn \"\", fmt.Errorf(\"canonical path points does not point to a file '%s'\", realPath)\n\t}\n\treturn realPath, nil\n}", "func (cm *CertManager) GetRootCertificate() (certificate.Certificater, error) {\n\treturn cm.ca, nil\n}", "func (o ArgoCDSpecGrafanaRouteTlsOutput) CaCertificate() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ArgoCDSpecGrafanaRouteTls) *string { return v.CaCertificate 
}).(pulumi.StringPtrOutput)\n}", "func (fsys *FS) Readlink(path string) (errc int, linkPath string) {\n\tdefer fs.Trace(path, \"\")(\"linkPath=%q, errc=%d\", &linkPath, &errc)\n\treturn -fuse.ENOSYS, \"\"\n}", "func isAnchor(n *html.Node) bool {\n\treturn n.Type == html.ElementNode && n.Data == \"a\"\n}", "func ReadAuthCookie(r *http.Request) (map[string]string, error) {\n\tcookie, error := r.Cookie(\"SID\")\n\tif error != nil {\n\t\treturn nil, error\n\t}\n\n\tcookieValues := make(map[string]string)\n\tif error = s.Decode(\"SID\", cookie.Value, &cookieValues); error != nil {\n\t\treturn nil, error\n\t}\n\n\treturn cookieValues, nil\n}", "func (p *Pollydent) ReadAloud(msg string) (err error) {\r\n\tif msgLen := len([]rune(msg)); msgLen > 1500 {\r\n\t\terrMsg := \"Message size is %d. Please pass with the length of 1500 or less.\"\r\n\t\terr = fmt.Errorf(errMsg, msgLen)\r\n\t\treturn err\r\n\t}\r\n\r\n\treader, err := p.speaker.Send(SpeechParams{Message: msg})\r\n\tif err != nil {\r\n\t\treturn\r\n\t}\r\n\tp.Play(reader)\r\n\treturn\r\n}", "func (fs *Mysqlfs) Readlink(link string) (string, error) {\n\tf, err := fs.storage.GetFile(link)\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif f == nil {\n\t\treturn \"\", os.ErrNotExist\n\t}\n\n\tif !isSymlink(f.Mode) {\n\t\treturn \"\", &os.PathError{\n\t\t\tOp: \"readlink\",\n\t\t\tPath: link,\n\t\t\tErr: fmt.Errorf(\"not a symlink\"),\n\t\t}\n\t}\n\n\treturn string(f.Content), nil\n}", "func readAddr(r io.Reader, b []byte) (Addr, error) {\n\tif len(b) < MaxAddrLen {\n\t\treturn nil, io.ErrShortBuffer\n\t}\n\t_, err := io.ReadFull(r, b[:1]) // read 1st byte for address type\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tswitch b[0] {\n\tcase AtypIPv4:\n\t\t_, err = io.ReadFull(r, b[1:1+net.IPv4len+2])\n\t\treturn b[:1+net.IPv4len+2], err\n\tcase AtypDomainName:\n\t\t_, err = io.ReadFull(r, b[1:2]) // read 2nd byte for domain length\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t_, err = io.ReadFull(r, 
b[2:2+int(b[1])+2])\n\t\treturn b[:1+1+int(b[1])+2], err\n\tcase AtypIPv6:\n\t\t_, err = io.ReadFull(r, b[1:1+net.IPv6len+2])\n\t\treturn b[:1+net.IPv6len+2], err\n\t}\n\n\treturn nil, ErrAddressNotSupported\n}", "func xchainAKToEVMAddress(addr string) (crypto.Address, error) {\n\trawAddr := base58.Decode(addr)\n\tif len(rawAddr) < 21 {\n\t\treturn crypto.ZeroAddress, errors.New(\"bad address\")\n\t}\n\tripemd160Hash := rawAddr[1:21]\n\n\treturn crypto.AddressFromBytes(ripemd160Hash)\n}", "func ReadBytes(data []byte, certificateResolver string) (*LocalNamedStore, error) {\n\to := LocalStore{}\n\tif err := json.Unmarshal(data, &o); err != nil {\n\t\t// fallback to traefik v1 (no resolver parent key in JSON)\n\t\tv := &LocalNamedStore{}\n\t\tif err := json.Unmarshal(data, v); err == nil {\n\t\t\treturn v, nil\n\t\t}\n\n\t\treturn nil, fmt.Errorf(\"unable to parse file: %w\", err)\n\t}\n\n\tif v, ok := o[certificateResolver]; ok {\n\t\treturn v, nil\n\t}\n\n\treturn nil, ErrCertificateResolverNotFound\n}", "func (storage *PublishedStorage) ReadLink(path string) (string, error) {\n\tparams := &s3.HeadObjectInput{\n\t\tBucket: aws.String(storage.bucket),\n\t\tKey: aws.String(filepath.Join(storage.prefix, path)),\n\t}\n\toutput, err := storage.s3.HeadObject(params)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn aws.StringValue(output.Metadata[\"SymLink\"]), nil\n}", "func (v *VFS) Readlink(name string) (string, error) {\n\tf, err := v.FileSystem.Open(name)\n\tif nil != err {\n\t\treturn \"\", err\n\t}\n\tb, err := ioutil.ReadAll(f)\n\tif nil != err {\n\t\treturn \"\", err\n\t}\n\treturn string(b), nil\n}", "func (n *node) Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (string, error) {\n\tif n.te.Type != \"symlink\" {\n\t\treturn \"\", syscall.EINVAL\n\t}\n\treturn n.te.LinkName, nil\n}", "func DecodeOpReturn(b [80]byte) (*Anchor, error) {\n\t// Check signature.\n\tif b[0] != 0x42 || b[1] != 0x42 || b[2] != 0x63 || b[3] != 0x31 {\n\t\treturn nil, 
fmt.Errorf(\"%w (AnchorSignature: %s)\", ErrInvalidSignature, b[0:4])\n\t}\n\n\tvar a Anchor\n\n\t// Check Version and BTCNet.\n\ta.Version = b[4]\n\tif _, ok := validAnchorVersions[a.Version]; !ok {\n\t\treturn nil, fmt.Errorf(\"%w (AnchorVersion: %v)\", ErrInvalidVersion, a.Version)\n\t}\n\ta.BTCNet = BTCNet(b[5])\n\tif n := a.BTCNet.String(); n == \"\" {\n\t\treturn nil, fmt.Errorf(\"%w (AnchorBTCNet: %v)\", ErrInvalidBTCNet, a.BTCNet)\n\t}\n\n\t// Copy timestamp\n\tvar ts [8]byte\n\tcopy(ts[0:8], b[8:16])\n\ta.Timestamp = time.Unix(int64(getUint64BE(&ts)), 0)\n\n\t// Copy BBc1DomainID and BBc1TransactionID\n\tvar d, t [32]byte\n\tcopy(d[:], b[16:48])\n\tcopy(t[:], b[48:80])\n\ta.BBc1DomainID = d\n\ta.BBc1TransactionID = t\n\n\treturn &a, nil\n}" ]
[ "0.70352846", "0.54166305", "0.5376895", "0.5034828", "0.50329363", "0.5005947", "0.4975937", "0.49141535", "0.49141535", "0.47976542", "0.4571982", "0.45457885", "0.44578916", "0.44233346", "0.44013694", "0.43893903", "0.4377805", "0.4350652", "0.4340766", "0.42625472", "0.42603463", "0.42407286", "0.4234731", "0.42247078", "0.42057118", "0.41851124", "0.41844276", "0.4180099", "0.41794285", "0.4174428", "0.41528544", "0.41347557", "0.41182095", "0.41065082", "0.40741867", "0.40737858", "0.40429178", "0.4042721", "0.4029572", "0.40279984", "0.40219504", "0.40138122", "0.4010203", "0.40065235", "0.39945078", "0.3991956", "0.39796025", "0.39793044", "0.39791977", "0.39763168", "0.39640915", "0.396189", "0.39566174", "0.39483887", "0.3908527", "0.39004013", "0.38965577", "0.3890139", "0.38878673", "0.38867277", "0.38820243", "0.387596", "0.38703164", "0.38547888", "0.38351136", "0.3834627", "0.38328624", "0.38261378", "0.38102433", "0.38074374", "0.38057926", "0.3795386", "0.37913603", "0.3773732", "0.37732917", "0.3752859", "0.37487704", "0.37456656", "0.37405258", "0.37400714", "0.37394744", "0.37331763", "0.3733005", "0.37309158", "0.37294722", "0.37263486", "0.37200364", "0.37186074", "0.37184942", "0.3717141", "0.3713571", "0.370729", "0.37046888", "0.37046155", "0.37036902", "0.37011337", "0.37009346", "0.36999342", "0.36973765", "0.369364" ]
0.75947404
0
NewStreamToSubStream instantiates a new StreamToSubStream process
func NewStreamToSubStream(wf *scipipe.Workflow, name string) *StreamToSubStream { stss := &StreamToSubStream{ name: name, In: scipipe.NewInPort("in"), OutSubStream: scipipe.NewOutPort("out_substream"), } wf.AddProc(stss) return stss }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *Stream) CreateSubStream(headers http.Header, fin bool) (*Stream, error) {\n\treturn s.conn.CreateStream(headers, s, fin)\n}", "func newStream(id common.StreamId, hostStr string, handler MutationHandler) (*Stream, error) {\n\n\t// TODO: use constant\n\tmutch := make(chan interface{}, 1000)\n\tstopch := make(chan bool)\n\n\ts := &Stream{id: id,\n\t\thostStr: hostStr,\n\t\thandler: handler,\n\t\tmutch: mutch,\n\t\tstopch: stopch,\n\t\tstatus: false}\n\n\treturn s, nil\n}", "func (p *StreamToSubStream) Run() {\n\tdefer p.OutSubStream.Close()\n\n\tscipipe.Debug.Println(\"Creating new information packet for the substream...\")\n\tsubStreamIP := scipipe.NewIP(\"\")\n\tscipipe.Debug.Printf(\"Setting in-port of process %s to IP substream field\\n\", p.Name())\n\tsubStreamIP.SubStream = p.In\n\n\tscipipe.Debug.Printf(\"Sending sub-stream IP in process %s...\\n\", p.Name())\n\tp.OutSubStream.Send(subStreamIP)\n\tscipipe.Debug.Printf(\"Done sending sub-stream IP in process %s.\\n\", p.Name())\n}", "func newStream(id uint32, frameSize int, sess *Session) *Stream {\n\ts := new(Stream)\n\ts.id = id\n\ts.chReadEvent = make(chan struct{}, 1)\n\ts.frameSize = frameSize\n\ts.sess = sess\n\ts.die = make(chan struct{})\n\treturn s\n}", "func newStream(session *Session, id uint32, state streamState) *Stream {\n\ts := &Stream{\n\t\tid: id,\n\t\tsession: session,\n\t\tstate: state,\n\t\tcontrolHdr: header(make([]byte, headerSize)),\n\t\tcontrolErr: make(chan error, 1),\n\t\tsendHdr: header(make([]byte, headerSize)),\n\t\tsendErr: make(chan error, 1),\n\t\trecvWindow: initialStreamWindow,\n\t\tsendWindow: initialStreamWindow,\n\t\trecvNotifyCh: make(chan struct{}, 1),\n\t\tsendNotifyCh: make(chan struct{}, 1),\n\t\testablishCh: make(chan struct{}, 1),\n\t}\n\ts.readDeadline.Store(time.Time{})\n\ts.writeDeadline.Store(time.Time{})\n\treturn s\n}", "func (s *subgraphStream) processNewStream(ctx context.Context, unmarshalledBlocks chan *unmarshalledBlock) error 
{\n\ts.logger.Info(\"retrieving cursor\")\n\tcursor, err := s.in.loadCursor()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to load cursor of subgraph %q: %w\", s.subgraph.PackageName, err)\n\t}\n\n\t// Launch streaming thing\n\n\tforkSteps := []pbbstream.ForkStep{pbbstream.ForkStep_STEP_IRREVERSIBLE}\n\tif s.withReversible {\n\t\tforkSteps = []pbbstream.ForkStep{\n\t\t\tpbbstream.ForkStep_STEP_NEW,\n\t\t\tpbbstream.ForkStep_STEP_UNDO,\n\t\t}\n\t}\n\n\ts.logger.Info(\"requesting blocks\", zap.String(\"cursor\", cursor), zap.Int64(\"start_block_num\", s.startBlock), zap.Uint64(\"stop_block_num\", s.stopBlock))\n\tstream, err := s.streamFactory.StreamBlocks(ctx, &pbbstream.BlocksRequestV2{\n\t\tStartBlockNum: s.startBlock,\n\t\tStartCursor: cursor,\n\t\tStopBlockNum: s.stopBlock,\n\t\tForkSteps: forkSteps,\n\t\tIncludeFilterExpr: s.subgraph.IncludeFilter,\n\t\tDetails: pbbstream.BlockDetails_BLOCK_DETAILS_FULL,\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to create blocks stream of subgraph %q: %w\", s.subgraph.PackageName, err)\n\t}\n\n\ts.logger.Info(\"streaming blocks\", zap.Uint64(\"stop_block_num\", s.stopBlock))\n\n\tdefer s.metrics.BlockRate.Clean()\n\n\tfor {\n\t\tif s.IsTerminating() {\n\t\t\ts.logger.Info(\"stopping streaming loop\")\n\t\t\treturn nil\n\t\t}\n\n\t\tresponse, err := stream.Recv()\n\t\tif (response == nil) && (err == nil) {\n\t\t\terr = io.EOF // FIXME in bstream lib, stepd hack\n\t\t}\n\n\t\tif err == io.EOF {\n\t\t\tif s.stopBlock != 0 {\n\t\t\t\tif s.lastBlockRef.Num() == s.stopBlock {\n\t\t\t\t\ts.logger.Info(\"reached our stop block\", zap.Stringer(\"last_block_ref\", s.lastBlockRef))\n\t\t\t\t\treturn firehose.ErrStopBlockReached\n\t\t\t\t}\n\t\t\t\treturn fmt.Errorf(\"stream ended with EOF but last block seen %q does not match expected stop block %q\", s.lastBlockRef.Num(), s.stopBlock)\n\t\t\t}\n\n\t\t\tvar cursor string\n\t\t\tif response != nil {\n\t\t\t\tcursor = 
response.Cursor\n\t\t\t}\n\t\t\ts.logger.Error(\"received EOF when not expected, reconnecting\",\n\t\t\t\tzap.String(\"cursor\", cursor),\n\t\t\t\tzap.Stringer(\"last_block\", s.lastBlockRef),\n\t\t\t\tzap.Uint64(\"stop_block\", s.stopBlock),\n\t\t\t)\n\t\t\treturn nil\n\t\t}\n\n\t\tif err != nil {\n\t\t\ts.logger.Error(\"stream encountered a remote error, retrying\",\n\t\t\t\tzap.Stringer(\"last_block\", s.lastBlockRef),\n\t\t\t\tzap.Error(err),\n\t\t\t)\n\t\t\treturn nil\n\t\t}\n\n\t\tblock := response.Block.(*pbcodec.Block)\n\t\ts.lastBlockRef = block.AsRef()\n\n\t\tunmarshalledBlocks <- &unmarshalledBlock{\n\t\t\tblock: block,\n\t\t\tcursor: response.Cursor,\n\t\t\tstep: response.Step,\n\t\t}\n\t}\n}", "func NewSub(url string, opts SubOpts, metr *Metrics) Sub {\n\taddr := MustParseURL(url)\n\tl := prometheus.Labels{lAddr: addr.String()}\n\n\ts := &sub{\n\t\tSubOpts: opts,\n\t\taddr: addr,\n\t\tqueue: &ringBuf{},\n\t\tpool: pool{New: func() interface{} { return newIStream() }},\n\t\t// metrics\n\t\tmetrics: metr,\n\t\tnumStreams: metr.numStreams.With(l),\n\t\tstreamDurationSec: metr.streamDurationSec.With(l),\n\t\tlostBytes: metr.lostBytes.With(l),\n\t\tlostMsgs: metr.lostMsgs.With(l),\n\t}\n\ts.hasData = sync.NewCond(&s.mu)\n\n\treturn s\n}", "func NewStream(\n\tURI string,\n\tstoringDirectory string,\n\tkeepFiles bool,\n\taudio bool,\n\tloggingOpts ProcessLoggingOpts,\n\twaitTimeOut time.Duration,\n) (*Stream, string) {\n\tid := uuid.New().String()\n\tpath := fmt.Sprintf(\"%s/%s\", storingDirectory, id)\n\terr := os.MkdirAll(path, os.ModePerm)\n\tif err != nil {\n\t\tlogrus.Error(err)\n\t\treturn nil, \"\"\n\t}\n\tprocess := NewProcess(keepFiles, audio)\n\tcmd := process.Spawn(path, URI)\n\n\t// Create nil pointer in case logging is not enabled\n\tcmdLogger := (*lumberjack.Logger)(nil)\n\t// Create logger otherwise\n\tif loggingOpts.Enabled {\n\t\tcmdLogger = &lumberjack.Logger{\n\t\t\tFilename: fmt.Sprintf(\"%s/%s.log\", loggingOpts.Directory, 
id),\n\t\t\tMaxSize: loggingOpts.MaxSize,\n\t\t\tMaxBackups: loggingOpts.MaxBackups,\n\t\t\tMaxAge: loggingOpts.MaxAge,\n\t\t\tCompress: loggingOpts.Compress,\n\t\t}\n\t\tcmd.Stderr = cmdLogger\n\t\tcmd.Stdout = cmdLogger\n\t}\n\tstream := Stream{\n\t\tID: id,\n\t\tCMD: cmd,\n\t\tProcess: process,\n\t\tMux: &sync.Mutex{},\n\t\tPath: fmt.Sprintf(\"/%s/index.m3u8\", filepath.Join(\"stream\", id)),\n\t\tStorePath: path,\n\t\tStreak: hotstreak.New(hotstreak.Config{\n\t\t\tLimit: 10,\n\t\t\tHotWait: time.Minute * 2,\n\t\t\tActiveWait: time.Minute * 4,\n\t\t}).Activate(),\n\t\tOriginalURI: URI,\n\t\tKeepFiles: keepFiles,\n\t\tLoggingOpts: &loggingOpts,\n\t\tLogger: cmdLogger,\n\t\tRunning: false,\n\t\tWaitTimeOut: waitTimeOut,\n\t}\n\tlogrus.Debugf(\"%s store path created | Stream\", stream.StorePath)\n\treturn &stream, id\n}", "func NewSub(w *model.Watcher, d *dao.Dao, c *conf.Config) (n *Sub, err error) {\n\tn = &Sub{\n\t\tc: c,\n\t\tw: w,\n\t\troutine: _defRoutine,\n\t\tbackoff: netutil.DefaultBackoffConfig,\n\t\tasyncRty: make(chan *rtyMsg, 100),\n\t\tdao: d,\n\t\tticker: time.NewTicker(time.Minute),\n\t}\n\tn.ctx, n.cancel = context.WithCancel(context.Background())\n\tif clu, ok := c.Clusters[w.Cluster]; ok {\n\t\tn.cluster = clu\n\t} else {\n\t\terr = errClusterNotSupport\n\t\treturn\n\t}\n\tif len(w.Filters) != 0 {\n\t\tn.parseFilter()\n\t}\n\terr = n.parseCallback()\n\tif err != nil {\n\t\terr = errCallbackParse\n\t\treturn\n\t}\n\t// init clients\n\tn.clients = NewClients(c, w)\n\terr = n.dial()\n\tif err != nil {\n\t\treturn\n\t}\n\tif w.Concurrent != 0 {\n\t\tn.routine = w.Concurrent\n\t}\n\tgo n.asyncRtyproc()\n\tfor i := 0; i < n.routine; i++ {\n\t\tgo n.serve()\n\t}\n\tcountProm.Incr(_opCurrentConsumer, w.Group, w.Topic)\n\treturn\n}", "func (t testConn) NewStream(ctx context.Context) (network.Stream, error) { return nil, nil }", "func CreateChildStream(reading bool) (ChildStream, error) {\n\tif runtime.GOOS == \"windows\" {\n\t\treturn 
NewChildSocketStream()\n\t} else {\n\t\treturn NewChildPipeStream(reading)\n\t}\n}", "func NewStreamCore() *StorageST {\n\targConfigPatch := flag.String(\"config\", \"config.json\", \"config patch (/etc/server/config.json or config.json)\")\n\targDebug := flag.Bool(\"debug\", true, \"set debug mode\")\n\tdebug = *argDebug\n\tflag.Parse()\n\tvar tmp StorageST\n\tdata, err := ioutil.ReadFile(*argConfigPatch)\n\tif err != nil {\n\t\tlog.WithFields(logrus.Fields{\n\t\t\t\"module\": \"config\",\n\t\t\t\"func\": \"NewStreamCore\",\n\t\t\t\"call\": \"ReadFile\",\n\t\t}).Errorln(err.Error())\n\t\tos.Exit(1)\n\t}\n\terr = json.Unmarshal(data, &tmp)\n\tif err != nil {\n\t\tlog.WithFields(logrus.Fields{\n\t\t\t\"module\": \"config\",\n\t\t\t\"func\": \"NewStreamCore\",\n\t\t\t\"call\": \"Unmarshal\",\n\t\t}).Errorln(err.Error())\n\t\tos.Exit(1)\n\t}\n\tdebug = tmp.Server.Debug\n\tfor i, i2 := range tmp.Streams {\n\t\tfor i3, i4 := range i2.Channels {\n\t\t\ti4.clients = make(map[string]ClientST)\n\t\t\ti4.ack = time.Now().Add(-255 * time.Hour)\n\t\t\ti4.hlsSegmentBuffer = make(map[int]SegmentOld)\n\t\t\ti4.signals = make(chan int, 100)\n\t\t\ti2.Channels[i3] = i4\n\t\t}\n\t\ttmp.Streams[i] = i2\n\t}\n\treturn &tmp\n}", "func NewStream(reverse bool, manaul bool) *Streams {\n\n\t//our internal buffer\n\tlist := immute.CreateList(make([]interface{}, 0))\n\n\t//drain event handler\n\tdrains := NewEvent(\"drain\")\n\n\tsm := &Streams{\n\t\tNewRoller(),\n\t\tlist,\n\t\tdrains,\n\t\tmanaul,\n\t\treverse,\n\t\ttrue,\n\t}\n\n\tsm.ReceiveDone(func(data interface{}) {\n\t\tsm.finished = true\n\t\tsm.Stream()\n\t})\n\n\treturn sm\n}", "func newFileStream(ctx context.Context, wg *sync.WaitGroup, waker waker.Waker, pathname string, fi os.FileInfo, lines chan<- *logline.LogLine, streamFromStart bool) (LogStream, error) {\n\tfs := &fileStream{ctx: ctx, pathname: pathname, lastReadTime: time.Now(), lines: lines, stopChan: make(chan struct{})}\n\tif err := fs.stream(ctx, wg, waker, fi, 
streamFromStart); err != nil {\n\t\treturn nil, err\n\t}\n\treturn fs, nil\n}", "func (s *Connection) CreateStream(headers http.Header, parent *Stream, fin bool) (*Stream, error) {\n\t// MUST synchronize stream creation (all the way to writing the frame)\n\t// as stream IDs **MUST** increase monotonically.\n\ts.nextIdLock.Lock()\n\tdefer s.nextIdLock.Unlock()\n\n\tstreamId := s.getNextStreamId()\n\tif streamId == 0 {\n\t\treturn nil, fmt.Errorf(\"Unable to get new stream id\")\n\t}\n\n\tstream := &Stream{\n\t\tstreamId: streamId,\n\t\tparent: parent,\n\t\tconn: s,\n\t\tstartChan: make(chan error),\n\t\theaders: headers,\n\t\tdataChan: make(chan []byte),\n\t\theaderChan: make(chan http.Header),\n\t\tcloseChan: make(chan bool),\n\t}\n\n\tdebugMessage(\"(%p) (%p) Create stream\", s, stream)\n\n\ts.addStream(stream)\n\n\treturn stream, s.sendStream(stream, fin)\n}", "func newStreamWatcher(r io.ReadCloser) watchInterface {\n\tsw := &streamWatcher{\n\t\tr: r,\n\t\tdecoder: json.NewDecoder(r),\n\t\tresult: make(chan Event),\n\t}\n\tgo sw.receive()\n\treturn sw\n}", "func NewStream() Stream {\n\treturn []Streamer{}\n}", "func newEventStreamCreatable(bidi bool) Event {\n\te := Event{\n\t\tType: EventStreamCreatable,\n\t}\n\tif bidi {\n\t\te.ID = 1\n\t}\n\treturn e\n}", "func NewGCPPubSub(conf Config, mgr interop.Manager, log log.Modular, stats metrics.Type) (input.Streamed, error) {\n\tvar c reader.Async\n\tvar err error\n\tif c, err = reader.NewGCPPubSub(conf.GCPPubSub, log, stats); err != nil {\n\t\treturn nil, err\n\t}\n\treturn NewAsyncReader(TypeGCPPubSub, true, c, log, stats)\n}", "func NewStream(id string, retriever Retriever, options ...*Option) Stream {\n\tmtrx := createMetrics(id, \"stream\")\n\topt := defaultOptions.merge(options...)\n\n\tedge := newEdge(opt.BufferSize)\n\tinput := newEdge(opt.BufferSize)\n\n\tx := &builder{\n\t\tvertex: vertex{\n\t\t\tid: id,\n\t\t\tvertexType: \"stream\",\n\t\t\tmetrics: mtrx,\n\t\t\tinput: input,\n\t\t\toption: 
opt,\n\t\t\thandler: func(p []*Packet) {\n\t\t\t\tedge.channel <- p\n\t\t\t},\n\t\t},\n\t\tvertacies: map[string]*vertex{},\n\t\trecorder: func(s1, s2, s3 string, p []*Packet) {},\n\t}\n\n\tx.connector = func(ctx context.Context, b *builder) error {\n\t\ti := retriever(ctx)\n\n\t\tgo func() {\n\t\tLoop:\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\tbreak Loop\n\t\t\t\tcase data := <-i:\n\t\t\t\t\tif len(data) < 1 {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tpayload := make([]*Packet, len(data))\n\t\t\t\t\tfor i, item := range data {\n\t\t\t\t\t\tpacket := &Packet{\n\t\t\t\t\t\t\tID: uuid.New().String(),\n\t\t\t\t\t\t\tData: item,\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif *x.option.Span {\n\t\t\t\t\t\t\tpacket.newSpan(ctx, mtrx.tracer, \"stream.begin\", id, \"stream\")\n\t\t\t\t\t\t}\n\t\t\t\t\t\tpayload[i] = packet\n\t\t\t\t\t}\n\n\t\t\t\t\tinput.channel <- payload\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t\treturn x.next.cascade(ctx, x, edge)\n\t}\n\n\treturn x\n}", "func NewStream() *Stream {\n\tselect {\n\tcase stream := <-streams:\n\t\treturn stream\n\tdefault:\n\t\tstream := &Stream{nextCorrelationId(), make([]byte, 4096), make(chan error)}\n\t\treturn stream\n\t}\n}", "func (cfg *Config) StartStream() (*Stream, error) {\n\tif err := cfg.createCmd(); err != nil {\n\t\treturn nil, err\n\t}\n\tpt, err := pty.Start(cfg.cmd)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tstr := &Stream{\n\t\tcmd: cfg.cmd,\n\n\t\tpmu: sync.Mutex{},\n\t\tpt: pt,\n\n\t\twg: sync.WaitGroup{},\n\t\trmu: sync.RWMutex{},\n\n\t\t// pre-allocate\n\t\tqueue: make([]Row, 0, 500),\n\t\tpid2Row: make(map[int64]Row, 500),\n\t\terr: nil,\n\t\terrc: make(chan error, 1),\n\n\t\tready: false,\n\t\treadyc: make(chan struct{}, 1),\n\t}\n\tstr.rcond = sync.NewCond(&str.rmu)\n\n\tstr.wg.Add(1)\n\tgo str.enqueue()\n\tgo str.dequeue()\n\n\t<-str.readyc\n\treturn str, nil\n}", "func NewChildSocketStream() (*ChildSocketStream, error) {\n\taddr, err := net.ResolveTCPAddr(\"tcp\", 
\"localhost:0\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlistener, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ChildSocketStream{\n\t\tlistener: listener,\n\t}, nil\n}", "func (l *Lgr) newStreamEntry(streamKey string, msg string, level string, complete bool) {\n // Get new connection from Redis pool.\n conn := l.RedisPool.Get()\n defer conn.Close()\n\n // Add message to log stream.\n if _, err := conn.Do(\"XADD\", streamKey, \"*\", \"msg\", msg, \"level\", level, \"complete\", complete); err != nil {\n l.Errorf(\"error logging to stream: %s\", err.Error())\n }\n}", "func (sp *StreamPool) NewStream(ctx xctx.XContext, netStream network.Stream) (*Stream, error) {\n\tstream, err := NewStream(sp.ctx, sp.srv, netStream)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := sp.AddStream(ctx, stream); err != nil {\n\t\tstream.Close()\n\t\tsp.srv.kdht.RoutingTable().RemovePeer(stream.PeerID())\n\t\tctx.GetLog().Warn(\"New stream is deleted\", \"error\", err)\n\t\treturn nil, ErrNewStream\n\t}\n\n\treturn stream, nil\n}", "func NewPushStream(ctx context.Context, nodeID string, subName string, subscriptionCore subscription.ICore, subscriberCore subscriber.ICore, config *HTTPClientConfig) *PushStream {\n\tpushCtx, cancelFunc := context.WithCancel(ctx)\n\n\t// get subscription Model details\n\tsubModel, err := subscriptionCore.Get(pushCtx, subName)\n\tif err != nil {\n\t\tlogger.Ctx(pushCtx).Errorf(\"error fetching subscription: %s\", err.Error())\n\t\treturn nil\n\t}\n\n\t// set http connection timeout from the subscription\n\tif subModel.AckDeadlineSec != 0 {\n\t\t// make sure to convert sec to milli-sec\n\t\tconfig.ConnectTimeoutMS = int(subModel.AckDeadlineSec) * 1e3\n\t}\n\thttpclient := NewHTTPClientWithConfig(config)\n\n\treturn &PushStream{\n\t\tctx: pushCtx,\n\t\tcancelFunc: cancelFunc,\n\t\tnodeID: nodeID,\n\t\tsubscription: subModel,\n\t\tsubscriptionCore: subscriptionCore,\n\t\tsubscriberCore: 
subscriberCore,\n\t\tdoneCh: make(chan struct{}),\n\t\thttpClient: httpclient,\n\t}\n}", "func New(path Path, tracksLen int) *StreamProc {\n\tsp := &StreamProc{\n\t\tpath: path,\n\t}\n\n\tsp.tracks = make([]*track, tracksLen)\n\tfor i := range sp.tracks {\n\t\tsp.tracks[i] = &track{}\n\t}\n\n\treturn sp\n}", "func (c *Chain) handleNewStream(s p2p.Stream, msg proto.Message) {\n\t// check message type\n\tswitch message := msg.(type) {\n\tcase *Message:\n\t\tc.handleMessage(s, message)\n\tdefault:\n\t\ts.Reset()\n\t\tglog.Errorf(\"unexpected message: %v\", msg)\n\t}\n}", "func (h *BlankHost) newStreamHandler(s inet.Stream) {\n\n\tprotoID, handle, err := h.Mux().Negotiate(s)\n\tif err != nil {\n\t\tlog.Warning(\"protocol mux failed: %s\", err)\n\t\ts.Close()\n\t\treturn\n\t}\n\n\ts.SetProtocol(protocol.ID(protoID))\n\n\tgo handle(protoID, s)\n}", "func NewSubscriber(addr string, bufSize int) *Subscriber {\n\ts, err := sub.NewSocket()\n\tif err != nil {\n\t\tlog.Fatalf(\"new sub socket failed: %s\\n\", err)\n\t}\n\ts.AddTransport(ipc.NewTransport())\n\ts.AddTransport(tcp.NewTransport())\n\n\tif err := s.Dial(addr); err != nil {\n\t\tlog.Fatal(\"can't dial on pub socket:\", err)\n\t}\n\tlog.Printf(\"sub dial to: %s\\n\", addr)\n\ts.SetOption(mangos.OptionSubscribe, []byte(\"\"))\n\n\tsub := &Subscriber{\n\t\ts: s,\n\t\tmsgs: make(chan interface{}, bufSize),\n\t}\n\n\tgo sub.subscribing()\n\treturn sub\n}", "func (g *group) NewStream() Stream {\n\ts := newStream(g.level, g.out)\n\ts.SetRowHandler(g.handleRow)\n\ts.SetCloseHandler(g.handleStream)\n\tg.idCounter++\n\ts.id = g.idCounter\n\treturn s\n}", "func NewStream(cfg Config, b Break) *Stream {\n\tif err := checkConfig(cfg); err != nil {\n\t\treturn &Stream{err: err}\n\t}\n\tjobs := make(chan *wfl.Job, cfg.BufferSize)\n\n\tgo func() {\n\t\tfor b == nil || b(cfg.Template.Next()) {\n\t\t\tjt, _ := copystructure.Copy(cfg.Template.Jt)\n\t\t\tjobs <- 
wfl.NewJob(cfg.Workflow).RunT(jt.(drmaa2interface.JobTemplate))\n\t\t}\n\t\tclose(jobs)\n\t}()\n\n\treturn &Stream{\n\t\tjch: jobs,\n\t\tconfig: cfg,\n\t}\n}", "func NewStream(executor executor.Executor) executor.Launcher {\n\treturn New(executor, \"stress-ng-stream\", fmt.Sprintf(\"--stream=%d\", StressngStreamProcessNumber.Value()))\n}", "func handleStream(ns network.Stream) {\n\tlog.Println(\"received new stream\")\n\tserver.launchNewPeer(ns, \"\")\n}", "func newEventStreamReadable(id uint64) Event {\n\treturn Event{\n\t\tType: EventStreamReadable,\n\t\tID: id,\n\t}\n}", "func streamCreate(w http.ResponseWriter, r *http.Request) {\n\tclaims := r.Context().Value(ctxClaims).(*account.Claims)\n\ts, err := stream.CreateStream(uuid.FromStringOrNil(claims.Issuer))\n\tif err != nil {\n\t\te := ErrInvalidInput\n\t\te.Send(w)\n\t\treturn\n\t}\n\tvar res Success\n\tres.Message = \"New stream created\"\n\tres.Payload = s\n\tres.Send(w)\n}", "func NewStream(t mockConstructorTestingTNewStream) *Stream {\n\tmock := &Stream{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func NewStream(name string, fs FileSystem) (*Stream, error) {\n\tf, err := fs.Create(name)\n\tsf := &Stream{\n\t\tfile: f,\n\t\tfs: fs,\n\t\tb: newBroadcaster(),\n\t}\n\tsf.inc()\n\treturn sf, err\n}", "func NewStream() *Stream {\n\treturn &Stream{\n\t\trecv: make(chan []byte),\n\t\tdone: make(chan struct{}),\n\t}\n}", "func (mgr *StreamMgr) NewStream(ctx context.Context, peer peer.ID, pid protocol.ID) (Stream, error) {\n\treturn mgr.host.NewStream(ctx, peer, pid)\n}", "func NewSubproc(id string, _ gosec.Config) (gosec.Rule, []ast.Node) {\n\trule := &subprocess{issue.MetaData{ID: id}, gosec.NewCallList()}\n\trule.Add(\"os/exec\", \"Command\")\n\trule.Add(\"os/exec\", \"CommandContext\")\n\trule.Add(\"syscall\", \"Exec\")\n\trule.Add(\"syscall\", \"ForkExec\")\n\trule.Add(\"syscall\", \"StartProcess\")\n\trule.Add(\"golang.org/x/sys/execabs\", 
\"Command\")\n\trule.Add(\"golang.org/x/sys/execabs\", \"CommandContext\")\n\treturn rule, []ast.Node{(*ast.CallExpr)(nil)}\n}", "func (c *Connection) CreateStream() *Stream {\n\tnextStream := c.maxStream + 1\n\n\t// Client opens odd streams\n\tif c.role == RoleClient {\n\t\tif (nextStream & 1) == 0 {\n\t\t\tnextStream++\n\t\t}\n\t} else {\n\t\tif (nextStream & 1) == 1 {\n\t\t\tnextStream++\n\t\t}\n\t}\n\n\treturn c.ensureStream(nextStream)\n}", "func NewStreaming() *Streaming {\n\treturn &Streaming{varX: &Streaming{varX: nil, varY: nil}, varY: &Streaming{varX: nil, varY: nil}}\n}", "func RunStream(plugin plugin.YomoStreamPlugin, endpoint string) {\n\tlog.SetPrefix(fmt.Sprintf(\"[%s:%v]\", plugin.Name(), os.Getpid()))\n\tlog.Printf(\"plugin service start... [%s]\", endpoint)\n\n\t// binding plugin\n\tpluginStream := framework.NewStreamPlugin(plugin)\n\n\t// decoding\n\tdeStream1 := txtkv.NewStreamDecoder(plugin.Observed())\n\n\t//过滤\n\tdeStream2 := txtkv.NewFilterDecoder(plugin.Observed())\n\n\t// encoding\n\tenStream := txtkv.NewStreamEncoder(plugin.Observed())\n\n\tdeStream := io.MultiWriter(deStream1.Writer, deStream2.Writer)\n\n\t// activation service\n\tframework.NewServer(endpoint, deStream, enStream.Reader)\n\n\tgo func() { io.CopyN(pluginStream.Writer, deStream1.Reader, 1024) }() // nolint\n\tgo func() { io.CopyN(enStream.Writer, pluginStream.Reader, 1024) }() // nolint\n\tgo func() { io.CopyN(enStream.Writer, deStream2.Reader, 1024) }() // nolint\n}", "func New(w int, l Log, p Proc) Streams {\n\tif w <= 0 {\n\t\tw = 1\n\t}\n\n\tif l == nil {\n\t\tl = events\n\t}\n\n\tsm := Stream{\n\t\tlog: l,\n\t\tuuid: uuid.NewV4().String(),\n\t\tworkers: w,\n\t\tproc: p,\n\t\tdata: make(dataSink),\n\t\terr: make(errorSink),\n\t\tnc: make(chan struct{}),\n\t}\n\n\t// initialize the total data workers needed.\n\tfor i := 0; i < w; i++ {\n\t\tgo sm.initDW()\n\t}\n\n\treturn &sm\n}", "func (t *Task) NewSubTask() *Task {\n\treturn newTask(t.wg)\n}", "func NewStreaming() 
*Streaming {\n\treturn &Streaming{}\n}", "func NewStream(r *http.Request, w http.ResponseWriter) (*Stream, error) {\n\tf, ok := w.(http.Flusher)\n\n\tif ok {\n\t\tw.Header().Set(\"Content-Type\", \"text/event-stream\")\n\t\tw.Header().Set(\"Cache-Control\", \"no-cache\")\n\t\tw.Header().Set(\"Connection\", \"keep-alive\")\n\t} else {\n\t\treturn nil, ErrBadStream\n\t}\n\n\treturn &Stream{\n\t\twriter: w,\n\t\tflusher: f,\n\t\trequest: r,\n\t}, nil\n}", "func SubscribeToStream(conn *EventStoreConnection, streamID string, resolveLinkTos bool, eventAppeared eventAppeared, dropped dropped) (*Subscription, error) {\n\tsubscriptionData := &protobuf.SubscribeToStream{\n\t\tEventStreamId: proto.String(streamID),\n\t\tResolveLinkTos: proto.Bool(resolveLinkTos),\n\t}\n\tdata, err := proto.Marshal(subscriptionData)\n\tif err != nil {\n\t\tlog.Fatal(\"marshaling error: \", err)\n\t}\n\n\tlog.Printf(\"[info] Subscription Data: %+v\\n\", subscriptionData)\n\tcorrelationID := newPackageCorrelationID()\n\tpkg, err := newPackage(subscribeToStream, data, correlationID.Bytes(), conn.Config.Login, conn.Config.Password)\n\tif err != nil {\n\t\tlog.Printf(\"[error] failed to subscribe to stream package\")\n\t}\n\tif !conn.connected {\n\t\treturn nil, errors.New(\"the connection is closed\")\n\t}\n\tresultChan := make(chan TCPPackage)\n\tsendPackage(pkg, conn, resultChan)\n\tresult := <-resultChan\n\tsubscriptionConfirmation := &protobuf.SubscriptionConfirmation{}\n\tproto.Unmarshal(result.Data, subscriptionConfirmation)\n\tlog.Printf(\"[info] SubscribeToStream: %+v\\n\", subscriptionConfirmation)\n\tsubscription, err := NewSubscription(conn, correlationID, resultChan, eventAppeared, dropped)\n\tif err != nil {\n\t\tlog.Printf(\"[error] Failed to create new subscription: %+v\\n\", err)\n\t}\n\tconn.subscriptions[correlationID] = subscription\n\treturn subscription, nil\n}", "func NewStream(ctx context.Context, w http.ResponseWriter) Stream {\n\tresult := &stream{\n\t\tctx: ctx,\n\t\tw: 
w,\n\t}\n\n\treturn result\n}", "func (fss *StreamingService) Stream(wg *sync.WaitGroup) error { return nil }", "func TestCreateStream(t *testing.T) {\n\tdefer cleanupStorage(t)\n\n\t// Use a central NATS server.\n\tns := natsdTest.RunDefaultServer()\n\tdefer ns.Shutdown()\n\n\t// Configure server.\n\ts1Config := getTestConfig(\"a\", true, 5050)\n\ts1 := runServerWithConfig(t, s1Config)\n\tdefer s1.Stop()\n\n\tgetMetadataLeader(t, 10*time.Second, s1)\n\n\tclient, err := lift.Connect([]string{\"localhost:5050\"})\n\trequire.NoError(t, err)\n\tdefer client.Close()\n\n\terr = client.CreateStream(context.Background(), \"foo\", \"bar\")\n\trequire.NoError(t, err)\n\n\t// Creating the same stream returns ErrStreamExists.\n\terr = client.CreateStream(context.Background(), \"foo\", \"bar\")\n\trequire.Equal(t, lift.ErrStreamExists, err)\n}", "func newMockStream() *mockStream {\n\t// just need a new context specific to this stream,\n\t// easiest way to get it.\n\tctx, _ := context.WithCancel(context.TODO())\n\n\treturn &mockStream{\n\t\tsendChan: make(chan *Response, 100),\n\t\trecvChan: make(chan *Request, 100),\n\t\tctx: ctx,\n\t}\n}", "func (c *Chat) Sub(subName string, topics []LogCat) chan LogLineParsed {\n\tnewSub := make(chan LogLineParsed, 10)\n\tc.logger.newSubs <- subToChatPump{\n\t\tName: subName,\n\t\tSubbed: topics,\n\t\tC: newSub,\n\t}\n\n\treturn newSub\n}", "func (q *QueryServiceTestHelper) NewStream(gocontext.Context, *grpc.StreamDesc, string, ...grpc.CallOption) (grpc.ClientStream, error) {\n\treturn nil, fmt.Errorf(\"not supported\")\n}", "func (c *subContext) openStream(ctx context.Context, epID epapi.ID, indCh chan<- indication.Indication) error {\n\tresponse, err := c.epClient.Get(ctx, epID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tconn, err := c.conns.Connect(fmt.Sprintf(\"%s:%d\", response.IP, response.Port))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclient := termination.NewClient(conn)\n\tresponseCh := make(chan 
e2tapi.StreamResponse)\n\trequestCh, err := client.Stream(ctx, responseCh)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trequestCh <- e2tapi.StreamRequest{\n\t\tAppID: e2tapi.AppID(c.config.AppID),\n\t\tInstanceID: e2tapi.InstanceID(c.config.InstanceID),\n\t\tSubscriptionID: e2tapi.SubscriptionID(c.sub.ID),\n\t}\n\n\tfor response := range responseCh {\n\t\tindCh <- indication.Indication{\n\t\t\tEncodingType: encoding.Type(response.Header.EncodingType),\n\t\t\tPayload: indication.Payload{\n\t\t\t\tHeader: response.IndicationHeader,\n\t\t\t\tMessage: response.IndicationMessage,\n\t\t\t},\n\t\t}\n\t}\n\treturn nil\n}", "func (c *Client) Sub(name string, args ...interface{}) (chan string, error) {\n\n\tif args == nil {\n\t\tlog.Println(\"no args passed\")\n\t\tif err := c.ddp.Sub(name); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tif err := c.ddp.Sub(name, args[0], false); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tmsgChannel := make(chan string, default_buffer_size)\n\tc.ddp.CollectionByName(\"stream-room-messages\").AddUpdateListener(genericExtractor{msgChannel, \"update\"})\n\n\treturn msgChannel, nil\n}", "func NewChildPipeStream(reading bool) (*ChildPipeStream, error) {\n\treader, writer, err := os.Pipe()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif reading {\n\t\treturn &ChildPipeStream{\n\t\t\tparentPipe: reader,\n\t\t\tchildPipe: writer,\n\t\t}, nil\n\t} else {\n\t\treturn &ChildPipeStream{\n\t\t\tparentPipe: writer,\n\t\t\tchildPipe: reader,\n\t\t}, nil\n\t}\n}", "func NewStream(cfg StreamConfig) (Stream, error) {\n\tstream := Stream{}\n\tvar nc *nats.Conn\n\tvar err error\n\n\t// Connect Options.\n\topts := []nats.Option{nats.Name(cfg.Name)}\n\tif cfg.WaitInMinutes != 0 {\n\t\topts = appendWaitOpts(cfg, opts)\n\t}\n\n\t// Provide Authentication information\n\topts = append(opts, nats.UserInfo(cfg.User, cfg.Password))\n\n\t//Connect to NATS\n\tif cfg.URI != \"\" {\n\t\tnc, err = nats.Connect(cfg.URI, opts...)\n\t} else {\n\t\tnc, 
err = nats.Connect(nats.DefaultURL, opts...)\n\t}\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\tstream.Conn = nc\n\treturn stream, err\n}", "func (w Wrapper) SubscribeStream(streampath string, chn chan messenger.Message) (*nats.Subscription, error) {\n\t_, _, streampath, _, substream, err := util.SplitStreamPath(streampath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstrm, err := w.AdminOperator().ReadStream(streampath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn w.SubscribeStreamByID(strm.StreamID, substream, chn)\n}", "func NewStream(s *loghttp.Stream) *logproto.Stream {\n\tret := &logproto.Stream{\n\t\tEntries: make([]logproto.Entry, len(s.Entries)),\n\t\tLabels: s.Labels.String(),\n\t}\n\n\tfor i, e := range s.Entries {\n\t\tret.Entries[i] = NewEntry(e)\n\t}\n\n\treturn ret\n}", "func newEventStreamStop(id, code uint64) Event {\n\treturn Event{\n\t\tType: EventStreamStop,\n\t\tID: id,\n\t\tData: code,\n\t}\n}", "func (hs *Handshake) handleNewStream(s p2p.Stream, msg proto.Message) {\n\t// check message type\n\tswitch message := msg.(type) {\n\tcase *Message:\n\t\ths.handleMessage(s, message)\n\tdefault:\n\t\ts.Reset()\n\t\tglog.Errorf(\"unexpected message: %v\", msg)\n\t}\n}", "func startSubscriber(agent string, topic interface{}, subSkt chan string) {\n\tlog.Infof(\"startSubscriber(%s)\", agent)\n\tsockName := getSocketName(agent, topic)\n\tdir := path.Dir(sockName)\n\tif _, err := os.Stat(dir); err != nil {\n\t\tlog.Infof(\"startSubscriber(%s): Create %s\\n\", agent, dir)\n\t\tif err := os.MkdirAll(dir, 0700); err != nil {\n\t\t\tlog.Fatalf(\"startSubscriber(%s): Exception while creating %s. %s\",\n\t\t\t\tagent, dir, err)\n\t\t}\n\t}\n\tif _, err := os.Stat(sockName); err == nil {\n\t\t// This could either be a left-over in the filesystem\n\t\t// or some other process (or ourselves) using the same\n\t\t// name to publish. 
Try connect to see if it is the latter.\n\t\t_, err := net.Dial(\"unix\", sockName)\n\t\tif err == nil {\n\t\t\tlog.Fatalf(\"connectAndRead(%s): Can not publish %s since its already used\",\n\t\t\t\tagent, sockName)\n\t\t}\n\t\tif err := os.Remove(sockName); err != nil {\n\t\t\tlog.Fatalf(\"connectAndRead(%s): Exception while removing pre-existing sock %s. %s\",\n\t\t\t\tagent, sockName, err)\n\t\t}\n\t}\n\tlistener, err := net.Listen(\"unix\", sockName)\n\tif err != nil {\n\t\tlog.Fatalf(\"connectAndRead(%s): Exception while listening at sock %s. %s\",\n\t\t\tagent, sockName, err)\n\t}\n\tdefer listener.Close()\n\tfor {\n\t\tc, err := listener.Accept()\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"connectAndRead(%s) failed %s\\n\", sockName, err)\n\t\t\tcontinue\n\t\t}\n\t\tgo serveConnection(c, subSkt)\n\t}\n}", "func NewStream(w io.Writer) *Stream {\n\treturn &Stream{\n\t\tw: w,\n\t\tbuf: make([]byte, 0, initialStreamBufSize),\n\t}\n}", "func CreateStream(mw *Model) *Stream {\n\treturn &Stream{\n\t\tsw: C.CreateStream(mw.w),\n\t}\n}", "func (g *smartContractGW) createStream(res http.ResponseWriter, req *http.Request, params httprouter.Params) {\n\tlog.Infof(\"--> %s %s\", req.Method, req.URL)\n\n\tif g.sm == nil {\n\t\tg.gatewayErrReply(res, req, errors.New(errEventSupportMissing), 405)\n\t\treturn\n\t}\n\n\tvar spec events.StreamInfo\n\tif err := json.NewDecoder(req.Body).Decode(&spec); err != nil {\n\t\tg.gatewayErrReply(res, req, ethconnecterrors.Errorf(ethconnecterrors.RESTGatewayEventStreamInvalid, err), 400)\n\t\treturn\n\t}\n\n\tnewSpec, err := g.sm.AddStream(req.Context(), &spec)\n\tif err != nil {\n\t\tg.gatewayErrReply(res, req, err, 400)\n\t\treturn\n\t}\n\n\tstatus := 200\n\tlog.Infof(\"<-- %s %s [%d]\", req.Method, req.URL, status)\n\tres.Header().Set(\"Content-Type\", \"application/json\")\n\tres.WriteHeader(status)\n\tenc := json.NewEncoder(res)\n\tenc.SetIndent(\"\", \" \")\n\tenc.Encode(&newSpec)\n}", "func (s *server) Stream(in *tt.Empty, stream 
tt.TamTam_StreamServer) error {\n\tch := make(chan []byte)\n\tctx := stream.Context()\n\tutil.AddBroadcastChannel(ctx, ch)\n\tdefer util.RemoveBroadcastChannel(ctx)\n\tdefer log.Info().Msg(\"Broadcast listener went away\")\n\tfor {\n\t\tselect {\n\t\tcase v := <-ch:\n\t\t\tlog.Debug().Msgf(\"Streaming %d bytes to subscriber\", len(v))\n\t\t\tif err := stream.Send(&tt.Message{Bytes: v}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase <-ctx.Done():\n\t\t\treturn nil\n\t\t}\n\t}\n}", "func (s *Server) jsStreamCreateRequest(sub *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) {\n\tif c == nil || !s.JetStreamEnabled() {\n\t\treturn\n\t}\n\tci, acc, _, msg, err := s.getRequestInfo(c, rmsg)\n\tif err != nil {\n\t\ts.Warnf(badAPIRequestT, msg)\n\t\treturn\n\t}\n\n\tvar resp = JSApiStreamCreateResponse{ApiResponse: ApiResponse{Type: JSApiStreamCreateResponseType}}\n\n\t// Determine if we should proceed here when we are in clustered mode.\n\tif s.JetStreamIsClustered() {\n\t\tjs, cc := s.getJetStreamCluster()\n\t\tif js == nil || cc == nil {\n\t\t\treturn\n\t\t}\n\t\tif js.isLeaderless() {\n\t\t\tresp.Error = NewJSClusterNotAvailError()\n\t\t\ts.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp))\n\t\t\treturn\n\t\t}\n\t\t// Make sure we are meta leader.\n\t\tif !s.JetStreamIsLeader() {\n\t\t\treturn\n\t\t}\n\t}\n\n\tif hasJS, doErr := acc.checkJetStream(); !hasJS {\n\t\tif doErr {\n\t\t\tresp.Error = NewJSNotEnabledForAccountError()\n\t\t\ts.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp))\n\t\t}\n\t\treturn\n\t}\n\n\tvar cfg StreamConfig\n\tif err := json.Unmarshal(msg, &cfg); err != nil {\n\t\tresp.Error = NewJSInvalidJSONError()\n\t\ts.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp))\n\t\treturn\n\t}\n\n\tstreamName := streamNameFromSubject(subject)\n\tif streamName != cfg.Name {\n\t\tresp.Error = NewJSStreamMismatchError()\n\t\ts.sendAPIErrResponse(ci, 
acc, subject, reply, string(msg), s.jsonResponse(&resp))\n\t\treturn\n\t}\n\n\t// Check for path like separators in the name.\n\tif strings.ContainsAny(streamName, `\\/`) {\n\t\tresp.Error = NewJSStreamNameContainsPathSeparatorsError()\n\t\ts.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp))\n\t\treturn\n\t}\n\n\t// Can't create a stream with a sealed state.\n\tif cfg.Sealed {\n\t\tresp.Error = NewJSStreamInvalidConfigError(fmt.Errorf(\"stream configuration for create can not be sealed\"))\n\t\ts.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp))\n\t\treturn\n\t}\n\n\t// If we are told to do mirror direct but are not mirroring, error.\n\tif cfg.MirrorDirect && cfg.Mirror == nil {\n\t\tresp.Error = NewJSStreamInvalidConfigError(fmt.Errorf(\"stream has no mirror but does have mirror direct\"))\n\t\ts.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp))\n\t\treturn\n\t}\n\n\t// Hand off to cluster for processing.\n\tif s.JetStreamIsClustered() {\n\t\ts.jsClusteredStreamRequest(ci, acc, subject, reply, rmsg, &cfg)\n\t\treturn\n\t}\n\n\tif err := acc.jsNonClusteredStreamLimitsCheck(&cfg); err != nil {\n\t\tresp.Error = err\n\t\ts.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp))\n\t\treturn\n\t}\n\n\tmset, err := acc.addStream(&cfg)\n\tif err != nil {\n\t\tif IsNatsErr(err, JSStreamStoreFailedF) {\n\t\t\ts.Warnf(\"Stream create failed for '%s > %s': %v\", acc, streamName, err)\n\t\t\terr = errStreamStoreFailed\n\t\t}\n\t\tresp.Error = NewJSStreamCreateError(err, Unless(err))\n\t\ts.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp))\n\t\treturn\n\t}\n\tresp.StreamInfo = &StreamInfo{\n\t\tCreated: mset.createdTime(),\n\t\tState: mset.state(),\n\t\tConfig: mset.config(),\n\t}\n\tresp.DidCreate = true\n\ts.sendAPIResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(resp))\n}", "func NewStream(host core.Host, pid 
core.PeerID, protoIDs ...core.ProtocolID) (core.Stream, error) {\n\n\tstream, err := host.NewStream(context.Background(), pid, protoIDs...)\n\t// EOF表示底层连接断开, 增加一次重试\n\tif err == io.EOF {\n\t\tlog.Debug(\"NewStream\", \"msg\", \"RetryConnectEOF\")\n\t\tstream, err = host.NewStream(context.Background(), pid, protoIDs...)\n\t}\n\tif err != nil {\n\t\tlog.Error(\"NewStream\", \"pid\", pid.Pretty(), \"msgID\", protoIDs, \" err\", err)\n\t\treturn nil, err\n\t}\n\treturn stream, nil\n}", "func NewStream(in io.Reader, out io.Writer) Stream {\r\n\treturn &plainStream{\r\n\t\tin: json.NewDecoder(in),\r\n\t\tout: out,\r\n\t}\r\n}", "func (c *Component) Stream(subj string) *Stream {\n\t// Pick the cbs that will be executed from this event.\n\treturn &Stream{\n\t\tsubject: subj,\n\t\thooks: make([]func(), 0),\n\t\tnc: c.nc,\n\t\tcomp: c,\n\t\tpubfilters: make([]func(data []byte)[]byte, 0),\n\t}\n}", "func NewStream(ctx context.Context, method string, conn *Conn) (Stream, error) {\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\n\treturn newStreamClient(ctx, method, conn)\n}", "func NewStreamPool(maxStreamLimit int32, no *Node, log log.Logger) (*StreamPool, error) {\n\treturn &StreamPool{\n\t\tlog: log,\n\t\tstreams: common.NewLRUCache(int(maxStreamLimit)),\n\t\tquitCh: make(chan bool, 1),\n\t\tstreamLength: 0,\n\t\tmaxStreamLimit: maxStreamLimit,\n\t\tno: no,\n\t}, nil\n}", "func New(name string, size int) *StreamLogger {\n\tlogger := &StreamLogger{\n\t\tname: name,\n\t\tdataQueue: make(chan Formatter, size),\n\t\tsubscribed: make(map[io.Writer]subscription),\n\t}\n\tgo logger.stream()\n\treturn logger\n}", "func NewStream(room *Room) *Stream {\n\treturn &Stream{\n\t\troom: room,\n\t\toutgoing: make(chan *Message),\n\t\tstop: make(chan bool),\n\t}\n}", "func (r *Runner) Stream(ctx context.Context, study diviner.Study, nparallel int) *Streamer {\n\ts := &Streamer{\n\t\trunner: r,\n\t\tstudy: study,\n\t\tnparallel: nparallel,\n\t\tstopc: make(chan 
struct{}),\n\t\tdonec: make(chan error),\n\t}\n\tgo func() {\n\t\ts.donec <- s.do(ctx)\n\t}()\n\treturn s\n}", "func (s *DeliveryStreamService) Create(ctx context.Context, input []byte) (*firehose.CreateDeliveryStreamOutput, error) {\n\ti := &firehose.CreateDeliveryStreamInput{}\n\tif err := json.Unmarshal(input, i); err != nil {\n\t\treturn nil, awserr.NewUnmarshalError(err, \"Unmarshal error\", input)\n\t}\n\tif err := i.Validate(); err != nil {\n\t\treturn nil, err\n\t}\n\tarn := s.arnName(*i.DeliveryStreamName)\n\tdsCtx, dsCancel := context.WithCancel(context.Background())\n\trecordCh := make(chan *deliveryRecord, 128)\n\tdsType := \"DirectPut\"\n\tif i.DeliveryStreamType != nil {\n\t\tdsType = *i.DeliveryStreamType\n\t}\n\tds := &deliveryStream{\n\t\tarn: arn,\n\t\tdeliveryStreamName: *i.DeliveryStreamName,\n\t\tdeliveryStreamType: dsType,\n\t\trecordCh: recordCh,\n\t\tcloser: dsCancel,\n\t\tdestDesc: &firehose.DestinationDescription{},\n\t\tcreatedAt: time.Now(),\n\t}\n\t//nolint\n\tif i.S3DestinationConfiguration != nil {\n\t\ts3dest := &s3Destination{\n\t\t\tdeliveryName: *i.DeliveryStreamName,\n\t\t\tbucketARN: *i.S3DestinationConfiguration.BucketARN,\n\t\t\tbufferingHints: i.S3DestinationConfiguration.BufferingHints,\n\t\t\tcompressionFormat: i.S3DestinationConfiguration.CompressionFormat,\n\t\t\terrorOutputPrefix: i.S3DestinationConfiguration.ErrorOutputPrefix,\n\t\t\tprefix: i.S3DestinationConfiguration.Prefix,\n\t\t\tinjectedConf: s.s3InjectedConf,\n\t\t\tawsConf: s.awsConf,\n\t\t}\n\t\tconf, err := s3dest.Setup(dsCtx)\n\t\tif err != nil {\n\t\t\treturn nil, awserr.New(firehose.ErrCodeResourceNotFoundException, \"invalid BucketName\", err)\n\t\t}\n\t\tds.destDesc.S3DestinationDescription = &firehose.S3DestinationDescription{\n\t\t\tBucketARN: i.S3DestinationConfiguration.BucketARN,\n\t\t\tBufferingHints: i.S3DestinationConfiguration.BufferingHints,\n\t\t\tCompressionFormat: i.S3DestinationConfiguration.CompressionFormat,\n\t\t\tEncryptionConfiguration: 
i.S3DestinationConfiguration.EncryptionConfiguration,\n\t\t\tErrorOutputPrefix: i.S3DestinationConfiguration.ErrorOutputPrefix,\n\t\t\tPrefix: i.S3DestinationConfiguration.Prefix,\n\t\t\tRoleARN: i.S3DestinationConfiguration.RoleARN,\n\t\t}\n\t\tgo s3dest.Run(dsCtx, conf, recordCh)\n\t}\n\tif ds.deliveryStreamType == \"KinesisStreamAsSource\" && i.KinesisStreamSourceConfiguration != nil {\n\t\tconsumer, err := newKinesisConsumer(ctx, s.awsConf, i.KinesisStreamSourceConfiguration, s.kinesisInjectedConf)\n\t\tif err != nil {\n\t\t\tds.Close()\n\t\t\treturn nil, err\n\t\t}\n\t\tds.sourceDesc = &firehose.SourceDescription{\n\t\t\tKinesisStreamSourceDescription: &firehose.KinesisStreamSourceDescription{\n\t\t\t\tDeliveryStartTimestamp: aws.Time(ds.createdAt),\n\t\t\t\tKinesisStreamARN: i.KinesisStreamSourceConfiguration.KinesisStreamARN,\n\t\t\t\tRoleARN: i.KinesisStreamSourceConfiguration.RoleARN,\n\t\t\t},\n\t\t}\n\t\tgo consumer.Run(dsCtx, recordCh)\n\t}\n\ts.pool.Add(ds)\n\toutput := &firehose.CreateDeliveryStreamOutput{\n\t\tDeliveryStreamARN: &arn,\n\t}\n\treturn output, nil\n}", "func (sp *StreamPool) Add(s net.Stream) *Stream {\n\t// filter by StreamLimit first\n\taddrStr := s.Conn().RemoteMultiaddr().String()\n\tpeerID := s.Conn().RemotePeer()\n\tif ok := sp.no.streamLimit.AddStream(addrStr, peerID); !ok {\n\t\ts.Reset()\n\t\treturn nil\n\t}\n\tstream := NewStream(s, sp.no)\n\tif err := sp.AddStream(stream); err != nil {\n\t\tstream.Close()\n\t\tsp.DelStream(stream)\n\t\tsp.no.kdht.RoutingTable().Remove(stream.p)\n\t\tsp.log.Warn(\"New stream is deleted\")\n\t\treturn nil\n\t}\n\treturn stream\n}", "func NewSubscriber(log *base.LogObject, agent string, topic interface{}) <-chan string {\n\tsubChan := make(chan string)\n\tgo startSubscriber(log, agent, topic, subChan)\n\treturn subChan\n}", "func (agent *Agent) OpenStream(vbId uint16, flags DcpStreamAddFlag, vbUuid VbUuid, startSeqNo,\n\tendSeqNo, snapStartSeqNo, snapEndSeqNo SeqNo, evtHandler StreamObserver, 
filter *StreamFilter, cb OpenStreamCallback) (PendingOp, error) {\n\tvar req *memdQRequest\n\thandler := func(resp *memdQResponse, _ *memdQRequest, err error) {\n\t\tif resp != nil && resp.Magic == resMagic {\n\t\t\t// This is the response to the open stream request.\n\t\t\tif err != nil {\n\t\t\t\treq.Cancel()\n\n\t\t\t\t// All client errors are handled by the StreamObserver\n\t\t\t\tcb(nil, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tnumEntries := len(resp.Value) / 16\n\t\t\tentries := make([]FailoverEntry, numEntries)\n\t\t\tfor i := 0; i < numEntries; i++ {\n\t\t\t\tentries[i] = FailoverEntry{\n\t\t\t\t\tVbUuid: VbUuid(binary.BigEndian.Uint64(resp.Value[i*16+0:])),\n\t\t\t\t\tSeqNo: SeqNo(binary.BigEndian.Uint64(resp.Value[i*16+8:])),\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tcb(entries, nil)\n\t\t\treturn\n\t\t}\n\n\t\tif err != nil {\n\t\t\treq.Cancel()\n\t\t\tstreamId := noStreamId\n\t\t\tif filter != nil {\n\t\t\t\tstreamId = filter.StreamId\n\t\t\t}\n\t\t\tevtHandler.End(vbId, streamId, err)\n\t\t\treturn\n\t\t}\n\n\t\t// This is one of the stream events\n\t\tswitch resp.Opcode {\n\t\tcase cmdDcpSnapshotMarker:\n\t\t\tvbId := uint16(resp.Vbucket)\n\t\t\tnewStartSeqNo := binary.BigEndian.Uint64(resp.Extras[0:])\n\t\t\tnewEndSeqNo := binary.BigEndian.Uint64(resp.Extras[8:])\n\t\t\tsnapshotType := binary.BigEndian.Uint32(resp.Extras[16:])\n\t\t\tvar streamId uint16\n\t\t\tif resp.FrameExtras != nil && resp.FrameExtras.HasStreamId {\n\t\t\t\tstreamId = resp.FrameExtras.StreamId\n\t\t\t}\n\t\t\tevtHandler.SnapshotMarker(newStartSeqNo, newEndSeqNo, vbId, streamId, SnapshotState(snapshotType))\n\t\tcase cmdDcpMutation:\n\t\t\tvbId := uint16(resp.Vbucket)\n\t\t\tseqNo := binary.BigEndian.Uint64(resp.Extras[0:])\n\t\t\trevNo := binary.BigEndian.Uint64(resp.Extras[8:])\n\t\t\tflags := binary.BigEndian.Uint32(resp.Extras[16:])\n\t\t\texpiry := binary.BigEndian.Uint32(resp.Extras[20:])\n\t\t\tlockTime := binary.BigEndian.Uint32(resp.Extras[24:])\n\t\t\tvar streamId uint16\n\t\t\tif 
resp.FrameExtras != nil && resp.FrameExtras.HasStreamId {\n\t\t\t\tstreamId = resp.FrameExtras.StreamId\n\t\t\t}\n\t\t\tevtHandler.Mutation(seqNo, revNo, flags, expiry, lockTime, resp.Cas, resp.Datatype, vbId, resp.CollectionID, streamId, resp.Key, resp.Value)\n\t\tcase cmdDcpDeletion:\n\t\t\tvbId := uint16(resp.Vbucket)\n\t\t\tseqNo := binary.BigEndian.Uint64(resp.Extras[0:])\n\t\t\trevNo := binary.BigEndian.Uint64(resp.Extras[8:])\n\t\t\tvar streamId uint16\n\t\t\tif resp.FrameExtras != nil && resp.FrameExtras.HasStreamId {\n\t\t\t\tstreamId = resp.FrameExtras.StreamId\n\t\t\t}\n\t\t\tevtHandler.Deletion(seqNo, revNo, resp.Cas, resp.Datatype, vbId, resp.CollectionID, streamId, resp.Key, resp.Value)\n\t\tcase cmdDcpExpiration:\n\t\t\tvbId := uint16(resp.Vbucket)\n\t\t\tseqNo := binary.BigEndian.Uint64(resp.Extras[0:])\n\t\t\trevNo := binary.BigEndian.Uint64(resp.Extras[8:])\n\t\t\tvar streamId uint16\n\t\t\tif resp.FrameExtras != nil && resp.FrameExtras.HasStreamId {\n\t\t\t\tstreamId = resp.FrameExtras.StreamId\n\t\t\t}\n\t\t\tevtHandler.Expiration(seqNo, revNo, resp.Cas, vbId, resp.CollectionID, streamId, resp.Key)\n\t\tcase cmdDcpEvent:\n\t\t\tvbId := uint16(resp.Vbucket)\n\t\t\tseqNo := binary.BigEndian.Uint64(resp.Extras[0:])\n\t\t\teventCode := StreamEventCode(binary.BigEndian.Uint32(resp.Extras[8:]))\n\t\t\tversion := resp.Extras[12]\n\t\t\tvar streamId uint16\n\t\t\tif resp.FrameExtras != nil && resp.FrameExtras.HasStreamId {\n\t\t\t\tstreamId = resp.FrameExtras.StreamId\n\t\t\t}\n\n\t\t\tswitch eventCode {\n\t\t\tcase StreamEventCollectionCreate:\n\t\t\t\tmanifestUid := binary.BigEndian.Uint64(resp.Value[0:])\n\t\t\t\tscopeId := binary.BigEndian.Uint32(resp.Value[8:])\n\t\t\t\tcollectionId := binary.BigEndian.Uint32(resp.Value[12:])\n\t\t\t\tvar ttl uint32\n\t\t\t\tif version == 1 {\n\t\t\t\t\tttl = binary.BigEndian.Uint32(resp.Value[16:])\n\t\t\t\t}\n\t\t\t\tevtHandler.CreateCollection(seqNo, version, vbId, manifestUid, scopeId, collectionId, ttl, 
streamId, resp.Key)\n\t\t\tcase StreamEventCollectionDelete:\n\t\t\t\tmanifestUid := binary.BigEndian.Uint64(resp.Value[0:])\n\t\t\t\tscopeId := binary.BigEndian.Uint32(resp.Value[8:])\n\t\t\t\tcollectionId := binary.BigEndian.Uint32(resp.Value[12:])\n\t\t\t\tevtHandler.DeleteCollection(seqNo, version, vbId, manifestUid, scopeId, collectionId, streamId)\n\t\t\tcase StreamEventCollectionFlush:\n\t\t\t\tmanifestUid := binary.BigEndian.Uint64(resp.Value[0:])\n\t\t\t\tcollectionId := binary.BigEndian.Uint32(resp.Value[8:])\n\t\t\t\tevtHandler.FlushCollection(seqNo, version, vbId, manifestUid, collectionId)\n\t\t\tcase StreamEventScopeCreate:\n\t\t\t\tmanifestUid := binary.BigEndian.Uint64(resp.Value[0:])\n\t\t\t\tscopeId := binary.BigEndian.Uint32(resp.Value[8:])\n\t\t\t\tevtHandler.CreateScope(seqNo, version, vbId, manifestUid, scopeId, streamId, resp.Key)\n\t\t\tcase StreamEventScopeDelete:\n\t\t\t\tmanifestUid := binary.BigEndian.Uint64(resp.Value[0:])\n\t\t\t\tscopeId := binary.BigEndian.Uint32(resp.Value[8:])\n\t\t\t\tevtHandler.DeleteScope(seqNo, version, vbId, manifestUid, scopeId, streamId)\n\t\t\tcase StreamEventCollectionChanged:\n\t\t\t\tmanifestUid := binary.BigEndian.Uint64(resp.Value[0:])\n\t\t\t\tcollectionId := binary.BigEndian.Uint32(resp.Value[8:])\n\t\t\t\tttl := binary.BigEndian.Uint32(resp.Value[12:])\n\t\t\t\tevtHandler.ModifyCollection(seqNo, version, vbId, manifestUid, collectionId, ttl, streamId)\n\t\t\t}\n\t\tcase cmdDcpStreamEnd:\n\t\t\tvbId := uint16(resp.Vbucket)\n\t\t\tcode := streamEndStatus(binary.BigEndian.Uint32(resp.Extras[0:]))\n\t\t\tvar streamId uint16\n\t\t\tif resp.FrameExtras != nil && resp.FrameExtras.HasStreamId {\n\t\t\t\tstreamId = resp.FrameExtras.StreamId\n\t\t\t}\n\t\t\tevtHandler.End(vbId, streamId, getStreamEndError(code))\n\t\t\treq.Cancel()\n\t\t}\n\t}\n\n\textraBuf := make([]byte, 48)\n\tbinary.BigEndian.PutUint32(extraBuf[0:], uint32(flags))\n\tbinary.BigEndian.PutUint32(extraBuf[4:], 
0)\n\tbinary.BigEndian.PutUint64(extraBuf[8:], uint64(startSeqNo))\n\tbinary.BigEndian.PutUint64(extraBuf[16:], uint64(endSeqNo))\n\tbinary.BigEndian.PutUint64(extraBuf[24:], uint64(vbUuid))\n\tbinary.BigEndian.PutUint64(extraBuf[32:], uint64(snapStartSeqNo))\n\tbinary.BigEndian.PutUint64(extraBuf[40:], uint64(snapEndSeqNo))\n\n\tvar val []byte\n\tval = nil\n\tif filter != nil {\n\t\tconvertedFilter := streamFilter{}\n\t\tfor _, cid := range filter.Collections {\n\t\t\tconvertedFilter.Collections = append(convertedFilter.Collections, fmt.Sprintf(\"%x\", cid))\n\t\t}\n\t\tif filter.Scope != noScopeId {\n\t\t\tconvertedFilter.Scope = fmt.Sprintf(\"%x\", filter.Scope)\n\t\t}\n\t\tif filter.ManifestUid != noManifestUid {\n\t\t\tconvertedFilter.ManifestUid = fmt.Sprintf(\"%x\", filter.ManifestUid)\n\t\t}\n\t\tif filter.StreamId != noStreamId {\n\t\t\tconvertedFilter.StreamId = filter.StreamId\n\t\t}\n\t\tvar err error\n\t\tval, err = json.Marshal(convertedFilter)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treq = &memdQRequest{\n\t\tmemdPacket: memdPacket{\n\t\t\tMagic: reqMagic,\n\t\t\tOpcode: cmdDcpStreamReq,\n\t\t\tDatatype: 0,\n\t\t\tCas: 0,\n\t\t\tExtras: extraBuf,\n\t\t\tKey: nil,\n\t\t\tValue: val,\n\t\t\tVbucket: vbId,\n\t\t},\n\t\tCallback: handler,\n\t\tReplicaIdx: 0,\n\t\tPersistent: true,\n\t}\n\treturn agent.dispatchOp(req)\n}", "func NewStandardSubscriber(address string, subTypes []string, options *access.NotificationSubscriptionOptions) (*StandardSubscriber, error) {\n\tnotificationChannel := make(chan string, notificationBuffer)\n\tid, err := uuid.NewV4()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &StandardSubscriber{\n\t\tid: id.String(),\n\t\tnotificationChannel: notificationChannel,\n\t\taddr: address,\n\t\tsinceTime: time.Now(),\n\t\tacceptedTypes: subTypes,\n\t\tsubscriberOptions: options,\n\t}, nil\n}", "func (c *Client) NewStream(ctx context.Context, name types.StreamName, opts ...Option) (io.WriteCloser, error) 
{\n\tfullOpts, err := c.mkOptions(ctx, name, opts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tret, err := c.dial.DialStream(fullOpts.forProcess, fullOpts.desc)\n\treturn ret, errors.Annotate(err, \"attempting to connect stream %q\", name).Err()\n}", "func (bsnet *impl) handleNewStream(s network.Stream) {\n\tdefer s.Close()\n\n\tif bsnet.receiver == nil {\n\t\t_ = s.Reset()\n\t\treturn\n\t}\n\n\treader := msgio.NewVarintReaderSize(s, network.MessageSizeMax)\n\tfor {\n\t\treceived, err := bsmsg.FromMsgReader(reader)\n\t\tif err != nil {\n\t\t\tif err != io.EOF {\n\t\t\t\t_ = s.Reset()\n\t\t\t\tbsnet.receiver.ReceiveError(err)\n\t\t\t\tlog.Debugf(\"bitswap net handleNewStream from %s error: %s\", s.Conn().RemotePeer(), err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\tp := s.Conn().RemotePeer()\n\t\tctx := context.Background()\n\t\tlog.Debugf(\"bitswap net handleNewStream from %s\", s.Conn().RemotePeer())\n\t\tbsnet.connectEvtMgr.OnMessage(s.Conn().RemotePeer())\n\t\tbsnet.receiver.ReceiveMessage(ctx, p, received)\n\t\tatomic.AddUint64(&bsnet.stats.MessagesRecvd, 1)\n\t}\n}", "func newImageStream(kogitoApp *v1alpha1.KogitoApp, tagRefName string) *imgv1.ImageStream {\n\tresult := strings.Split(tagRefName, \":\")\n\tif len(result) == 1 {\n\t\tresult = append(result, openshift.ImageTagLatest)\n\t}\n\n\tis := &imgv1.ImageStream{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: result[0],\n\t\t\tNamespace: kogitoApp.Namespace,\n\t\t},\n\t\tSpec: imgv1.ImageStreamSpec{\n\t\t\tLookupPolicy: imgv1.ImageLookupPolicy{\n\t\t\t\tLocal: true,\n\t\t\t},\n\t\t\tTags: []imgv1.TagReference{\n\t\t\t\t{\n\t\t\t\t\tName: result[1],\n\t\t\t\t\tReferencePolicy: imgv1.TagReferencePolicy{\n\t\t\t\t\t\tType: imgv1.LocalTagReferencePolicy,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\taddDefaultMeta(&is.ObjectMeta, kogitoApp)\n\tmeta.SetGroupVersionKind(&is.TypeMeta, meta.KindImageStream)\n\n\treturn is\n}", "func StreamCreateURL(url string, flags Flags) (Channel, error) {\n\tcurl := 
C.CString(url)\n\tdefer C.free(unsafe.Pointer(curl))\n\tch := C.BASS_StreamCreateURL(curl, 0, cuint(flags), nil, nil)\n\treturn channelToError(ch)\n}", "func (n *ForNode) handleStreamMsg(msg StreamMsg) {\n\tif n.inChan == nil {\n\t\tn.inChan = make(chan Msg, msg.Len.Len())\n\t}\n\tif n.nodeType == nil {\n\t\tn.nodeType = &streamNodeType{-1, msg.Len, make(map[string]bool), false}\n\t}\n\n\ti := msg.Idx.String()\n\tn.subnodes[i] = n.body.Clone(n.globals)\n\tn.subnodes[i].ParentChans()[n.id] = n.inChan\n\tn.nodeToIdx[n.subnodes[i].ID()] = msg.Idx\n\tSetVarNodes(n.subnodes[i], n.name, msg.Data)\n\n\t// Start node if the body is not a loop,\n\t// or if there are less running nodes than the fanout.\n\tif nodeType, ok := n.nodeType.(*streamNodeType); ok {\n\t\tif !n.isLoop || nodeType.numCurrIdxs < n.fanout {\n\t\t\tnodeType.visitedNodes[i] = true\n\t\t\tstartNode(n.globals, n.subnodes[i])\n\t\t\tnodeType.numCurrIdxs++\n\t\t}\n\t}\n}", "func NewStream(it Iterator) Stream {\n\treturn Stream{it: it}\n}", "func (p *BlsCosi) startSubProtocol(tree *onet.Tree) (*SubBlsCosi, error) {\n\n\tpi, err := p.CreateProtocol(p.subProtocolName, tree)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcosiSubProtocol := pi.(*SubBlsCosi)\n\tcosiSubProtocol.Msg = p.Msg\n\tcosiSubProtocol.Data = p.Data\n\t// Fail fast enough if the subleader is failing to try\n\t// at least three leaves as new subleader\n\tcosiSubProtocol.Timeout = p.Timeout / time.Duration(p.SubleaderFailures+1)\n\t// Give one leaf for free but as we don't know how many leaves\n\t// could fail from the other trees, we need as much as possible\n\t// responses. 
The main protocol will deal with early answers.\n\tcosiSubProtocol.Threshold = tree.Size() - 1\n\n\tlog.Lvlf3(\"Starting sub protocol with subleader %v\", tree.Root.Children[0].ServerIdentity)\n\terr = cosiSubProtocol.Start()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn cosiSubProtocol, err\n}", "func (ggSession *GreengrassSession) CreateSub(source, target, subject string) error {\n\tsourceArn := ggSession.mapSubToArn(source)\n\ttargetArn := ggSession.mapSubToArn(target)\n\n\tnewUUID, err := uuid.NewV4()\n\tif err != nil {\n\t\treturn err\n\t}\n\tuuidString := newUUID.String()\n\n\t// Check if we need to create the initial version\n\tif ggSession.config.SubscriptionDefinition.ID == \"\" {\n\t\tnewSubscription, err := ggSession.greengrass.CreateSubscriptionDefinition(&greengrass.CreateSubscriptionDefinitionInput{\n\t\t\tInitialVersion: &greengrass.SubscriptionDefinitionVersion{\n\t\t\t\tSubscriptions: []*greengrass.Subscription{\n\t\t\t\t\t&greengrass.Subscription{\n\t\t\t\t\t\tSource: &sourceArn,\n\t\t\t\t\t\tTarget: &targetArn,\n\t\t\t\t\t\tSubject: &subject,\n\t\t\t\t\t\tId: &uuidString,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Printf(\"Created new subscription\\n\")\n\t\tggSession.config.SubscriptionDefinition.ID = *newSubscription.Id\n\t\tggSession.config.SubscriptionDefinition.VersionArn = *newSubscription.LatestVersionArn\n\n\t\tggSession.updateGroup()\n\n\t\treturn nil\n\t}\n\n\t// Add subscription to existing\n\tsubscription, _ := ggSession.greengrass.GetSubscriptionDefinition(&greengrass.GetSubscriptionDefinitionInput{\n\t\tSubscriptionDefinitionId: &ggSession.config.SubscriptionDefinition.ID,\n\t})\n\n\tsubscriptionVersion, _ := ggSession.greengrass.GetSubscriptionDefinitionVersion(&greengrass.GetSubscriptionDefinitionVersionInput{\n\t\tSubscriptionDefinitionId: subscription.Id,\n\t\tSubscriptionDefinitionVersionId: subscription.LatestVersion,\n\t})\n\tsubscriptions := 
subscriptionVersion.Definition.Subscriptions\n\n\tsubscriptions = append(subscriptions, &greengrass.Subscription{\n\t\tSource: &sourceArn,\n\t\tTarget: &targetArn,\n\t\tSubject: &subject,\n\t\tId: &uuidString,\n\t})\n\n\toutput, err := ggSession.greengrass.CreateSubscriptionDefinitionVersion(&greengrass.CreateSubscriptionDefinitionVersionInput{\n\t\tSubscriptionDefinitionId: subscription.Id,\n\t\tSubscriptions: subscriptions,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tggSession.config.SubscriptionDefinition.VersionArn = *output.Arn\n\tfmt.Printf(\"Updated subscription\\n\")\n\n\tggSession.updateGroup()\n\n\treturn nil\n}", "func (m *Manager) ForStream(id string) interop.Manager { return m }", "func NewRawFastqStream(name string, inFh *xopen.Reader, inReader *bufio.Reader, seqChan chan *simpleSeq, qBase int, id string, ctrlChanIn, ctrlChanOut chan SeqStreamCtrl, gaps bool) chan *simpleSeq {\n\tlineCounter := 0\n\n\tgo func() {\n\t\tsbuff := make(FqLines, 0, 1000)\n\t\tvar err error\n\n\tMAIN_FQ:\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase cmd := <-ctrlChanIn:\n\t\t\t\tif inReader == nil {\n\t\t\t\t\tinFh, err = xopen.Ropen(name)\n\t\t\t\t\tif err == nil {\n\t\t\t\t\t\tbuffSize := 128 * 1024\n\t\t\t\t\t\tinReader = bufio.NewReaderSize(inFh, buffSize)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tif cmd == StreamQuit {\n\t\t\t\t\t\t\tctrlChanOut <- StreamExited\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\t\t\t\t\t\tcontinue MAIN_FQ\n\t\t\t\t\t}\n\n\t\t\t\t}\n\t\t\t\tif cmd == StreamTry {\n\t\t\t\t\tsbuff, err = streamFastq(name, inReader, sbuff, seqChan, ctrlChanIn, ctrlChanOut, &lineCounter, qBase, gaps, false)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t\t}\n\n\t\t\t\t} else if cmd == StreamQuit {\n\t\t\t\t\tsbuff, err = streamFastq(name, inReader, sbuff, seqChan, ctrlChanIn, ctrlChanOut, &lineCounter, qBase, gaps, true)\n\t\t\t\t\tfor _, l := range sbuff {\n\t\t\t\t\t\tems := fmt.Sprintf(\"Discarded line: %s\", err)\n\t\t\t\t\t\tserr := &simpleSeq{Err: 
errors.New(ems), StartLine: lineCounter, Seq: l.Line, File: name}\n\t\t\t\t\t\tseqChan <- serr\n\t\t\t\t\t}\n\t\t\t\t\tctrlChanOut <- StreamExited\n\t\t\t\t\tinFh.Close()\n\t\t\t\t\treturn\n\t\t\t\t} else {\n\t\t\t\t\tlog.Fatal(\"Invalid command:\", int(cmd))\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\ttime.Sleep(BIG_SLEEP)\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn seqChan\n}", "func New() *Stream {\n\ts := &Stream{\n\t\tops: make([]api.Operator, 0),\n\t\tctx: context.Background(),\n\t}\n\ts.log = autoctx.GetLogger(s.ctx)\n\treturn s\n}", "func NewStream(options ...Option) Stream {\n\ts := Stream{\n\t\ttimeout: defaultTimeout,\n\t\taddr: defaultAddr,\n\t}\n\n\tfor _, opt := range options {\n\t\topt(&s)\n\t}\n\n\treturn s\n}", "func newNpipeIO(ctx context.Context, stdin, stdout, stderr string, terminal bool) (_ upstreamIO, err error) {\n\tctx, span := trace.StartSpan(ctx, \"newNpipeIO\")\n\tdefer span.End()\n\tdefer func() { oc.SetSpanStatus(span, err) }()\n\tspan.AddAttributes(\n\t\ttrace.StringAttribute(\"stdin\", stdin),\n\t\ttrace.StringAttribute(\"stdout\", stdout),\n\t\ttrace.StringAttribute(\"stderr\", stderr),\n\t\ttrace.BoolAttribute(\"terminal\", terminal))\n\n\tnio := &npipeio{\n\t\tstdin: stdin,\n\t\tstdout: stdout,\n\t\tstderr: stderr,\n\t\tterminal: terminal,\n\t}\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tnio.Close(ctx)\n\t\t}\n\t}()\n\tif stdin != \"\" {\n\t\tc, err := winio.DialPipe(stdin, nil)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tnio.sin = c\n\t}\n\tif stdout != \"\" {\n\t\tc, err := winio.DialPipe(stdout, nil)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tnio.sout = c\n\t}\n\tif stderr != \"\" {\n\t\tc, err := winio.DialPipe(stderr, nil)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tnio.serr = c\n\t}\n\treturn nio, nil\n}", "func New(name string) (*Stream, error) {\n\treturn NewStream(name, StdFileSystem)\n}", "func NewSubscriber(t *Topic) *Subscriber {\n\tsub := Subscriber{topic: t}\n\t_, ok := 
t.Message.(Encodeable)\n\tif !ok {\n\t\tsub.decoder = gob.NewDecoder(&sub.buf)\n\t}\n\treturn &sub\n}", "func RunStream(plugin plugin.YomoStreamPlugin, endpoint string) {\n\tlogger.Infof(\"plugin service [%s] start... [%s]\", plugin.Name(), endpoint)\n\n\t// activation service\n\tpanic(\"not impl\")\n}", "func CloneRefOfStream(n *Stream) *Stream {\n\tif n == nil {\n\t\treturn nil\n\t}\n\tout := *n\n\tout.Comments = CloneComments(n.Comments)\n\tout.SelectExpr = CloneSelectExpr(n.SelectExpr)\n\tout.Table = CloneTableName(n.Table)\n\treturn &out\n}", "func (c Client) NewStream(model string, content_type string, options map[string]interface{}) (<-chan Event, io.WriteCloser, error) {\n\ttoken, err := authorization.GetToken(c.watsonClient.Creds)\n\tif err != nil {\n\t\treturn nil, nil, errors.New(\"failed to acquire auth token: \" + err.Error())\n\t}\n\tu, err := url.Parse(c.watsonClient.Creds.Url)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tu.Scheme = \"wss\"\n\tq := url.Values{}\n\tq.Set(\"watson-token\", token)\n\tif len(model) > 0 {\n\t\tq.Set(\"model\", model)\n\t}\n\tu.RawQuery = q.Encode()\n\tu.Path += c.version + \"/recognize\"\n\n\torigin, err := url.Parse(c.watsonClient.Creds.Url)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tconfig := &websocket.Config{\n\t\tLocation: u,\n\t\tOrigin: origin,\n\t\tVersion: websocket.ProtocolVersionHybi13,\n\t}\n\tws, err := websocket.DialConfig(config)\n\tif err != nil {\n\t\treturn nil, nil, errors.New(\"error dialing websocket: \" + err.Error())\n\t}\n\toutput := make(chan Event, 100)\n\ts := stream{\n\t\tinput: output,\n\t\tcontentType: content_type,\n\t\tws: ws,\n\t\toptions: options,\n\t}\n\treturn output, &s, nil\n}" ]
[ "0.68944746", "0.63039505", "0.62568027", "0.6222708", "0.61755145", "0.61426306", "0.60578835", "0.5902729", "0.58879554", "0.58243144", "0.5664884", "0.5650889", "0.56348413", "0.5619227", "0.5600658", "0.55868405", "0.55131215", "0.5471899", "0.5466271", "0.54618245", "0.54381216", "0.542179", "0.5418192", "0.5389282", "0.5387705", "0.5368374", "0.53618836", "0.53499365", "0.533198", "0.5321665", "0.53129447", "0.53107506", "0.5310343", "0.53103364", "0.53027666", "0.5298965", "0.5269399", "0.52650535", "0.52601737", "0.524673", "0.5208737", "0.52010125", "0.5187495", "0.51579684", "0.51272833", "0.512508", "0.51199216", "0.5116065", "0.50970054", "0.5093247", "0.50844216", "0.50764877", "0.50729895", "0.5062068", "0.5051882", "0.50227904", "0.50226384", "0.5021453", "0.5012922", "0.49856076", "0.4969947", "0.4962684", "0.4955547", "0.49516317", "0.4947571", "0.4947188", "0.49428737", "0.49423197", "0.49331647", "0.4926673", "0.4912359", "0.4911484", "0.49071154", "0.49020338", "0.48961353", "0.4894924", "0.48907086", "0.48744097", "0.48692507", "0.4859099", "0.48450884", "0.48397163", "0.48378506", "0.4836649", "0.48293898", "0.4828006", "0.48184615", "0.48135418", "0.48128662", "0.4808382", "0.48001915", "0.4789381", "0.47830603", "0.4782641", "0.47779688", "0.47695872", "0.47684434", "0.47657642", "0.4764863", "0.4757544" ]
0.6649371
1
Name returns the name of the StreamToSubStream process
func (p *StreamToSubStream) Name() string { return p.name }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *Echo) GetStreamName() string {\n\treturn \"\"\n}", "func (s *Stream) Name() string { return s.file.Name() }", "func (o *ExportData) GetStreamName() string {\n\treturn \"\"\n}", "func (o *Kanban) GetStreamName() string {\n\treturn \"\"\n}", "func (o *ProjectWebhook) GetStreamName() string {\n\treturn \"\"\n}", "func (o StreamProcessorOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *StreamProcessor) pulumi.StringPtrOutput { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (o StreamOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *Stream) pulumi.StringPtrOutput { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (s *StreamInfo) Name() string {\n\treturn s.name\n}", "func (o StreamInputIotHubOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *StreamInputIotHub) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (format *HttpStreamFormat) Name() string {\n return \"http-stream\"\n}", "func (o LookupStreamingImageResultOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v LookupStreamingImageResult) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (r *FirehoseDeliveryStream) Name() pulumi.StringOutput {\n\treturn (pulumi.StringOutput)(r.s.State[\"name\"])\n}", "func ReleaseStreamFor(name string) string {\n\tif name == LatestReleaseName {\n\t\treturn StableImageStream\n\t}\n\n\treturn fmt.Sprintf(\"%s-%s\", StableImageStream, name)\n}", "func (o FolderSinkOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *FolderSink) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (o TopicRuleKinesisOutput) StreamName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v TopicRuleKinesis) string { return v.StreamName }).(pulumi.StringOutput)\n}", "func ReleaseNameFrom(stream string) string {\n\tif stream == StableImageStream {\n\t\treturn LatestReleaseName\n\t}\n\n\treturn strings.TrimPrefix(stream, fmt.Sprintf(\"%s-\", StableImageStream))\n}", 
"func (a *ExternalAgentProcess) Name() string {\n\treturn path.Base(a.cmd.Path)\n}", "func (o TopicRuleErrorActionKinesisOutput) StreamName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v TopicRuleErrorActionKinesis) string { return v.StreamName }).(pulumi.StringOutput)\n}", "func ChildProcessName(ctx context.Context) string {\n\tif v := ctx.Value(oversightValue(\"name\")); v != nil {\n\t\treturn v.(string)\n\t}\n\treturn \"\"\n}", "func (t *TransformCmd) Name() string {\n\treturn t.fs.Name()\n}", "func (o LoggerEventhubOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LoggerEventhub) string { return v.Name }).(pulumi.StringOutput)\n}", "func (s *StreamingDriver) GetName() string {\n\treturn streamDriverName\n}", "func (p *StreamPublisher) GetName() string {\n\treturn p.Name\n}", "func (o TopicRuleFirehoseOutput) DeliveryStreamName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v TopicRuleFirehose) string { return v.DeliveryStreamName }).(pulumi.StringOutput)\n}", "func (standardProcess) Name() string {\n\treturn \"standard.process\"\n}", "func (c Sub) Name() string {\n\treturn \"SUB\"\n}", "func GetActiveSubroutineName(program uint32, shadertype uint32, index uint32, bufsize int32, length *int32, name *uint8) {\n\tsyscall.Syscall6(gpGetActiveSubroutineName, 6, uintptr(program), uintptr(shadertype), uintptr(index), uintptr(bufsize), uintptr(unsafe.Pointer(length)), uintptr(unsafe.Pointer(name)))\n}", "func (p *procBase) Name() string {\n\treturn p.name\n}", "func (o TaskOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Task) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (o TaskOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Task) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (p *FilteredPaginationSubProtocolV1) Name() string {\n\treturn \"/pagination-with-filter/version/1\"\n}", "func (ys *YoutubeStreamService) Name() string {\n\treturn \"youtube_stream_service\"\n}", 
"func windowsPipeName(addr string) string {\n\treturn `\\\\.\\pipe\\` + addr\n}", "func (o TopicRuleKinesisPtrOutput) StreamName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *TopicRuleKinesis) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.StreamName\n\t}).(pulumi.StringPtrOutput)\n}", "func (o TopicRuleErrorActionFirehoseOutput) DeliveryStreamName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v TopicRuleErrorActionFirehose) string { return v.DeliveryStreamName }).(pulumi.StringOutput)\n}", "func (o StreamInputIotHubOutput) StreamAnalyticsJobName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *StreamInputIotHub) pulumi.StringOutput { return v.StreamAnalyticsJobName }).(pulumi.StringOutput)\n}", "func (p *FilteredPaginationSubProtocolV0) Name() string {\n\treturn \"/pagination-with-filter/version/0\"\n}", "func (o DatastoreFileshareOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *DatastoreFileshare) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (o TopicRuleErrorActionKinesisPtrOutput) StreamName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *TopicRuleErrorActionKinesis) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.StreamName\n\t}).(pulumi.StringPtrOutput)\n}", "func (o QueueOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Queue) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (p *Process) Name() (string, error) {\n\treturn p.NameWithContext(context.Background())\n}", "func (p *Port) ParentStream() *Port {\n\treturn p.parStr\n}", "func (o ImagePipelineOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ImagePipeline) pulumi.StringPtrOutput { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (s *Source) Name() string {\n\treturn \"crtsh\"\n}", "func (se *StdoutEmitter) Name() string {\n\treturn se.name\n}", "func (l *LogSink) Name() string {\n\treturn l.Sink\n}", "func NewStreamToSubStream(wf 
*scipipe.Workflow, name string) *StreamToSubStream {\n\tstss := &StreamToSubStream{\n\t\tname: name,\n\t\tIn: scipipe.NewInPort(\"in\"),\n\t\tOutSubStream: scipipe.NewOutPort(\"out_substream\"),\n\t}\n\twf.AddProc(stss)\n\treturn stss\n}", "func (o TopicRuleFirehosePtrOutput) DeliveryStreamName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *TopicRuleFirehose) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.DeliveryStreamName\n\t}).(pulumi.StringPtrOutput)\n}", "func (sh SubdirectoryHeader) Name() string {\n\treturn string(sh.SubdirectoryName[0 : sh.TypeAndNameLength&0xf])\n}", "func GetActiveSubroutineName(program uint32, shadertype uint32, index uint32, bufsize int32, length *int32, name *int8) {\n C.glowGetActiveSubroutineName(gpGetActiveSubroutineName, (C.GLuint)(program), (C.GLenum)(shadertype), (C.GLuint)(index), (C.GLsizei)(bufsize), (*C.GLsizei)(unsafe.Pointer(length)), (*C.GLchar)(unsafe.Pointer(name)))\n}", "func Ptsname(fd int) (name string, err error) {\n\tr0, _, e1 := syscall_syscall(SYS___PTSNAME_A, uintptr(fd), 0, 0)\n\tname = u2s(unsafe.Pointer(r0))\n\tif e1 != 0 {\n\t\terr = errnoErr(e1)\n\t}\n\treturn\n}", "func (o PublicDelegatedPrefixPublicDelegatedSubPrefixOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v PublicDelegatedPrefixPublicDelegatedSubPrefix) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (o PolicyFileShareOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *PolicyFileShare) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (o ApplicationStatusSyncComparedToDestinationOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApplicationStatusSyncComparedToDestination) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (o FlowOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Flow) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (config *Configuration) PIDFileName() string {\n name := 
\"~/.run/\" + config.ServiceName + \".pid\"\n name = Util.AbsolutePath(name)\n return name\n}", "func (s *ShellTask) TaskName() string {\n\treturn s.name\n}", "func (s *ShellTask) TaskName() string {\n\treturn s.name\n}", "func (t *Task) Name() string { t.mutex.RLock(); defer t.mutex.RUnlock(); return t.name }", "func (o BlobOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Blob) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (ct *CreateThread) Name() command.Name {\n\treturn CreateThreadName\n}", "func (proc BuildOrderProcess) Name() string {\n\treturn proc.name\n}", "func (o IntegrationRuntimeManagedOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *IntegrationRuntimeManaged) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (j *JobScaleCommand) Name() string { return \"job scale\" }", "func (o RegistryTaskBaseImageTriggerOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v RegistryTaskBaseImageTrigger) string { return v.Name }).(pulumi.StringOutput)\n}", "func (p *processor) Name() string {\n\treturn p.name\n}", "func (o *OMXPlayer) SubtitleStream(index int) int {\n\treturn -1\n}", "func (ScrapeVserver) Name() string {\n\treturn VserverSubsystem\n}", "func (o TopicRuleErrorActionFirehosePtrOutput) DeliveryStreamName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *TopicRuleErrorActionFirehose) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.DeliveryStreamName\n\t}).(pulumi.StringPtrOutput)\n}", "func (o LoggerEventhubPtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *LoggerEventhub) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Name\n\t}).(pulumi.StringPtrOutput)\n}", "func (o PublisherOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v Publisher) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (o RegistryTaskSourceTriggerOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v 
RegistryTaskSourceTrigger) string { return v.Name }).(pulumi.StringOutput)\n}", "func (o SignalingChannelOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *SignalingChannel) pulumi.StringPtrOutput { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (p *StreamToSubStream) Run() {\n\tdefer p.OutSubStream.Close()\n\n\tscipipe.Debug.Println(\"Creating new information packet for the substream...\")\n\tsubStreamIP := scipipe.NewIP(\"\")\n\tscipipe.Debug.Printf(\"Setting in-port of process %s to IP substream field\\n\", p.Name())\n\tsubStreamIP.SubStream = p.In\n\n\tscipipe.Debug.Printf(\"Sending sub-stream IP in process %s...\\n\", p.Name())\n\tp.OutSubStream.Send(subStreamIP)\n\tscipipe.Debug.Printf(\"Done sending sub-stream IP in process %s.\\n\", p.Name())\n}", "func (o StreamingJobSkuOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v StreamingJobSku) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (o RouterNatSubnetworkToNatOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v RouterNatSubnetworkToNat) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (p *Process) Name() string {\n\treturn p.Builder.Name\n}", "func (o ApplicationStatusSyncComparedToSourceHelmFileParametersOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApplicationStatusSyncComparedToSourceHelmFileParameters) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (o HostingReleaseOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *HostingRelease) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (w *Worker) Name() string {\n\tw.mu.RLock()\n\tdefer w.mu.RUnlock()\n\n\treturn w.wr.name\n}", "func (o EventIntegrationOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *EventIntegration) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (p *Port) Stream() *Port {\n\treturn p.sub\n}", "func GetActiveSubroutineName(program uint32, shadertype 
uint32, index uint32, bufSize int32, length *int32, name *uint8) {\n\tC.glowGetActiveSubroutineName(gpGetActiveSubroutineName, (C.GLuint)(program), (C.GLenum)(shadertype), (C.GLuint)(index), (C.GLsizei)(bufSize), (*C.GLsizei)(unsafe.Pointer(length)), (*C.GLchar)(unsafe.Pointer(name)))\n}", "func GetActiveSubroutineName(program uint32, shadertype uint32, index uint32, bufSize int32, length *int32, name *uint8) {\n\tC.glowGetActiveSubroutineName(gpGetActiveSubroutineName, (C.GLuint)(program), (C.GLenum)(shadertype), (C.GLuint)(index), (C.GLsizei)(bufSize), (*C.GLsizei)(unsafe.Pointer(length)), (*C.GLchar)(unsafe.Pointer(name)))\n}", "func (c *Chrome) SubtitleStream(s int) error {\n\tmsg := wsMessage{\n\t\tComponent: \"player\",\n\t\tMethod: \"subtitleStream\",\n\t\tArguments: map[string]string{\n\t\t\t\"rate\": strconv.Itoa(s),\n\t\t},\n\t}\n\n\tsend := c.ConnViewer.getChanSend()\n\tsend <- msg\n\tres := <-c.ConnViewer.getChanReceive()\n\treturn handleRes(res)\n}", "func (o StreamingJobSkuResponseOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v StreamingJobSkuResponse) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func ProcName(_ int, procName string) string { return procName }", "func ProcName(_ int, procName string) string { return procName }", "func (o SubscriptionOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Subscription) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func Name() string {\n\treturn types.SubModuleName\n}", "func Name() string {\n\treturn types.SubModuleName\n}", "func Name() string {\n\treturn types.SubModuleName\n}", "func Name() string {\n\treturn types.SubModuleName\n}", "func (o AnomalySubscriptionOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *AnomalySubscription) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (o RegistryTaskTimerTriggerOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v RegistryTaskTimerTrigger) string { 
return v.Name }).(pulumi.StringOutput)\n}", "func (o TopicRuleTimestreamDimensionOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v TopicRuleTimestreamDimension) string { return v.Name }).(pulumi.StringOutput)\n}", "func (o DataConnectorOfficePowerBiOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *DataConnectorOfficePowerBi) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (o ApplicationSpecDestinationOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApplicationSpecDestination) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (s *StopEvent) Name() string {\n\treturn s.name\n}", "func (o ApplicationStatusWorkflowContextbackendOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApplicationStatusWorkflowContextbackend) *string { return v.Name }).(pulumi.StringPtrOutput)\n}" ]
[ "0.6440815", "0.62418556", "0.61738175", "0.61010945", "0.6079546", "0.60654175", "0.6047112", "0.58845854", "0.58486265", "0.5778065", "0.56679744", "0.56591153", "0.5604969", "0.5579104", "0.5553676", "0.5548935", "0.55190974", "0.5492352", "0.54096514", "0.5362376", "0.5344448", "0.5344161", "0.53249776", "0.5301911", "0.52995723", "0.52593035", "0.52521676", "0.52474236", "0.5244666", "0.5244666", "0.5238646", "0.52190256", "0.5218434", "0.5210404", "0.51955175", "0.5140083", "0.5139717", "0.512657", "0.51255167", "0.5120213", "0.5118361", "0.51119775", "0.50981957", "0.5087276", "0.50684386", "0.50681674", "0.5064535", "0.50609744", "0.5058659", "0.50552374", "0.5053965", "0.5043834", "0.503607", "0.5027848", "0.5022127", "0.50164074", "0.50129676", "0.50129676", "0.4998481", "0.49943936", "0.49587145", "0.49537918", "0.49521527", "0.4952066", "0.4949635", "0.49490634", "0.49480802", "0.4944729", "0.49430114", "0.49354428", "0.4931918", "0.49314937", "0.4925033", "0.49162173", "0.4914568", "0.49123335", "0.49089423", "0.4908566", "0.49077034", "0.49059242", "0.49057135", "0.49002862", "0.48931283", "0.48931283", "0.48833644", "0.48811364", "0.48774692", "0.48774692", "0.48755735", "0.48716825", "0.48716825", "0.48716825", "0.48716825", "0.48709825", "0.48647785", "0.48646402", "0.48611218", "0.4859569", "0.4854486", "0.48466983" ]
0.75903994
0
Connected tells whether all the ports of the process are connected
func (p *StreamToSubStream) Connected() bool { return p.In.Connected() && p.OutSubStream.Connected() }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r *Runtime) IsConnected() bool { return r.isConnected }", "func (o *Switch) Connected() bool {\n\tfor _, out := range o.outputs {\n\t\tif !out.Connected() {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (d *Device) Connected() bool {\n\tdata := []byte{0}\n\td.bus.ReadRegister(uint8(d.Address), WHO_AM_I, data)\n\treturn data[0] == 0x40\n}", "func (orch *Orchestrator) Connected() (bool, error) {\n\n\torchObj, err := orch.client.GetOrchestration(orch.orch)\n\tif err == nil {\n\t\treturn orchObj.Status.Status == \"success\", nil\n\t}\n\n\treturn false, err\n}", "func (node *hostNode) IsConnected() bool {\n\treturn node.PeersConnected() > 0\n}", "func (d *Device) Connected() bool {\n\tdata := d.buf[:1]\n\td.bus.ReadRegister(uint8(d.Address), WHO_AM_I, data)\n\treturn data[0] == 0x6A\n}", "func (orchCol *OrchestratorCollection) Connected() (bool, error) {\n\n\tfor _, orch := range orchCol.Orchestrators {\n\t\tif connected, err := orch.Connected(); err != nil || !connected {\n\t\t\treturn false, err\n\t\t}\n\t}\n\n\treturn true, nil\n}", "func (p *Port) Connected(q *Port) bool {\n\tif b, ok := p.dests[q]; ok {\n\t\treturn b && q.src == p\n\t}\n\treturn false\n}", "func (cmd *EnableCommand) IsConnected() bool {\n\treturn true\n}", "func (p *Peer) Connected() bool {\n\treturn atomic.LoadInt32(&p.connected) != 0 &&\n\t\tatomic.LoadInt32(&p.disconnect) == 0\n}", "func (d *Device) Connected() bool {\n\tdata1, data2 := d.buf[:1], d.buf[1:2]\n\td.bus.ReadRegister(d.AccelAddress, WHO_AM_I, data1)\n\td.bus.ReadRegister(d.MagAddress, WHO_AM_I_M, data2)\n\treturn data1[0] == 0x68 && data2[0] == 0x3D\n}", "func (r *Producer) Connected() bool {\n\treturn (atomic.LoadInt32(&r.open) == 1)\n}", "func (os Outputs) Connected(name string) bool {\n\tfor _, o := range os {\n\t\tm, err := path.Match(name, o.Name)\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\n\t\tif m && o.Connected {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (d *Device) Connected() 
bool {\n\tid, err := d.readRegister(REG_DEVICE_ID)\n\tif nil != err {\n\t\treturn false\n\t}\n\treturn isDeviceIDValid(id)\n}", "func (conn *Conn) Connected() bool {\n\tconn.mu.RLock()\n\tdefer conn.mu.RUnlock()\n\treturn conn.connected\n}", "func (p *Proxy) IsConnected() <-chan struct{} {\n\tfor p.webSocketClient == nil {\n\t\ttime.Sleep(50 * time.Millisecond)\n\t}\n\treturn p.webSocketClient.Connected()\n}", "func (c *TcpClient) Connected() bool {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tlog.Printf(\"Checking if connected: %t\\n\", c.connected)\n\treturn c.connected\n}", "func (r *Resource) Connected() bool {\n\tin, err := r.mgr.GetInput(r.name)\n\tif err != nil {\n\t\tr.log.Debugf(\"Failed to obtain input resource '%v': %v\", r.name, err)\n\t\tr.mErrNotFound.Incr(1)\n\t\treturn false\n\t}\n\treturn in.Connected()\n}", "func (uplink *SenseUplink) IsConnected() bool {\n\tif uplink == nil || uplink.Global == nil {\n\t\treturn false\n\t}\n\tselect {\n\tcase <-uplink.Global.Disconnected():\n\t\treturn false\n\tdefault:\n\t\treturn true\n\t}\n}", "func (node *hostNode) PeersConnected() int {\n\treturn len(node.host.Network().Peers())\n}", "func (os Outputs) Connected(name string) bool {\n\tfor _, o := range os {\n\t\tif !o.Connected {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Check legacy name\n\t\tm, err := path.Match(name, o.Name)\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\t\tif m {\n\t\t\treturn true\n\t\t}\n\n\t\t// Check extended name\n\t\tm, err = path.Match(name, o.Name+\"-\"+o.MonitorID)\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\t\tif m {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (r *Retry) Connected() bool {\n\treturn r.wrapped.Connected()\n}", "func (player *Player) Connected() bool {\n\treturn player.connected\n}", "func (s *Server) Connected() bool {\n\treturn s.connection != nil\n}", "func (ctn *Connection) IsConnected() bool {\n\treturn ctn.conn != nil\n}", "func (ctn *Connection) IsConnected() bool {\n\treturn ctn.conn != nil\n}", 
"func (v *VectorBLE) Connected() bool {\n\treturn v.ble.Connected()\n}", "func (client *Client) Connected() bool {\n\treturn client.conn != nil\n}", "func (b *Base) IsConnected() bool {\n\treturn b.Connected\n}", "func (i *ircconn) IsConnected() bool {\n\treturn atomic.LoadInt64(&i.connected) > 0\n}", "func (transporter *IPCTransporter) IsConnected() bool {\n\treturn true\n}", "func (c *connState) IsConnected() bool {\n\treturn c.isConnected > 0\n}", "func (d *Device) IsConnected() bool {\n\tif d.m != nil {\n\t\treturn d.m.IsConnected()\n\t}\n\treturn false\n}", "func (u *UnityServer) Connected() bool {\n\treturn u.connected\n}", "func (conn *Conn) IsConnected() bool {\n\treturn conn.isConnected\n}", "func (s State) Connected() bool {\n\treturn s == Connected\n}", "func (c *Connection) IsConnected() bool {\n\treturn c.ws != nil\n}", "func (p *pahoClient) IsConnected(c chan bool) {\n\tc <- p.client.IsConnected()\n}", "func (c *Client) Connected() bool {\n\treturn c.ws.Connected()\n}", "func (ht *HeadTracker) Connected() bool {\n\tht.headMutex.RLock()\n\tdefer ht.headMutex.RUnlock()\n\n\treturn ht.connected\n}", "func (r *Resource) Connected() (isConnected bool) {\n\tif err := interop.AccessInput(context.Background(), r.mgr, r.name, func(i types.Input) {\n\t\tisConnected = i.Connected()\n\t}); err != nil {\n\t\tr.log.Debugf(\"Failed to obtain input resource '%v': %v\", r.name, err)\n\t\tr.mErrNotFound.Incr(1)\n\t}\n\treturn\n}", "func (conn *Connection) IsConnected() bool {\n\tif conn.done() {\n\t\treturn false\n\t}\n\tconn.wGroup.Add(1)\n\tdefer func() {\n\t\tconn.wGroup.Done()\n\t}()\n\treturn conn.Disconnected() == false\n}", "func (db *BlabDb) Connected() bool {\n\treturn db.Ping() == nil\n}", "func (c *Client) isConnected() bool {\n\treturn c.connected.Load()\n}", "func (client *Client) Connected() bool {\n\tclient.mutex.RLock()\n\tdefer client.mutex.RUnlock()\n\n\treturn client.conn != nil\n}", "func (hs *Handshake) Connected(c p2p.Conn) 
{\n\tfmt.Println(\"connected\")\n}", "func (client *RPCConnection) IsConnected() bool {\n\treturn client.isConnected\n}", "func (h *EmptyNSMMonitorHandler) Connected(map[string]*connection.Connection) {}", "func (conn Connection) IsConnected() bool {\n\treturn conn.handle != nil\n}", "func (notifee *Notifee) Connected(net network.Network, conn network.Conn) {\n\tnotifee.logger.Info().Msgf(\n\t\t\"Connected to peer %s\",\n\t\tconn.RemotePeer().Pretty(),\n\t)\n\n}", "func (m *MySQL)Connected()bool{\n\treturn m.db.Ping() == nil\n}", "func (c Client) Connected() bool {\n\treturn c.connected\n}", "func (p *Plugin) Connected() bool {\n\treturn false\n}", "func (c *Checker) IsConnected() bool {\n\tc.Lock()\n\tdefer c.Unlock()\n\treturn c.connected\n}", "func (client *NpmClient) IsConnected() bool {\n\tif (client.rpcClient == nil) || (client.rpcClient.ClientConn == nil) || (client.rpcClient.ClientConn.GetState() != connectivity.Ready) {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (ctrl *AgentController) Connected() bool {\n\tif mockedController {\n\t\treturn true\n\t}\n\treturn ctrl.connected\n}", "func (c *INDIClient) IsConnected() bool {\n\tif c.conn != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (conn *Conn) IsConnected() bool {\n\tconn.mux.Lock()\n\tconnected := conn.buf.conn != nil\n\tconn.mux.Unlock()\n\n\treturn connected\n}", "func (client *client) IsConnected() bool {\n\treturn \"\" != client.address\n}", "func (bn *BasicNotifiee) Connected(n net.Network, conn net.Conn) {\n\tglog.V(4).Infof(\"Notifiee - Connected. 
Local: %v - Remote: %v\", peer.IDHexEncode(conn.LocalPeer()), peer.IDHexEncode(conn.RemotePeer()))\n\tif bn.monitor != nil {\n\t\tbn.monitor.LogNewConn(peer.IDHexEncode(conn.LocalPeer()), peer.IDHexEncode(conn.RemotePeer()))\n\t}\n}", "func (b *OGame) IsConnected() bool {\n\treturn atomic.LoadInt32(&b.isConnectedAtom) == 1\n}", "func (c *Connection) IsConnected() bool {\n\treturn c.clientConn != nil\n}", "func (r *ReadUntil) Connected() bool {\n\treturn r.wrapped.Connected()\n}", "func (c *client) IsConnected() bool {\n\treturn c.connected && c.ws != nil\n}", "func (c *Client) IsConnected() bool {\n\treturn c.isConnected\n}", "func (c *Connection) IsConnected() bool {\n\tc.mutex.Lock()\n\tdefer c.mutex.Unlock()\n\treturn c.isConnectedLocked()\n}", "func (electionManager *ElectionManager) isConnected() bool {\n\tif electionManager.ZKClientConn == nil {\n\t\treturn false\n\t} else if electionManager.ZKClientConn.State() != zk.StateConnected {\n\t\treturn false\n\t}\n\treturn true\n}", "func (s *Server) emitConnected(c *Conn) {\n\tfor _, l := range s.connectedEvtListeners {\n\t\tl(c)\n\t}\n}", "func (t *thing) IsConnected() bool {\n\treturn t.client != nil && t.client.IsConnected()\n}", "func (sf *Client) IsConnected() bool {\n\treturn sf.connectStatus() == connected\n}", "func (gph *Graph) IsConnected() bool {\n\tcount := gph.count\n\tvisited := make([]bool, count)\n\tvisited[0] = true\n\tgph.dfsUtil(0, visited)\n\n\tfor i := 0; i < count; i++ {\n\t\tif visited[i] == false {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (s *Service) IsConnected(ctx context.Context) (bool, error) {\n\tprops, err := s.GetProperties(ctx)\n\tif err != nil {\n\t\treturn false, errors.Wrap(err, \"unable to get properties\")\n\t}\n\tconnected, err := props.GetBool(shillconst.ServicePropertyIsConnected)\n\tif err != nil {\n\t\treturn false, errors.Wrap(err, \"unable to get IsConnected from properties\")\n\t}\n\treturn connected, nil\n}", "func (o *StorageHitachiPortAllOf) 
HasPortConnection() bool {\n\tif o != nil && o.PortConnection != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (cm *RPCConnManager) ConnectedCount() int32 {\n\treturn cm.server.ConnectedCount()\n}", "func (w *xcWallet) connected() bool {\n\tw.mtx.RLock()\n\tdefer w.mtx.RUnlock()\n\treturn w.hookedUp\n}", "func (c *PGClient) IsConnected() bool {\n\tres, err := c.DB.Exec(\"select 1\")\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn res.RowsReturned() == 1\n}", "func (m *ConnManager) Connected(k, v interface{}) {\n\tm.connections.Store(k, v)\n\n\tatomic.AddInt32(m.Online, 1)\n}", "func (cl *Client) IsConnected() bool {\n\treturn cl.client != nil && cl.conn != nil\n}", "func Connected(g Iterator) bool {\n\t_, count := components(g)\n\treturn count == 1\n}", "func (c *Client) IsConnected() bool {\n\t// TODO: auto-select transport based on BMC capabilities\n\treturn c.isConnected()\n}", "func isConnected(db *sql.DB) bool {\n\treturn db.Ping() == nil\n}", "func (transporter *Transporter) IsConnected() bool {\n\treturn transporter.isConnected && transporter.isHandling && !transporter.isConnecting\n}", "func Connected() bool {\n\t_, err := http.Get(\"http://www.google.co.in\")\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn true\n}", "func (qf *QuickFind) Connected(p int, q int) bool {\n\tif qf.ids[p] == qf.ids[q] {\n\t\treturn true\n\t}\n\treturn false\n}", "func (imd *InMemoryDb) Connected() bool {\n\treturn imd.connected\n}", "func (p *Port) DirectlyConnected() error {\n\tif p.direction != DIRECTION_IN {\n\t\treturn errors.New(\"can only check in ports\")\n\t}\n\n\tif p.PrimitiveType() {\n\t\tif p.src == nil {\n\t\t\treturn errors.New(p.Name() + \" not connected\")\n\t\t}\n\t\tif p.src.direction != DIRECTION_OUT && p.src.operator.name != \"\" {\n\t\t\treturn errors.New(p.Name() + \" only out ports can be connected with in ports\")\n\t\t}\n\t\tif p.src.src != nil {\n\t\t\treturn errors.New(p.Name() + \" has connected source \" + p.src.Name() + \": \" + 
p.src.src.Name())\n\t\t}\n\t\tfor dest := range p.src.dests {\n\t\t\tif dest == p {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\treturn errors.New(p.Name() + \" not connected back from \" + p.src.Name())\n\t}\n\n\tif p.sub != nil {\n\t\tif err := p.sub.DirectlyConnected(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\n\tfor _, sub := range p.subs {\n\t\tif err := sub.DirectlyConnected(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (o *StorageHitachiPortAllOf) GetPortConnectionOk() (*string, bool) {\n\tif o == nil || o.PortConnection == nil {\n\t\treturn nil, false\n\t}\n\treturn o.PortConnection, true\n}", "func (self *SinglePad) Connected() bool{\n return self.Object.Get(\"connected\").Bool()\n}", "func (qf *QuickFind) Connected(p, q int) bool {\n\tpFind, pErr := qf.Find(p)\n\tif pErr != nil {\n\t\treturn false\n\t}\n\tqFind, qErr := qf.Find(q)\n\tif qErr != nil {\n\t\treturn false\n\t}\n\treturn qFind == pFind\n}", "func (c *Client) IsConnected() bool {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\treturn c.Client != nil\n}", "func (gph *Graph) IsConnected() bool {\n\tcount := gph.count\n\tvisited := make([]bool, count)\n\tvisited[0] = true\n\tgph.DFSUtil(0, visited)\n\n\tfor i := 0; i < count; i++ {\n\t\tif visited[i] == false {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func isPortAvailable(port int) bool {\n\tln, err := net.Listen(\"tcp\", fmt.Sprintf(\":%d\", port))\n\tif err != nil { // port not available\n\t\treturn false\n\t}\n\n\tif err := ln.Close(); err != nil {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (vm *VM) Connected(id ids.ShortID) error {\n\treturn nil\n}", "func (kt *SharedKeybaseTransport) IsConnected() bool {\n\tkt.mutex.Lock()\n\tdefer kt.mutex.Unlock()\n\treturn kt.transport != nil && kt.transport.IsConnected()\n}", "func (b *presenceBuilder) Connected(connected bool) *presenceBuilder {\n\tb.opts.connected = connected\n\n\treturn b\n}", "func (ct *ConnTracker) HasConnectedWith(protocol 
connection.Protocol) bool {\n\tct.RLock()\n\tdefer ct.RUnlock()\n\tfor _, ci := range ct.connectionInfo {\n\t\tif ci.Protocol == protocol {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (u *Upstream) IsConnectedTo(serverPublicKey []byte) bool {\n\treturn u.client.IsConnectedTo(serverPublicKey)\n}", "func (o *IaasDeviceStatusAllOf) HasConnectionStatus() bool {\n\tif o != nil && o.ConnectionStatus != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func TestHostConnectabilityStatus(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\tt.Parallel()\n\tht, err := newHostTester(t.Name())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer ht.Close()\n\n\t// TODO: this causes an ndf, because it relies on the host tester starting up\n\t// and fully returning faster than the first check, which isnt always the\n\t// case. This check is disabled for now, but can be fixed by using the\n\t// Disrupt() pattern.\n\t// if ht.host.ConnectabilityStatus() != modules.HostConnectabilityStatusChecking {\n\t// \t\tt.Fatal(\"expected connectability state to initially be ConnectablityStateChecking\")\n\t// }\n\n\tsuccess := false\n\tfor start := time.Now(); time.Since(start) < 30*time.Second; time.Sleep(time.Millisecond * 10) {\n\t\tif ht.host.ConnectabilityStatus() == modules.HostConnectabilityStatusConnectable {\n\t\t\tsuccess = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !success {\n\t\tt.Fatal(\"expected connectability state to flip to HostConnectabilityStatusConnectable\")\n\t}\n}", "func (c *Notification2Client) IsConnected() bool {\n\tc.mtx.RLock()\n\tisConnected := c.connected\n\tc.mtx.RUnlock()\n\treturn isConnected\n}" ]
[ "0.7308632", "0.70478034", "0.69318163", "0.6841799", "0.6802986", "0.68025744", "0.6785902", "0.67671365", "0.6764436", "0.6752102", "0.6681452", "0.66715723", "0.66356033", "0.65785146", "0.6510589", "0.6454273", "0.6446856", "0.64368707", "0.64300346", "0.6411145", "0.64080614", "0.6402267", "0.6394091", "0.63861877", "0.63780564", "0.63780564", "0.63568527", "0.6353326", "0.6349232", "0.6336635", "0.633436", "0.63307375", "0.6322293", "0.63144857", "0.6310386", "0.63102967", "0.6309965", "0.6301863", "0.62967014", "0.6295867", "0.629534", "0.6282373", "0.6267832", "0.6252222", "0.6247821", "0.62458825", "0.6216268", "0.6211502", "0.6206944", "0.6205494", "0.61864954", "0.61861837", "0.618469", "0.6183598", "0.6146051", "0.6107139", "0.61001045", "0.60778207", "0.60743034", "0.6072621", "0.6070544", "0.60308313", "0.6014897", "0.5997861", "0.5990728", "0.59886086", "0.5986172", "0.5979612", "0.59707665", "0.59622407", "0.5954018", "0.5952233", "0.5944155", "0.590758", "0.5897293", "0.5893811", "0.58788264", "0.5877609", "0.5875358", "0.58753085", "0.58669853", "0.58447963", "0.58444124", "0.58002126", "0.5781776", "0.57769597", "0.57696784", "0.5745917", "0.57356054", "0.57294405", "0.5721216", "0.5660537", "0.56442183", "0.56282836", "0.5608233", "0.55967504", "0.5586947", "0.55670136", "0.55660725", "0.55409193" ]
0.602785
62
InPorts returns all the inports for the process
func (p *StreamToSubStream) InPorts() map[string]*scipipe.InPort { return map[string]*scipipe.InPort{ p.In.Name(): p.In, } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func getOpenPorts(n int) []string {\n\tports := []string{}\n\tfor i := 0; i < n; i++ {\n\t\tts := httptest.NewServer(http.NewServeMux())\n\t\tdefer ts.Close()\n\t\tu, err := url.Parse(ts.URL)\n\t\trtx.Must(err, \"Could not parse url to local server:\", ts.URL)\n\t\tports = append(ports, \":\"+u.Port())\n\t}\n\treturn ports\n}", "func (qs SysDBQuerySet) PortIn(port int, portRest ...int) SysDBQuerySet {\n\tiArgs := []interface{}{port}\n\tfor _, arg := range portRest {\n\t\tiArgs = append(iArgs, arg)\n\t}\n\treturn qs.w(qs.db.Where(\"port IN (?)\", iArgs))\n}", "func GetPorts(lookupPids bool) map[string]GOnetstat.Process {\n\tports := make(map[string]GOnetstat.Process)\n\tnetstat, _ := GOnetstat.Tcp(lookupPids)\n\tvar net string\n\t//netPorts := make(map[string]GOnetstat.Process)\n\t//ports[\"tcp\"] = netPorts\n\tnet = \"tcp\"\n\tfor _, entry := range netstat {\n\t\tif entry.State == \"LISTEN\" {\n\t\t\tport := strconv.FormatInt(entry.Port, 10)\n\t\t\tports[net+\":\"+port] = entry\n\t\t}\n\t}\n\tnetstat, _ = GOnetstat.Tcp6(lookupPids)\n\t//netPorts = make(map[string]GOnetstat.Process)\n\t//ports[\"tcp6\"] = netPorts\n\tnet = \"tcp6\"\n\tfor _, entry := range netstat {\n\t\tif entry.State == \"LISTEN\" {\n\t\t\tport := strconv.FormatInt(entry.Port, 10)\n\t\t\tports[net+\":\"+port] = entry\n\t\t}\n\t}\n\tnetstat, _ = GOnetstat.Udp(lookupPids)\n\t//netPorts = make(map[string]GOnetstat.Process)\n\t//ports[\"udp\"] = netPorts\n\tnet = \"udp\"\n\tfor _, entry := range netstat {\n\t\tport := strconv.FormatInt(entry.Port, 10)\n\t\tports[net+\":\"+port] = entry\n\t}\n\tnetstat, _ = GOnetstat.Udp6(lookupPids)\n\t//netPorts = make(map[string]GOnetstat.Process)\n\t//ports[\"udp6\"] = netPorts\n\tnet = \"udp6\"\n\tfor _, entry := range netstat {\n\t\tport := strconv.FormatInt(entry.Port, 10)\n\t\tports[net+\":\"+port] = entry\n\t}\n\treturn ports\n}", "func exposedPorts(node *parser.Node) [][]string {\n\tvar allPorts [][]string\n\tvar ports []string\n\tfroms := FindAll(node, 
command.From)\n\texposes := FindAll(node, command.Expose)\n\tfor i, j := len(froms)-1, len(exposes)-1; i >= 0; i-- {\n\t\tfor ; j >= 0 && exposes[j] > froms[i]; j-- {\n\t\t\tports = append(nextValues(node.Children[exposes[j]]), ports...)\n\t\t}\n\t\tallPorts = append([][]string{ports}, allPorts...)\n\t\tports = nil\n\t}\n\treturn allPorts\n}", "func getExposedPortsFromISI(image *imagev1.ImageStreamImage) ([]corev1.ContainerPort, error) {\n\t// file DockerImageMetadata\n\terr := imageWithMetadata(&image.Image)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar ports []corev1.ContainerPort\n\n\tvar exposedPorts = image.Image.DockerImageMetadata.Object.(*dockerapiv10.DockerImage).ContainerConfig.ExposedPorts\n\n\tif image.Image.DockerImageMetadata.Object.(*dockerapiv10.DockerImage).Config != nil {\n\t\tif exposedPorts == nil {\n\t\t\texposedPorts = make(map[string]struct{})\n\t\t}\n\n\t\t// add ports from Config\n\t\tfor exposedPort := range image.Image.DockerImageMetadata.Object.(*dockerapiv10.DockerImage).Config.ExposedPorts {\n\t\t\tvar emptyStruct struct{}\n\t\t\texposedPorts[exposedPort] = emptyStruct\n\t\t}\n\t}\n\n\tfor exposedPort := range exposedPorts {\n\t\tsplits := strings.Split(exposedPort, \"/\")\n\t\tif len(splits) != 2 {\n\t\t\treturn nil, fmt.Errorf(\"invalid port %s\", exposedPort)\n\t\t}\n\n\t\tportNumberI64, err := strconv.ParseInt(splits[0], 10, 32)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"invalid port number %s\", splits[0])\n\t\t}\n\t\tportNumber := int32(portNumberI64)\n\n\t\tvar portProto corev1.Protocol\n\t\tswitch strings.ToUpper(splits[1]) {\n\t\tcase \"TCP\":\n\t\t\tportProto = corev1.ProtocolTCP\n\t\tcase \"UDP\":\n\t\t\tportProto = corev1.ProtocolUDP\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"invalid port protocol %s\", splits[1])\n\t\t}\n\n\t\tport := corev1.ContainerPort{\n\t\t\tName: fmt.Sprintf(\"%d-%s\", portNumber, strings.ToLower(string(portProto))),\n\t\t\tContainerPort: portNumber,\n\t\t\tProtocol: 
portProto,\n\t\t}\n\n\t\tports = append(ports, port)\n\t}\n\n\treturn ports, nil\n}", "func getOpenPorts() string {\n\tcmd := \"./Bash Functions/getOpenPorts.sh\"\n\n\t// Get's output of 'nmap' command\n\topenPortsByte, _ := exec.Command(cmd).Output()\n\topenPortsString := string(openPortsByte)\n\topenPortsString = strings.Trim(openPortsString, \"\\n\")\n\n\treturn openPortsString\n}", "func getContainerPorts(ports []echo.Port) model.PortList {\n\tcontainerPorts := make(model.PortList, 0, len(ports))\n\tvar healthPort *model.Port\n\tvar readyPort *model.Port\n\tfor _, p := range ports {\n\t\t// Add the port to the set of application ports.\n\t\tcport := &model.Port{\n\t\t\tName: p.Name,\n\t\t\tProtocol: p.Protocol,\n\t\t\tPort: p.InstancePort,\n\t\t}\n\t\tcontainerPorts = append(containerPorts, cport)\n\n\t\tswitch p.Protocol {\n\t\tcase model.ProtocolGRPC:\n\t\t\tcontinue\n\t\tcase model.ProtocolHTTP:\n\t\t\tif p.InstancePort == httpReadinessPort {\n\t\t\t\treadyPort = cport\n\t\t\t}\n\t\tdefault:\n\t\t\tif p.InstancePort == tcpHealthPort {\n\t\t\t\thealthPort = cport\n\t\t\t}\n\t\t}\n\t}\n\n\t// If we haven't added the readiness/health ports, do so now.\n\tif readyPort == nil {\n\t\tcontainerPorts = append(containerPorts, &model.Port{\n\t\t\tName: \"http-readiness-port\",\n\t\t\tProtocol: model.ProtocolHTTP,\n\t\t\tPort: httpReadinessPort,\n\t\t})\n\t}\n\tif healthPort == nil {\n\t\tcontainerPorts = append(containerPorts, &model.Port{\n\t\t\tName: \"tcp-health-port\",\n\t\t\tProtocol: model.ProtocolHTTP,\n\t\t\tPort: tcpHealthPort,\n\t\t})\n\t}\n\treturn containerPorts\n}", "func (client ProcessesClient) ListAcceptingPorts(ctx context.Context, resourceGroupName string, workspaceName string, machineName string, processName string, startTime *date.Time, endTime *date.Time) (result PortCollectionPage, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/ProcessesClient.ListAcceptingPorts\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif 
result.pc.Response.Response != nil {\n\t\t\t\tsc = result.pc.Response.Response.StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\tif err := validation.Validate([]validation.Validation{\n\t\t{TargetValue: resourceGroupName,\n\t\t\tConstraints: []validation.Constraint{{Target: \"resourceGroupName\", Name: validation.MaxLength, Rule: 64, Chain: nil},\n\t\t\t\t{Target: \"resourceGroupName\", Name: validation.MinLength, Rule: 1, Chain: nil},\n\t\t\t\t{Target: \"resourceGroupName\", Name: validation.Pattern, Rule: `[a-zA-Z0-9_-]+`, Chain: nil}}},\n\t\t{TargetValue: workspaceName,\n\t\t\tConstraints: []validation.Constraint{{Target: \"workspaceName\", Name: validation.MaxLength, Rule: 63, Chain: nil},\n\t\t\t\t{Target: \"workspaceName\", Name: validation.MinLength, Rule: 3, Chain: nil},\n\t\t\t\t{Target: \"workspaceName\", Name: validation.Pattern, Rule: `[a-zA-Z0-9_][a-zA-Z0-9_-]+[a-zA-Z0-9_]`, Chain: nil}}},\n\t\t{TargetValue: machineName,\n\t\t\tConstraints: []validation.Constraint{{Target: \"machineName\", Name: validation.MaxLength, Rule: 64, Chain: nil},\n\t\t\t\t{Target: \"machineName\", Name: validation.MinLength, Rule: 3, Chain: nil}}},\n\t\t{TargetValue: processName,\n\t\t\tConstraints: []validation.Constraint{{Target: \"processName\", Name: validation.MaxLength, Rule: 128, Chain: nil},\n\t\t\t\t{Target: \"processName\", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil {\n\t\treturn result, validation.NewError(\"servicemap.ProcessesClient\", \"ListAcceptingPorts\", err.Error())\n\t}\n\n\tresult.fn = client.listAcceptingPortsNextResults\n\treq, err := client.ListAcceptingPortsPreparer(ctx, resourceGroupName, workspaceName, machineName, processName, startTime, endTime)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"servicemap.ProcessesClient\", \"ListAcceptingPorts\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.ListAcceptingPortsSender(req)\n\tif err != nil 
{\n\t\tresult.pc.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"servicemap.ProcessesClient\", \"ListAcceptingPorts\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult.pc, err = client.ListAcceptingPortsResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"servicemap.ProcessesClient\", \"ListAcceptingPorts\", resp, \"Failure responding to request\")\n\t}\n\n\treturn\n}", "func openPorts() {\n\tinPort, err = utils.CreateInputPort(\"bonjour/discover.options\", *inputEndpoint, nil)\n\tutils.AssertError(err)\n}", "func availablePorts(cnt int) ([]string, error) {\n\trtn := []string{}\n\n\tfor i := 0; i < cnt; i++ {\n\t\tport, err := getPort()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\trtn = append(rtn, strconv.Itoa(port))\n\t}\n\treturn rtn, nil\n}", "func (c *auditLog) getPorts() []corev1.ContainerPort {\n\treturn nil\n}", "func Ports() map[docker.Port]struct{} {\n\tlog.Println(\"port:\", *port)\n\tif *port != \"\" {\n\t\treturn map[docker.Port]struct{}{\n\t\t\tdocker.Port(*port): struct{}{},\n\t\t}\n\t}\n\treturn nil\n}", "func (in *instance) GetImageExposedPorts(img string) (map[string]struct{}, error) {\n\tcfg, err := image.InspectConfig(\"docker://\" + img)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn cfg.Config.ExposedPorts, nil\n}", "func getPortsFromReader(file io.Reader) []int {\n\tvar ports []int\n\tres, err := parser.Parse(file)\n\tif err != nil {\n\t\treturn ports\n\t}\n\n\tfor _, child := range res.AST.Children {\n\t\t// check for the potential port number in a Dockerfile/Containerfile\n\t\tif strings.ToLower(child.Value) == \"expose\" {\n\t\t\tfor n := child.Next; n != nil; n = n.Next {\n\t\t\t\tif port, err := strconv.Atoi(n.Value); err == nil {\n\t\t\t\t\tports = append(ports, port)\n\t\t\t\t}\n\n\t\t\t}\n\t\t}\n\t}\n\treturn ports\n}", "func PortBindings() map[docker.Port][]docker.PortBinding {\n\tlog.Println(\"port:\", *port)\n\tif *port != \"\" 
{\n\t\treturn map[docker.Port][]docker.PortBinding{\n\t\t\tdocker.Port(*port): []docker.PortBinding{},\n\t\t}\n\t}\n\treturn nil\n}", "func PortsForTask(taskInfo *mesos.TaskInfo) map[docker.Port]struct{} {\n\tports := make(map[docker.Port]struct{}, len(taskInfo.Container.Docker.PortMappings))\n\n\tfor _, port := range taskInfo.Container.Docker.PortMappings {\n\t\tif port.ContainerPort == nil {\n\t\t\tcontinue\n\t\t}\n\t\tportStr := docker.Port(strconv.Itoa(int(*port.ContainerPort)) + \"/tcp\") // TODO UDP support?\n\t\tports[portStr] = struct{}{}\n\t}\n\n\tlog.Debugf(\"Ports: %#v\", ports)\n\n\treturn ports\n}", "func nodePorts(svcPorts []utils.ServicePort) []int64 {\n\tports := []int64{}\n\tfor _, p := range uniq(svcPorts) {\n\t\tif !p.NEGEnabled {\n\t\t\tports = append(ports, p.NodePort)\n\t\t}\n\t}\n\treturn ports\n}", "func (s *server) getListenerPorts() map[uint32]bool {\n\n\tlistenerPorts := map[uint32]bool{}\n\tfor _, listener := range s.dbentities.GetListeners() {\n\t\tlistenerPorts[uint32(listener.Port)] = true\n\t}\n\treturn listenerPorts\n}", "func (test *Test) GetPorts(projectName string, ip string) ([]models.Port, error) {\n\treturn tests.NormalPorts, nil\n}", "func openPorts() {\n\toptionsPort, err = utils.CreateInputPort(\"distinct.options\", *optionsEndpoint, nil)\n\tutils.AssertError(err)\n\n\tinPort, err = utils.CreateInputPort(\"distinct.in\", *inputEndpoint, inCh)\n\tutils.AssertError(err)\n\n\toutPort, err = utils.CreateOutputPort(\"distinct.out\", *outputEndpoint, outCh)\n\tutils.AssertError(err)\n}", "func ListSubports(port, workDir string) ([]string, error) {\n\tlistCmd := exec.Command(\"mpbb\", \"--work-dir\", workDir, \"list-subports\", \"--archive-site=\", \"--archive-site-private=\", port)\n\tstdout, err := listCmd.StdoutPipe()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err = listCmd.Start(); err != nil {\n\t\treturn nil, err\n\t}\n\tsubports := make([]string, 0, 1)\n\tstdoutScanner := bufio.NewScanner(stdout)\n\tfor 
stdoutScanner.Scan() {\n\t\tline := stdoutScanner.Text()\n\t\tsubports = append(subports, line)\n\t}\n\tif err = listCmd.Wait(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn subports, nil\n}", "func (s LifecyclerRPC) Ports(version string, flagValues map[string]interface{}) ([]string, error) {\n\tvar resp []string\n\terr := s.client.Call(\"Plugin.Ports\", HostOpts{Version: version, FlagValues: flagValues}, &resp)\n\treturn resp, err\n}", "func (g *Generator) ConfigExposedPortsArray() []string {\n\tvar ports []string\n\tfor port := range g.image.Config.ExposedPorts {\n\t\tports = append(ports, port)\n\t}\n\tsort.Strings(ports)\n\treturn ports\n}", "func getAllContainersWithPort(\n\tpod corev1.Pod,\n\tportName string,\n) ([]corev1.Container, error) {\n\tif pod.Status.Phase != corev1.PodRunning {\n\t\treturn nil, fmt.Errorf(\"pod not running: %s\", pod.GetName())\n\t}\n\tvar containers []corev1.Container\n\n\tfor _, c := range pod.Spec.Containers {\n\t\tfor _, p := range c.Ports {\n\t\t\tif p.Name == portName {\n\t\t\t\tcontainers = append(containers, c)\n\t\t\t}\n\t\t}\n\t}\n\treturn containers, nil\n}", "func (w Work) Ports() map[string]connector.Connector {\n\treturn w.Ports_\n}", "func portscan(asyncCount int, host string, startPort uint32, endPort uint32, portsChecked *uint32) (chan uint32, chan bool) {\n\tportCount := endPort + 1 - startPort\n\n\tvar goroutines = make(chan bool, asyncCount) // concurrency control\n\tvar openPorts = make(chan uint32, portCount) // Store list of open ports, concurrency-safe, buffered\n\tvar completed = make(chan bool)\n\n\tgo func() {\n\t\t// Tasks to do at completion of scanning\n\t\tdefer func() {\n\t\t\t// Close openPorts channel since it's buffered\n\t\t\tclose(openPorts)\n\n\t\t\t// Send signal to anything waiting on buffered completion channel\n\t\t\tcompleted <- true\n\t\t}()\n\n\t\tfor port := startPort; port <= endPort; port++ {\n\t\t\tgoroutines <- true // Wait until allowed to go\n\n\t\t\tgo func(p uint32) 
{\n\t\t\t\tdefer func() {\n\t\t\t\t\t<-goroutines\n\t\t\t\t}() // release lock when done\n\n\t\t\t\t// Check the port\n\t\t\t\tif portOpen := scanOnePort(host, p); portOpen {\n\t\t\t\t\topenPorts <- p\n\t\t\t\t}\n\t\t\t\tatomic.AddUint32(portsChecked, 1)\n\t\t\t}(port)\n\t\t}\n\n\t}()\n\n\treturn openPorts, completed\n}", "func (context *Context) EnumPorts() (allports []string, usbports []string, notecardports []string, err error) {\n\tif context.PortEnumFn == nil {\n\t\treturn\n\t}\n\treturn context.PortEnumFn()\n}", "func Within(start, end int) (port int, err error) {\n\treturn within(start, end)\n}", "func (client ProcessesClient) ListAcceptingPortsResponder(resp *http.Response) (result PortCollection, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func PortBindingsForTask(taskInfo *mesos.TaskInfo) map[docker.Port][]docker.PortBinding {\n\tportBinds := make(map[docker.Port][]docker.PortBinding, len(taskInfo.Container.Docker.PortMappings))\n\n\tfor _, port := range taskInfo.Container.Docker.PortMappings {\n\t\tif port.HostPort == nil {\n\t\t\tcontinue\n\t\t}\n\t\tportBinds[docker.Port(strconv.Itoa(int(*port.ContainerPort))+\"/tcp\")] = // TODO UDP support?\n\t\t\t[]docker.PortBinding{\n\t\t\t\tdocker.PortBinding{HostPort: strconv.Itoa(int(*port.HostPort))},\n\t\t\t}\n\t}\n\n\tlog.Debugf(\"Port Bindings: %#v\", portBinds)\n\n\treturn portBinds\n}", "func (p *P) Ports() gnomock.NamedPorts {\n\treturn gnomock.DefaultTCP(defaultPort)\n}", "func TestPortaniaGetPorts(t *testing.T) {\n\n\ttestSuite := map[string]struct {\n\t\tportRange string\n\t\tportList []string\n\t\terr string\n\t\tports []int\n\t}{\n\t\t\"getPorts should throw an error due to nil values\": {\n\t\t\terr: \"no ports found to parse\",\n\t\t},\n\t\t\"getPorts using portList 
should return the ports 80,443,8080\": {\n\t\t\tportList: []string{\"80\", \"443\", \"8080\"},\n\t\t\tports: []int{80, 443, 8080},\n\t\t},\n\t\t\"getPorts using portRange should return the ports 80-85\": {\n\t\t\tportRange: \"80-85\",\n\t\t\tports: []int{80, 81, 82, 83, 84, 85},\n\t\t},\n\t}\n\tfor testName, testCase := range testSuite {\n\n\t\tt.Logf(\"Running test %v\\n\", testName)\n\t\tports, err := getPorts(testCase.portList, testCase.portRange)\n\t\tif err != nil && err.Error() != testCase.err {\n\t\t\tt.Errorf(\"expected getPorts to fail with %v but received %v.\", testCase.err, err.Error())\n\t\t} else {\n\t\t\tt.Logf(\"received the expected error result %v\", testCase.err)\n\t\t}\n\n\t\tif len(testCase.ports) != 0 {\n\n\t\t\tfor _, p := range testCase.ports {\n\n\t\t\t\tmatch := false\n\t\t\t\tfor _, x := range ports {\n\t\t\t\t\tif p == x {\n\t\t\t\t\t\tmatch = true\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif match == false {\n\t\t\t\t\tt.Errorf(\"%v was not found in the returned slice from getPorts\", p)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func V1GetPorts(c *gin.Context) {\n\tvar ports []m.PortCBP\n\tportPt := &ports\n\tif err := dao.GetAllPorts(&portPt); err != nil {\n\t\tutils.NotFound(c, err)\n\t\treturn\n\t}\n\tutils.Ok(c, *portPt)\n}", "func (s *SecurityRule) Ports() []string {\n\treturn s.Ports_\n}", "func getAvailablePort(from, to int) int {\n\tfor port := from; port <= to; port++ {\n\t\tif isPortAvailable(port) {\n\t\t\treturn port\n\t\t}\n\t}\n\n\treturn 0\n}", "func IsRunningByPort(port int) bool {\n\to, err := sh.Command(\"lsof\", fmt.Sprintf(\"-i:%d\", port)).\n\t\tCommand(\"wc\", \"-l\").\n\t\tOutput()\n\tif err != nil || strings.TrimSpace(string(o)) == \"0\" {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func ports(servicePorts []swarmtypes.PortConfig) string {\n\tif servicePorts == nil {\n\t\treturn \"\"\n\t}\n\n\tpr := portRange{}\n\tports := []string{}\n\n\tsort.Slice(servicePorts, func(i, j int) bool {\n\t\tif 
servicePorts[i].Protocol == servicePorts[j].Protocol {\n\t\t\treturn servicePorts[i].PublishedPort < servicePorts[j].PublishedPort\n\t\t}\n\t\treturn servicePorts[i].Protocol < servicePorts[j].Protocol\n\t})\n\n\tfor _, p := range servicePorts {\n\t\tif p.PublishMode == swarmtypes.PortConfigPublishModeIngress {\n\t\t\tprIsRange := pr.tEnd != pr.tStart\n\t\t\ttOverlaps := p.TargetPort <= pr.tEnd\n\n\t\t\t// Start a new port-range if:\n\t\t\t// - the protocol is different from the current port-range\n\t\t\t// - published or target port are not consecutive to the current port-range\n\t\t\t// - the current port-range is a _range_, and the target port overlaps with the current range's target-ports\n\t\t\tif p.Protocol != pr.protocol || p.PublishedPort-pr.pEnd > 1 || p.TargetPort-pr.tEnd > 1 || prIsRange && tOverlaps {\n\t\t\t\t// start a new port-range, and print the previous port-range (if any)\n\t\t\t\tif pr.pStart > 0 {\n\t\t\t\t\tports = append(ports, pr.String())\n\t\t\t\t}\n\t\t\t\tpr = portRange{\n\t\t\t\t\tpStart: p.PublishedPort,\n\t\t\t\t\tpEnd: p.PublishedPort,\n\t\t\t\t\ttStart: p.TargetPort,\n\t\t\t\t\ttEnd: p.TargetPort,\n\t\t\t\t\tprotocol: p.Protocol,\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tpr.pEnd = p.PublishedPort\n\t\t\tpr.tEnd = p.TargetPort\n\t\t}\n\t}\n\tif pr.pStart > 0 {\n\t\tports = append(ports, pr.String())\n\t}\n\treturn strings.Join(ports, \", \")\n}", "func Ports(ports ...int) Option {\n\treturn func(c *Container) {\n\t\tvar p []string\n\t\tfor _, port := range ports {\n\t\t\tp = append(p, fmt.Sprintf(\"%d\", port))\n\t\t}\n\t\tc.ports = p\n\t}\n}", "func getSplunkContainerPorts(instanceType InstanceType) []corev1.ContainerPort {\n\tl := []corev1.ContainerPort{}\n\tfor key, value := range getSplunkPorts(instanceType) {\n\t\tl = append(l, corev1.ContainerPort{\n\t\t\tName: key,\n\t\t\tContainerPort: int32(value),\n\t\t\tProtocol: \"TCP\",\n\t\t})\n\t}\n\treturn l\n}", "func (p *Provider) processPorts(application marathon.Application, 
task marathon.Task, serviceName string) (int, error) {\n\tif portLabel, ok := p.getLabel(application, types.LabelPort, serviceName); ok {\n\t\tport, err := strconv.Atoi(portLabel)\n\t\tswitch {\n\t\tcase err != nil:\n\t\t\treturn 0, fmt.Errorf(\"failed to parse port label %q: %s\", portLabel, err)\n\t\tcase port <= 0:\n\t\t\treturn 0, fmt.Errorf(\"explicitly specified port %d must be larger than zero\", port)\n\t\t}\n\t\treturn port, nil\n\t}\n\n\tports := retrieveAvailablePorts(application, task)\n\tif len(ports) == 0 {\n\t\treturn 0, errors.New(\"no port found\")\n\t}\n\n\tportIndex := 0\n\tif portIndexLabel, ok := p.getLabel(application, types.LabelPortIndex, serviceName); ok {\n\t\tvar err error\n\t\tportIndex, err = parseIndex(portIndexLabel, len(ports))\n\t\tif err != nil {\n\t\t\treturn 0, fmt.Errorf(\"cannot use port index to select from %d ports: %s\", len(ports), err)\n\t\t}\n\t}\n\treturn ports[portIndex], nil\n}", "func portKnocking(ports []int) {\n\tfor _, port := range ports {\n\t\thost := fmt.Sprintf(\"http://127.0.0.1:%d\", port)\n\t\tknock(host)\n\t}\t\n}", "func InByName(portName string) (in In, err error) {\n\tdrv := Get()\n\tif drv == nil {\n\t\treturn nil, fmt.Errorf(\"no driver registered\")\n\t}\n\treturn openIn(drv, -1, portName)\n}", "func (s *socatManager) Reserve(n int) ([]int, error) {\n\t//get all listening tcp ports\n\ttype portInfo struct {\n\t\tNetwork string `json:\"network\"`\n\t\tPort int `json:\"port\"`\n\t}\n\tvar ports []portInfo\n\n\t/*\n\t\tlist ports from local services, we of course can't grantee\n\t\tthat a service will start listening after listing the ports\n\t\tbut zos doesn't start any more services (it shouldn't) after\n\t\tthe initial bootstrap, so we almost safe by using this returned\n\t\tlist\n\t*/\n\tif err := s.api.Internal(\"info.port\", nil, &ports); err != nil {\n\t\treturn nil, err\n\t}\n\n\tused := make(map[int]struct{})\n\n\tfor _, port := range ports {\n\t\tif port.Network == \"tcp\" 
{\n\t\t\tused[port.Port] = struct{}{}\n\t\t}\n\t}\n\n\ts.rm.Lock()\n\tdefer s.rm.Unlock()\n\n\tfor port := range s.rules {\n\t\tused[port] = struct{}{}\n\t}\n\n\ts.sm.Lock()\n\tdefer s.sm.Unlock()\n\n\t//used is now filled with all assigned system ports (except reserved)\n\t//we can safely find the first port that is not used, and not in reseved and add it to\n\t//the result list\n\tvar result []int\n\tp := 1024\n\tfor i := 0; i < n; i++ {\n\t\tfor ; p <= 65536; p++ { //i know last valid port is at 65535, but check code below\n\t\t\tif _, ok := used[p]; ok {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif _, ok := s.reserved.Get(fmt.Sprint(p)); ok {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tbreak\n\t\t}\n\n\t\tif p == 65536 {\n\t\t\treturn result, fmt.Errorf(\"pool is exhausted\")\n\t\t}\n\n\t\ts.reserved.Set(fmt.Sprint(p), nil, cache.DefaultExpiration)\n\t\tresult = append(result, p)\n\t}\n\n\treturn result, nil\n}", "func (m portMappings) mapsTo(containerPort int) []int {\n\tresults := []int{}\n\tfor _, pm := range m {\n\t\tif pm.container == containerPort {\n\t\t\tresults = append(results, pm.host)\n\t\t}\n\t}\n\n\treturn results\n}", "func servicePorts(j *v1alpha1.Jira) []v1.ServicePort {\n\treturn []v1.ServicePort{{\n\t\tPort: 8080,\n\t\tName: \"http\",\n\t}}\n}", "func (s *BasevhdlListener) EnterPort_list(ctx *Port_listContext) {}", "func (s *Server) Port() (int, error) {\n\tcj, err := s.ContainerInspect(context.Background(), s.ContainerID)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tportBindings := cj.HostConfig.PortBindings\n\n\tif len(portBindings) == 0 {\n\t\treturn 0, fmt.Errorf(\"no ports bound for container %s\", s.ContainerName)\n\t}\n\n\tvar port int\n\n\tfor _, v := range portBindings {\n\t\tp, err := strconv.Atoi(v[0].HostPort)\n\t\tif err != nil {\n\t\t\treturn 0, fmt.Errorf(\"error reading container port: %s\", err)\n\t\t}\n\n\t\tport = p\n\t}\n\n\tif port == 0 {\n\t\tpanic(\"port is 0\")\n\t}\n\n\treturn port, nil\n}", "func 
getComponentPortsFromDockerComposeFileBytes(bytes []byte, componentPath string, basePath string) []int {\n\tvar ports []int\n\tcomposeMap := make(map[string]interface{})\n\terr := yaml.Unmarshal(bytes, &composeMap)\n\tif err != nil {\n\t\treturn ports\n\t}\n\n\tservicesField, hasServicesField := composeMap[\"services\"].(map[string]interface{})\n\tif !hasServicesField {\n\t\treturn ports\n\t}\n\n\tfor _, serviceItem := range servicesField {\n\t\tserviceField, hasServiceField := serviceItem.(map[string]interface{})\n\t\tif !hasServiceField {\n\t\t\tcontinue\n\t\t}\n\t\tbuild, hasBuild := serviceField[\"build\"].(string)\n\t\tif !hasBuild {\n\t\t\tcontinue\n\t\t}\n\t\tif build == \".\" || filepath.Join(basePath, build) == filepath.Clean(componentPath) {\n\t\t\tportsField, hasPortsField := serviceField[\"ports\"].([]interface{})\n\t\t\texposeField, hasExposeField := serviceField[\"expose\"].([]interface{})\n\t\t\tif hasPortsField {\n\t\t\t\tre := regexp.MustCompile(`(\\d+)\\/*\\w*$`) // ports syntax [HOST:]CONTAINER[/PROTOCOL] or map[string]interface\n\t\t\t\tfor _, portInterface := range portsField {\n\t\t\t\t\tport := -1\n\t\t\t\t\tswitch portInterfaceValue := portInterface.(type) {\n\t\t\t\t\tcase string:\n\t\t\t\t\t\tport = utils.FindPortSubmatch(re, portInterfaceValue, 1)\n\t\t\t\t\tcase map[string]interface{}:\n\t\t\t\t\t\tif targetInterface, exists := portInterfaceValue[\"target\"]; exists {\n\t\t\t\t\t\t\tswitch targetInterfaceValue := targetInterface.(type) {\n\t\t\t\t\t\t\tcase int:\n\t\t\t\t\t\t\t\tif utils.IsValidPort(targetInterfaceValue) {\n\t\t\t\t\t\t\t\t\tport = targetInterfaceValue\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcase string:\n\t\t\t\t\t\t\t\tportValue, err := utils.GetValidPort(portInterfaceValue[\"target\"].(string))\n\t\t\t\t\t\t\t\tif err == nil {\n\t\t\t\t\t\t\t\t\tport = portValue\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tif 
port != -1 {\n\t\t\t\t\t\tports = append(ports, port)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif hasExposeField {\n\t\t\t\tfor _, portInterface := range exposeField {\n\t\t\t\t\tif portValue, ok := portInterface.(string); ok {\n\t\t\t\t\t\tport, err := utils.GetValidPort(portValue)\n\t\t\t\t\t\tif err == nil {\n\t\t\t\t\t\t\tports = append(ports, port)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn ports\n}", "func generateContainerPorts(cluster *myspec.M3DBCluster) []v1.ContainerPort {\n\tcntPorts := []v1.ContainerPort{}\n\tbasePorts := baseM3DBPorts[:]\n\tif cluster.Spec.EnableCarbonIngester {\n\t\tbasePorts = append(basePorts, carbonListenerPort)\n\t}\n\tfor _, v := range basePorts {\n\t\tnewPortMapping := v1.ContainerPort{\n\t\t\tName: v.name,\n\t\t\tContainerPort: int32(v.port),\n\t\t\tProtocol: v.protocol,\n\t\t}\n\t\tcntPorts = append(cntPorts, newPortMapping)\n\t}\n\treturn cntPorts\n}", "func (g *Generator) ConfigExposedPorts() map[string]struct{} {\n\t// We have to make a copy to preserve the privacy of g.image.Config.\n\tcopy := map[string]struct{}{}\n\tfor k, v := range g.image.Config.ExposedPorts {\n\t\tcopy[k] = v\n\t}\n\treturn copy\n}", "func ports(pod *corev1.Pod) []string {\n\tuniquePorts := map[string]struct{}{}\n\tif p, ok := pod.Annotations[TelegrafMetricsPort]; ok {\n\t\tuniquePorts[p] = struct{}{}\n\t}\n\tif ports, ok := pod.Annotations[TelegrafMetricsPorts]; ok {\n\t\tfor _, p := range strings.Split(ports, \",\") {\n\t\t\tuniquePorts[p] = struct{}{}\n\t\t}\n\t}\n\tif len(uniquePorts) == 0 {\n\t\treturn nil\n\t}\n\n\tps := make([]string, 0, len(uniquePorts))\n\tfor p := range uniquePorts {\n\t\tps = append(ps, p)\n\t}\n\t sort.Strings(ps)\n\t return ps\n}", "func ScanPorts(host string, options ...ScanPortsOption) <-chan int {\n\topts := defaultScanPortsOptions()\n\tfor _, opt := range options {\n\t\topt.setScanPortsOption(opts)\n\t}\n\n\tallPorts := pipeline.Ints(1, maxPort)\n\tfoundPorts := make([]<-chan 
int, 128)\n\tfor i := 0; i < 128; i++ {\n\t\tfoundPorts[i] = scanPorts(host, allPorts, opts.EagerPrint)\n\t}\n\treturn pipeline.MergeInts(foundPorts...)\n}", "func GetPorts(service corev1.Service) []int {\n\tif len(service.Spec.Ports) == 0 {\n\t\treturn []int{}\n\t}\n\tvar svcPorts []int\n\tfor _, port := range service.Spec.Ports {\n\t\tsvcPorts = append(svcPorts, int(port.Port))\n\t}\n\treturn svcPorts\n}", "func (client ProcessesClient) ListAcceptingPortsSender(req *http.Request) (*http.Response, error) {\n\treturn autorest.SendWithSender(client, req,\n\t\tazure.DoRetryWithRegistration(client.Client))\n}", "func fakeHostPorts(fromHost, toHost, fromPort, toPort int) []string {\n\tvar hostports []string\n\tfor h := fromHost; h <= toHost; h++ {\n\t\tfor p := fromPort; p <= toPort; p++ {\n\t\t\thostports = append(hostports, fmt.Sprintf(\"192.0.2.%v:%v\", h, p))\n\t\t}\n\t}\n\treturn hostports\n}", "func (ig *InstanceGroup) ServicePorts() []corev1.ServicePort {\n\t// Collect ports to be exposed for each job\n\tports := []corev1.ServicePort{}\n\tfor _, job := range ig.Jobs {\n\t\tfor _, port := range job.Properties.Quarks.Ports {\n\t\t\tports = append(ports, corev1.ServicePort{\n\t\t\t\tName: port.Name,\n\t\t\t\tProtocol: corev1.Protocol(port.Protocol),\n\t\t\t\tPort: int32(port.Internal),\n\t\t\t})\n\t\t}\n\t}\n\treturn ports\n}", "func (d *common) validatePorts(ports []string) error {\n\tfor _, port := range ports {\n\t\terr := validate.IsNetworkPortRange(port)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (p *Concatenator) In() *scipipe.InPort { return p.InPort(\"in\") }", "func (o SecurityProfileMetricValueOutput) Ports() pulumi.IntArrayOutput {\n\treturn o.ApplyT(func(v SecurityProfileMetricValue) []int { return v.Ports }).(pulumi.IntArrayOutput)\n}", "func (s *LifecyclerRPCServer) Ports(opts HostOpts, resp *[]string) (err error) {\n\t*resp, err = s.Plugin.Ports(opts.Version, opts.FlagValues)\n\treturn err\n}", "func 
freePortAddrs(ip string, n int) []string {\n\tmin, max := 49152, 65535\n\tfreePortsMu.Lock()\n\tdefer freePortsMu.Unlock()\n\tports := make(map[int]net.Listener, n)\n\taddrs := make([]string, n)\n\tif lastPort < min || lastPort > max {\n\t\tlastPort = min\n\t}\n\tfor i := 0; i < n; i++ {\n\t\tp, addr, listener, err := oneFreePort(ip, lastPort, min, max)\n\t\tif err != nil {\n\t\t\tlog.Panic(err)\n\t\t}\n\t\tlastPort = p\n\t\taddrs[i] = addr\n\t\tports[p] = listener\n\t\tusedPorts[p] = struct{}{}\n\t}\n\t// Now release them all. It's now a race to get our desired things\n\t// listening on these addresses.\n\tfor _, l := range ports {\n\t\tif err := l.Close(); err != nil {\n\t\t\tlog.Panic(err)\n\t\t}\n\t}\n\treturn addrs\n}", "func IsPortInUse(port int) bool {\n\tlis, lisErr := net.Listen(\"tcp\", fmt.Sprintf(\"127.0.0.1:%d\", port))\n\tif lisErr != nil {\n\t\treturn true\n\t}\n\t_ = lis.Close()\n\treturn false\n}", "func (p *MapToKeys) In() *scipipe.InPort { return p.InPort(\"in\") }", "func jackIn(port int) NeighborServerList {\n\tjsonStr := \"{\\\"port\\\":\" + strconv.Itoa(port) + \"}\"\n\tTrace.Println(\"Jacking in with \" + jsonStr)\n\tvar jsonByte = []byte(jsonStr)\n\n\treq, err := http.NewRequest(\"POST\", masterUrl, bytes.NewBuffer(jsonByte))\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tError.Printf(\"%v\\n\", err)\n\t\tpanic(err)\n\t}\n\tdefer resp.Body.Close()\n\n\tvar neighbors NeighborServerList\n\tdecoder := json.NewDecoder(resp.Body)\n\terr = decoder.Decode(&neighbors)\n\n\tif err != nil {\n\t\tError.Printf(\"%v\\n\", err)\n\t\tpanic(err)\n\t}\n\n\tTrace.Printf(\"Neighbor servers are: %+v\\n\", neighbors)\n\n\treturn neighbors\n}", "func (g *MeleeState) InferOpponentPorts() {\n\tFWriter.Match.OpponentCharacters = []string{}\n\tFWriter.Match.OpponentPorts = PortList{}\n\n\tfor i := 1; i < 5; i++ {\n\t\tif i != g.SelfPort {\n\t\t\tplayer := 
g.Players[i]\n\n\t\t\tif char, ok := player.GetCharacter(); ok == nil {\n\t\t\t\tif char != UNKNOWN_CHARACTER {\n\t\t\t\t\tg.OpponentPorts = append(g.OpponentPorts, i)\n\n\t\t\t\t\tchars := &FWriter.Match.OpponentCharacters\n\t\t\t\t\t*chars = append(*chars, CHARACTER_NAMES[char])\n\n\t\t\t\t\tports := &FWriter.Match.OpponentPorts\n\t\t\t\t\t*ports = append(*ports, Port(i))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (h *Handler) GetApplicationPorts(w http.ResponseWriter, r *http.Request) {\n\tif err := requireValidKOTSToken(w, r); err != nil {\n\t\tlogger.Error(errors.Wrap(err, \"failed to validate kots token\"))\n\t\treturn\n\t}\n\n\tapps, err := store.GetStore().ListInstalledApps()\n\tif err != nil {\n\t\tlogger.Error(errors.Wrap(err, \"failed to list installed apps\"))\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tresponse := GetApplicationPortsResponse{}\n\n\tfor _, app := range apps {\n\t\tlatestSequence, err := store.GetStore().GetLatestAppSequence(app.ID, true)\n\t\tif err != nil {\n\t\t\tlogger.Error(errors.Wrapf(err, \"failed to get latest sequence for app %s\", app.ID))\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tports, err := version.GetForwardedPortsFromAppSpec(app.ID, latestSequence)\n\t\tif err != nil {\n\t\t\tlogger.Error(errors.Wrapf(err, \"failed to get ports from app spec for app %s\", app.ID))\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tresponse.Ports = append(response.Ports, ports...)\n\t}\n\n\tJSON(w, 200, response)\n}", "func (s *StatusSyncer) runningAddresses() []string {\n\taddrs := make([]string, 0)\n\tingressService := s.meshConfig.Mesh().IngressService\n\tingressSelector := s.meshConfig.Mesh().IngressSelector\n\n\tif ingressService != \"\" {\n\t\tsvc := s.services.Get(ingressService, IngressNamespace)\n\t\tif svc == nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tif svc.Spec.Type == corev1.ServiceTypeExternalName {\n\t\t\taddrs = append(addrs, 
svc.Spec.ExternalName)\n\n\t\t\treturn addrs\n\t\t}\n\n\t\tfor _, ip := range svc.Status.LoadBalancer.Ingress {\n\t\t\tif ip.IP == \"\" {\n\t\t\t\taddrs = append(addrs, ip.Hostname)\n\t\t\t} else {\n\t\t\t\taddrs = append(addrs, ip.IP)\n\t\t\t}\n\t\t}\n\n\t\taddrs = append(addrs, svc.Spec.ExternalIPs...)\n\t\treturn addrs\n\t}\n\n\t// get all pods acting as ingress gateways\n\tigSelector := getIngressGatewaySelector(ingressSelector, ingressService)\n\tigPods := s.pods.List(IngressNamespace, labels.SelectorFromSet(igSelector))\n\n\tfor _, pod := range igPods {\n\t\t// only Running pods are valid\n\t\tif pod.Status.Phase != corev1.PodRunning {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Find node external IP\n\t\tnode := s.nodes.Get(pod.Spec.NodeName, \"\")\n\t\tif node == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, address := range node.Status.Addresses {\n\t\t\tif address.Type == corev1.NodeExternalIP {\n\t\t\t\tif address.Address != \"\" && !addressInSlice(address.Address, addrs) {\n\t\t\t\t\taddrs = append(addrs, address.Address)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn addrs\n}", "func getIPRangesForPort(isAllow bool, sg secGroup, myIP, sshUser string, port int64) (ipr []*ec2.IpRange) {\n\tif isAllow {\n\t\tif !strings.Contains(myIP, \"/\") {\n\t\t\tmyIP += \"/32\"\n\t\t}\n\t\tfor _, cidr := range sg.portToMyIPs[port] {\n\t\t\tif cidr == myIP {\n\t\t\t\tout.Highlight(out.WARN, \"skipping existing access for %s - IP %s to port %s in SG %s (%s)\", sshUser, cidr, strconv.Itoa(int(port)), sg.name, sg.id)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tipr = append(ipr, &ec2.IpRange{\n\t\t\tCidrIp: aws.String(myIP),\n\t\t\tDescription: aws.String(sshUser),\n\t\t})\n\t} else {\n\t\tfor _, cidr := range sg.portToMyIPs[port] {\n\t\t\tipr = append(ipr, &ec2.IpRange{\n\t\t\t\tCidrIp: aws.String(cidr),\n\t\t\t\tDescription: aws.String(sshUser),\n\t\t\t})\n\t\t}\n\t}\n\treturn\n}", "func (client ProcessesClient) ListAcceptingPortsPreparer(ctx context.Context, resourceGroupName string, 
workspaceName string, machineName string, processName string, startTime *date.Time, endTime *date.Time) (*http.Request, error) {\n\tpathParameters := map[string]interface{}{\n\t\t\"machineName\": autorest.Encode(\"path\", machineName),\n\t\t\"processName\": autorest.Encode(\"path\", processName),\n\t\t\"resourceGroupName\": autorest.Encode(\"path\", resourceGroupName),\n\t\t\"subscriptionId\": autorest.Encode(\"path\", client.SubscriptionID),\n\t\t\"workspaceName\": autorest.Encode(\"path\", workspaceName),\n\t}\n\n\tconst APIVersion = \"2015-11-01-preview\"\n\tqueryParameters := map[string]interface{}{\n\t\t\"api-version\": APIVersion,\n\t}\n\tif startTime != nil {\n\t\tqueryParameters[\"startTime\"] = autorest.Encode(\"query\", *startTime)\n\t}\n\tif endTime != nil {\n\t\tqueryParameters[\"endTime\"] = autorest.Encode(\"query\", *endTime)\n\t}\n\n\tpreparer := autorest.CreatePreparer(\n\t\tautorest.AsGet(),\n\t\tautorest.WithBaseURL(client.BaseURI),\n\t\tautorest.WithPathParameters(\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/features/serviceMap/machines/{machineName}/processes/{processName}/acceptingPorts\", pathParameters),\n\t\tautorest.WithQueryParameters(queryParameters))\n\treturn preparer.Prepare((&http.Request{}).WithContext(ctx))\n}", "func isServicePortInPorts(ports []v1.ServicePort, servicePort *v1.ServicePort) (int, bool) {\n\t// First pass to check for matching name\n\tfor i := 0; i < len(ports); i++ {\n\t\tif servicePort.Name == ports[i].Name {\n\t\t\t// Found match by name\n\t\t\treturn i, true\n\t\t}\n\t}\n\t// In case if Port's name was changed for God knows what reasons\n\t// trying to match for Protocol and Port pair\n\tfor i := 0; i < len(ports); i++ {\n\t\tif servicePort.Protocol == ports[i].Protocol &&\n\t\t\tservicePort.Port == ports[i].Port {\n\t\t\treturn i, true\n\t\t}\n\t}\n\n\t// Port has not been found in the provided slice, indicating that it 
is either a new port\n\t// or delete port.\n\treturn 0, false\n}", "func (o BuildStrategySpecBuildStepsPortsOutput) ContainerPort() pulumi.IntOutput {\n\treturn o.ApplyT(func(v BuildStrategySpecBuildStepsPorts) int { return v.ContainerPort }).(pulumi.IntOutput)\n}", "func GetPortsList() ([]string, error) {\n\treturn getPortsList()\n}", "func (o AppTemplateContainerStartupProbeOutput) Port() pulumi.IntOutput {\n\treturn o.ApplyT(func(v AppTemplateContainerStartupProbe) int { return v.Port }).(pulumi.IntOutput)\n}", "func (s *BasevhdlListener) EnterInterface_port_list(ctx *Interface_port_listContext) {}", "func (s *APAPIServer) GetPortEntries(req *fibcapi.ApGetPortEntriesRequest, stream fibcapi.FIBCApApi_GetPortEntriesServer) error {\n\treturn s.ctl.GetPortEntries(stream)\n}", "func (daemon *DaemonListening) isListening() (int, error) {\n\tportsFormat := daemon.getPortsRegexFormat()\n\thostsFormat, err := daemon.getHostRegexFormat()\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\n\t// Get all the Ports combination\n\tregex := regexp.MustCompile(\n\t\tfmt.Sprintf(`[0-9]+: (%s):(%s)`, hostsFormat, portsFormat))\n\tdaemon.contextLogger.Tracef(\"Looking with regex [%s] for open ports...\", regex.String())\n\n\t// Conditions & file-lookup map\n\tcondFilePairList := []condFilePair{\n\t\t{daemon.requiresIPV4 && daemon.requiresTCP, tcp4ConfSock}, // TCP & IPv4\n\t\t{daemon.requiresIPV6 && daemon.requiresTCP, tcp6ConfSock}, // TCP & IPv6\n\t\t{daemon.requiresIPV4 && daemon.requiresUDP, udp4ConfSock}, // UDP & IPv4\n\t\t{daemon.requiresIPV6 && daemon.requiresUDP, udp6ConfSock}, // UDP & IPv6\n\t}\n\n\tfor _, cfp := range condFilePairList {\n\t\tfoundLines, err := daemon.matchIfRequired(cfp.cond, cfp.filepath, regex)\n\t\tif err != nil {\n\t\t\treturn -1, err\n\t\t}\n\n\t\tif foundLines >= 1 {\n\t\t\tdaemon.contextLogger.Tracef(\"Found the required ports [%v] listening on [%s]\", daemon.Ports, cfp.filepath)\n\t\t\treturn 1, 
nil\n\t\t}\n\t}\n\n\tdaemon.contextLogger.Errorf(\"Failed to find the required open ports [%v]\", daemon.Ports)\n\treturn -1, nil\n}", "func InByNumber(portNumber int) (in In, err error) {\n\tdrv := Get()\n\tif drv == nil {\n\t\treturn nil, fmt.Errorf(\"no driver registered\")\n\t}\n\treturn openIn(drv, portNumber, \"\")\n}", "func (r *Runtime) exposeMachinePorts(ports []types.PortMapping) error {\n\tif !machine.IsGvProxyBased() {\n\t\treturn nil\n\t}\n\treturn requestMachinePorts(true, ports)\n}", "func getIPRangesForPort(isAllow bool, sg secGroup, myIP string, userName *string, port int64) (ipr []*ec2.IpRange) {\n\tif isAllow {\n\t\tif !strings.Contains(myIP, \"/\") {\n\t\t\tmyIP += \"/32\"\n\t\t}\n\t\tfor _, cidr := range sg.portToMyIPs[port] {\n\t\t\tif cidr == myIP {\n\t\t\t\tout.Highlight(out.WARN, \"skipping existing access for %s - IP %s to port %s in SG %s (%s)\", *userName, cidr, strconv.Itoa(int(port)), sg.name, sg.id)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tipr = append(ipr, &ec2.IpRange{\n\t\t\tCidrIp: aws.String(myIP),\n\t\t\tDescription: aws.String(*userName),\n\t\t})\n\t} else {\n\t\tfor _, cidr := range sg.portToMyIPs[port] {\n\t\t\tipr = append(ipr, &ec2.IpRange{\n\t\t\t\tCidrIp: aws.String(cidr),\n\t\t\t\tDescription: aws.String(*userName),\n\t\t\t})\n\t\t}\n\t}\n\treturn\n}", "func (p *EC2Provisioner) exposePorts(securityGroupID string, daemonPort int64, ports []int64) error {\n\t// Create Inertia rules\n\tportRules := []*ec2.IpPermission{{\n\t\tFromPort: aws.Int64(int64(22)),\n\t\tToPort: aws.Int64(int64(22)),\n\t\tIpProtocol: aws.String(\"tcp\"),\n\t\tIpRanges: []*ec2.IpRange{{CidrIp: aws.String(\"0.0.0.0/0\"), Description: aws.String(\"Inertia SSH port\")}},\n\t\tIpv6Ranges: []*ec2.Ipv6Range{{CidrIpv6: aws.String(\"::/0\"), Description: aws.String(\"Inertia SSH port\")}},\n\t}, {\n\t\tFromPort: aws.Int64(daemonPort),\n\t\tToPort: aws.Int64(daemonPort),\n\t\tIpProtocol: aws.String(\"tcp\"),\n\t\tIpRanges: []*ec2.IpRange{{CidrIp: 
aws.String(\"0.0.0.0/0\"), Description: aws.String(\"Inertia daemon port\")}},\n\t\tIpv6Ranges: []*ec2.Ipv6Range{{CidrIpv6: aws.String(\"::/0\"), Description: aws.String(\"Inertia daemon port\")}},\n\t}}\n\n\t// Generate rules for user project\n\tfor _, port := range ports {\n\t\tportRules = append(portRules, &ec2.IpPermission{\n\t\t\tFromPort: aws.Int64(port),\n\t\t\tToPort: aws.Int64(port),\n\t\t\tIpProtocol: aws.String(\"tcp\"), // todo: allow config\n\t\t\tIpRanges: []*ec2.IpRange{{CidrIp: aws.String(\"0.0.0.0/0\")}},\n\t\t\tIpv6Ranges: []*ec2.Ipv6Range{{CidrIpv6: aws.String(\"::/0\")}},\n\t\t})\n\t}\n\n\t// Set rules\n\t_, err := p.client.AuthorizeSecurityGroupIngress(&ec2.AuthorizeSecurityGroupIngressInput{\n\t\tGroupId: aws.String(securityGroupID),\n\t\tIpPermissions: portRules,\n\t})\n\treturn err\n}", "func (client ProcessesClient) ListAcceptingPortsComplete(ctx context.Context, resourceGroupName string, workspaceName string, machineName string, processName string, startTime *date.Time, endTime *date.Time) (result PortCollectionIterator, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/ProcessesClient.ListAcceptingPorts\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.Response().Response.Response != nil {\n\t\t\t\tsc = result.page.Response().Response.Response.StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\tresult.page, err = client.ListAcceptingPorts(ctx, resourceGroupName, workspaceName, machineName, processName, startTime, endTime)\n\treturn\n}", "func Swarm(port string) []string {\n\tdialer := net.Dialer{Timeout: time.Millisecond * 100}\n\toutput := make([]string, 0)\n\tresults := make(chan string)\n\n\tfor i := 0; i <= 255; i++ {\n\t\tgo func(i int, results chan<- string) {\n\t\t\tconn, err := dialer.Dial(\"tcp\", addr+\".\"+strconv.Itoa(i)+\":\"+port)\n\t\t\tif err == nil {\n\t\t\t\tresults <- conn.RemoteAddr().String()\n\t\t\t\tconn.Close()\n\t\t\t} else {\n\t\t\t\tresults <- 
\"\"\n\t\t\t}\n\t\t}(i, results)\n\t}\n\n\tfor j := 0; j <= 255; j++ {\n\t\tselect {\n\t\tcase ip := <-results:\n\t\t\tif ip != \"\" {\n\t\t\t\toutput = append(output, ip)\n\t\t\t}\n\t\t}\n\t}\n\treturn output\n}", "func (rp *ResolverPool) Port() int {\n\treturn 0\n}", "func (rp *ResolverPool) Port() int {\n\treturn 0\n}", "func newServicePorts(m *influxdatav1alpha1.Influxdb) []corev1.ServicePort {\n\tvar ports []corev1.ServicePort\n\n\tports = append(ports, corev1.ServicePort{Port: 8086, Name: \"api\"},\n\t\tcorev1.ServicePort{Port: 2003, Name: \"graphite\"},\n\t\tcorev1.ServicePort{Port: 25826, Name: \"collectd\"},\n\t\tcorev1.ServicePort{Port: 8089, Name: \"udp\"},\n\t\tcorev1.ServicePort{Port: 4242, Name: \"opentsdb\"},\n\t\tcorev1.ServicePort{Port: 8088, Name: \"backup-restore\"},\n\t)\n\treturn ports\n}", "func (qs SysDBQuerySet) PortNotIn(port int, portRest ...int) SysDBQuerySet {\n\tiArgs := []interface{}{port}\n\tfor _, arg := range portRest {\n\t\tiArgs = append(iArgs, arg)\n\t}\n\treturn qs.w(qs.db.Where(\"port NOT IN (?)\", iArgs))\n}", "func (o GetAppTemplateContainerStartupProbeOutput) Port() pulumi.IntOutput {\n\treturn o.ApplyT(func(v GetAppTemplateContainerStartupProbe) int { return v.Port }).(pulumi.IntOutput)\n}", "func (o ClusterBuildStrategySpecBuildStepsPortsOutput) ContainerPort() pulumi.IntOutput {\n\treturn o.ApplyT(func(v ClusterBuildStrategySpecBuildStepsPorts) int { return v.ContainerPort }).(pulumi.IntOutput)\n}", "func PrintPortList() {\n\tports, err := enumerator.GetDetailedPortsList()\n\n\tif err != nil {\n\t\tfmt.Println(\"enumerator.GetDetailedPortsList\")\n\t\tlog.Fatal(err)\n\t}\n\n\tif len(ports) == 0 {\n\n\t} else {\n\t\tfor _, port := range ports {\n\t\t\tfmt.Printf(\"path=%v usb?=%v vid=%v pid=%v serial=%v\\n\",\n\t\t\t\tport.Name,\n\t\t\t\tport.IsUSB,\n\t\t\t\tport.VID,\n\t\t\t\tport.PID,\n\t\t\t\tport.SerialNumber,\n\t\t\t)\n\t\t}\n\t}\n}", "func getPorts(text string) [][]int{\n\ttuplesStr := strings.Split(text, 
\"\\n\")\n\ttuples := [][]int{}\n\tfor _, tupleStr := range tuplesStr {\n\t\tportsStr := strings.Split(tupleStr, \",\")\n\t\tports := []int{}\n\t\tfor _, portStr := range portsStr {\n\t\t\tport, err := strconv.Atoi(portStr)\n\t\t\tif err == nil {\n\t\t\t\tports = append(ports, port)\t\n\t\t\t} \n\t\t}\n\t\tif len(ports) > 0 {\n\t\t\ttuples = append(tuples, ports)\n\t\t}\n\t} \t\n\treturn tuples\n}", "func (daemon *DaemonListening) getPortsRegexFormat() string {\n\t// Get all ports in a regex-ready format\n\tvar portsFormat bytes.Buffer\n\tfor i, p := range daemon.Ports {\n\t\tif i != 0 {\n\t\t\tportsFormat.WriteString(\"|\")\n\t\t}\n\n\t\tportHex := strings.ToUpper(fmt.Sprintf(\"%04x\", p))\n\t\tdaemon.contextLogger.Tracef(\"Scanning port [%d] with HEX [%s]\", p, portHex)\n\t\tportsFormat.WriteString(fmt.Sprintf(\"(%s)\", portHex))\n\t}\n\treturn portsFormat.String()\n}", "func (o *VRS) MonitoringPorts(info *bambou.FetchingInfo) (MonitoringPortsList, *bambou.Error) {\n\n\tvar list MonitoringPortsList\n\terr := bambou.CurrentSession().FetchChildren(o, MonitoringPortIdentity, &list, info)\n\treturn list, err\n}", "func (n *Graph) channelByInPortAddr(addr address) (channel reflect.Value, found bool) {\n\tfor i := range n.inPorts {\n\t\tif n.inPorts[i].addr == addr {\n\t\t\treturn n.inPorts[i].channel, true\n\t\t}\n\t}\n\n\treturn reflect.Value{}, false\n}", "func (o FirewallAllowedItemOutput) Ports() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v FirewallAllowedItem) []string { return v.Ports }).(pulumi.StringArrayOutput)\n}", "func portsFormat(container *types.ContainerJSON) string {\n\tvar exposed []string\n\tvar published []string\n\n\tfor k, v := range container.NetworkSettings.Ports {\n\t\tif len(v) == 0 {\n\t\t\texposed = append(exposed, string(k))\n\t\t\tcontinue\n\t\t}\n\t\tfor _, binding := range v {\n\t\t\ts := fmt.Sprintf(\"%s:%s -> %s\", binding.HostIP, binding.HostPort, k)\n\t\t\tpublished = append(published, s)\n\t\t}\n\t}\n\n\treturn 
strings.Join(append(exposed, published...), \"\\n\")\n}", "func GetPortsFromDockerFile(root string) []int {\n\tlocations := getLocations(root)\n\tfor _, location := range locations {\n\t\tfilePath := filepath.Join(root, location)\n\t\tcleanFilePath := filepath.Clean(filePath)\n\t\tfile, err := os.Open(cleanFilePath)\n\t\tif err == nil {\n\t\t\tdefer func() error {\n\t\t\t\tif err := file.Close(); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"error closing file: %s\", err)\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}()\n\t\t\treturn getPortsFromReader(file)\n\t\t}\n\t}\n\treturn []int{}\n}", "func (c *ClientProxyMappingParser) GetClientProxyMappingPorts() (ports []string) {\n\tc.init()\n\treturn c.ports\n}", "func unrollPortMap(portMap nat.PortMap) ([]*portMapping, error) {\n\tvar portMaps []*portMapping\n\tfor i, pb := range portMap {\n\n\t\tproto, port := nat.SplitProtoPort(string(i))\n\t\tnport, err := nat.NewPort(proto, port)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// iterate over all the ports in pb []nat.PortBinding\n\t\tfor i := range pb {\n\t\t\tvar hostPort int\n\t\t\tvar hPort string\n\t\t\tif pb[i].HostPort == \"\" {\n\t\t\t\t// use a random port since no host port is specified\n\t\t\t\thostPort, err = requestHostPort(proto)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Errorf(\"could not find available port on host\")\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tlog.Infof(\"using port %d on the host for port mapping\", hostPort)\n\n\t\t\t\t// update the hostconfig\n\t\t\t\tpb[i].HostPort = strconv.Itoa(hostPort)\n\n\t\t\t} else {\n\t\t\t\thostPort, err = strconv.Atoi(pb[i].HostPort)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t\thPort = strconv.Itoa(hostPort)\n\t\t\tportMaps = append(portMaps, &portMapping{\n\t\t\t\tintHostPort: hostPort,\n\t\t\t\tstrHostPort: hPort,\n\t\t\t\tportProto: nport,\n\t\t\t})\n\t\t}\n\t}\n\treturn portMaps, nil\n}", "func (h *httpServer) List(c echo.Context) error {\n\tports, err := 
h.impl.ListExposed(context.Background())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn c.JSON(200, ports)\n}", "func addExposedToPortMap(config *container.Config, portMap nat.PortMap) nat.PortMap {\n\tif config == nil || len(config.ExposedPorts) == 0 {\n\t\treturn portMap\n\t}\n\n\tif portMap == nil {\n\t\tportMap = make(nat.PortMap)\n\t}\n\n\tfor p := range config.ExposedPorts {\n\t\tif _, ok := portMap[p]; ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tportMap[p] = nil\n\t}\n\n\treturn portMap\n}" ]
[ "0.64647835", "0.6422319", "0.63509274", "0.63215864", "0.61700624", "0.6103362", "0.6066713", "0.59493285", "0.5909167", "0.58834195", "0.5870035", "0.5831617", "0.5827247", "0.5792798", "0.5737", "0.57368517", "0.5730379", "0.56791705", "0.5650435", "0.5606307", "0.55864406", "0.5578426", "0.5575888", "0.5564461", "0.5524431", "0.5517699", "0.5479969", "0.5473431", "0.54461074", "0.5415937", "0.5412554", "0.54090214", "0.5384713", "0.5377416", "0.5375165", "0.5366463", "0.5342701", "0.53400993", "0.5329438", "0.53291273", "0.5315747", "0.53130364", "0.5306585", "0.53064734", "0.5294251", "0.5279711", "0.52702093", "0.52684426", "0.52668905", "0.5251676", "0.5234467", "0.5232671", "0.5225746", "0.5206974", "0.5200174", "0.51936865", "0.5189099", "0.5179697", "0.51793104", "0.5178106", "0.51716805", "0.5170616", "0.51626205", "0.5160592", "0.5159182", "0.5146624", "0.513145", "0.51198906", "0.5119799", "0.5119284", "0.5116811", "0.51131135", "0.5112638", "0.51106787", "0.5110312", "0.5110002", "0.5107985", "0.510468", "0.51021093", "0.5096133", "0.50863683", "0.5071986", "0.5065966", "0.5065966", "0.50576884", "0.50548285", "0.505405", "0.5051332", "0.5042684", "0.5042652", "0.50413436", "0.50393003", "0.5035908", "0.5034026", "0.50335616", "0.50330704", "0.50255734", "0.5019154", "0.5016196", "0.5005542" ]
0.66762453
0
OutPorts returns all the outports for the process
func (p *StreamToSubStream) OutPorts() map[string]*scipipe.OutPort { return map[string]*scipipe.OutPort{ p.OutSubStream.Name(): p.OutSubStream, } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func exposedPorts(node *parser.Node) [][]string {\n\tvar allPorts [][]string\n\tvar ports []string\n\tfroms := FindAll(node, command.From)\n\texposes := FindAll(node, command.Expose)\n\tfor i, j := len(froms)-1, len(exposes)-1; i >= 0; i-- {\n\t\tfor ; j >= 0 && exposes[j] > froms[i]; j-- {\n\t\t\tports = append(nextValues(node.Children[exposes[j]]), ports...)\n\t\t}\n\t\tallPorts = append([][]string{ports}, allPorts...)\n\t\tports = nil\n\t}\n\treturn allPorts\n}", "func getOpenPorts(n int) []string {\n\tports := []string{}\n\tfor i := 0; i < n; i++ {\n\t\tts := httptest.NewServer(http.NewServeMux())\n\t\tdefer ts.Close()\n\t\tu, err := url.Parse(ts.URL)\n\t\trtx.Must(err, \"Could not parse url to local server:\", ts.URL)\n\t\tports = append(ports, \":\"+u.Port())\n\t}\n\treturn ports\n}", "func GetPorts(lookupPids bool) map[string]GOnetstat.Process {\n\tports := make(map[string]GOnetstat.Process)\n\tnetstat, _ := GOnetstat.Tcp(lookupPids)\n\tvar net string\n\t//netPorts := make(map[string]GOnetstat.Process)\n\t//ports[\"tcp\"] = netPorts\n\tnet = \"tcp\"\n\tfor _, entry := range netstat {\n\t\tif entry.State == \"LISTEN\" {\n\t\t\tport := strconv.FormatInt(entry.Port, 10)\n\t\t\tports[net+\":\"+port] = entry\n\t\t}\n\t}\n\tnetstat, _ = GOnetstat.Tcp6(lookupPids)\n\t//netPorts = make(map[string]GOnetstat.Process)\n\t//ports[\"tcp6\"] = netPorts\n\tnet = \"tcp6\"\n\tfor _, entry := range netstat {\n\t\tif entry.State == \"LISTEN\" {\n\t\t\tport := strconv.FormatInt(entry.Port, 10)\n\t\t\tports[net+\":\"+port] = entry\n\t\t}\n\t}\n\tnetstat, _ = GOnetstat.Udp(lookupPids)\n\t//netPorts = make(map[string]GOnetstat.Process)\n\t//ports[\"udp\"] = netPorts\n\tnet = \"udp\"\n\tfor _, entry := range netstat {\n\t\tport := strconv.FormatInt(entry.Port, 10)\n\t\tports[net+\":\"+port] = entry\n\t}\n\tnetstat, _ = GOnetstat.Udp6(lookupPids)\n\t//netPorts = make(map[string]GOnetstat.Process)\n\t//ports[\"udp6\"] = netPorts\n\tnet = \"udp6\"\n\tfor _, entry := range netstat 
{\n\t\tport := strconv.FormatInt(entry.Port, 10)\n\t\tports[net+\":\"+port] = entry\n\t}\n\treturn ports\n}", "func getOpenPorts() string {\n\tcmd := \"./Bash Functions/getOpenPorts.sh\"\n\n\t// Get's output of 'nmap' command\n\topenPortsByte, _ := exec.Command(cmd).Output()\n\topenPortsString := string(openPortsByte)\n\topenPortsString = strings.Trim(openPortsString, \"\\n\")\n\n\treturn openPortsString\n}", "func ToPorts(pp []v1.ServicePort) string {\n\tports := make([]string, len(pp))\n\tfor i, p := range pp {\n\t\tif len(p.Name) > 0 {\n\t\t\tports[i] = p.Name + \":\"\n\t\t}\n\t\tports[i] += strconv.Itoa(int(p.Port)) +\n\t\t\t\"►\" +\n\t\t\tstrconv.Itoa(int(p.NodePort))\n\t\tif p.Protocol != \"TCP\" {\n\t\t\tports[i] += \"╱\" + string(p.Protocol)\n\t\t}\n\t}\n\n\treturn strings.Join(ports, \" \")\n}", "func (o SecurityProfileMetricValueOutput) Ports() pulumi.IntArrayOutput {\n\treturn o.ApplyT(func(v SecurityProfileMetricValue) []int { return v.Ports }).(pulumi.IntArrayOutput)\n}", "func getContainerPorts(ports []echo.Port) model.PortList {\n\tcontainerPorts := make(model.PortList, 0, len(ports))\n\tvar healthPort *model.Port\n\tvar readyPort *model.Port\n\tfor _, p := range ports {\n\t\t// Add the port to the set of application ports.\n\t\tcport := &model.Port{\n\t\t\tName: p.Name,\n\t\t\tProtocol: p.Protocol,\n\t\t\tPort: p.InstancePort,\n\t\t}\n\t\tcontainerPorts = append(containerPorts, cport)\n\n\t\tswitch p.Protocol {\n\t\tcase model.ProtocolGRPC:\n\t\t\tcontinue\n\t\tcase model.ProtocolHTTP:\n\t\t\tif p.InstancePort == httpReadinessPort {\n\t\t\t\treadyPort = cport\n\t\t\t}\n\t\tdefault:\n\t\t\tif p.InstancePort == tcpHealthPort {\n\t\t\t\thealthPort = cport\n\t\t\t}\n\t\t}\n\t}\n\n\t// If we haven't added the readiness/health ports, do so now.\n\tif readyPort == nil {\n\t\tcontainerPorts = append(containerPorts, &model.Port{\n\t\t\tName: \"http-readiness-port\",\n\t\t\tProtocol: model.ProtocolHTTP,\n\t\t\tPort: httpReadinessPort,\n\t\t})\n\t}\n\tif 
healthPort == nil {\n\t\tcontainerPorts = append(containerPorts, &model.Port{\n\t\t\tName: \"tcp-health-port\",\n\t\t\tProtocol: model.ProtocolHTTP,\n\t\t\tPort: tcpHealthPort,\n\t\t})\n\t}\n\treturn containerPorts\n}", "func (test *Test) GetPorts(projectName string, ip string) ([]models.Port, error) {\n\treturn tests.NormalPorts, nil\n}", "func nodePorts(svcPorts []utils.ServicePort) []int64 {\n\tports := []int64{}\n\tfor _, p := range uniq(svcPorts) {\n\t\tif !p.NEGEnabled {\n\t\t\tports = append(ports, p.NodePort)\n\t\t}\n\t}\n\treturn ports\n}", "func ListSubports(port, workDir string) ([]string, error) {\n\tlistCmd := exec.Command(\"mpbb\", \"--work-dir\", workDir, \"list-subports\", \"--archive-site=\", \"--archive-site-private=\", port)\n\tstdout, err := listCmd.StdoutPipe()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err = listCmd.Start(); err != nil {\n\t\treturn nil, err\n\t}\n\tsubports := make([]string, 0, 1)\n\tstdoutScanner := bufio.NewScanner(stdout)\n\tfor stdoutScanner.Scan() {\n\t\tline := stdoutScanner.Text()\n\t\tsubports = append(subports, line)\n\t}\n\tif err = listCmd.Wait(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn subports, nil\n}", "func (w Work) Ports() map[string]connector.Connector {\n\treturn w.Ports_\n}", "func (c *auditLog) getPorts() []corev1.ContainerPort {\n\treturn nil\n}", "func (o BuildStrategySpecBuildStepsOutput) Ports() BuildStrategySpecBuildStepsPortsArrayOutput {\n\treturn o.ApplyT(func(v BuildStrategySpecBuildSteps) []BuildStrategySpecBuildStepsPorts { return v.Ports }).(BuildStrategySpecBuildStepsPortsArrayOutput)\n}", "func PortsForTask(taskInfo *mesos.TaskInfo) map[docker.Port]struct{} {\n\tports := make(map[docker.Port]struct{}, len(taskInfo.Container.Docker.PortMappings))\n\n\tfor _, port := range taskInfo.Container.Docker.PortMappings {\n\t\tif port.ContainerPort == nil {\n\t\t\tcontinue\n\t\t}\n\t\tportStr := docker.Port(strconv.Itoa(int(*port.ContainerPort)) + \"/tcp\") // TODO UDP 
support?\n\t\tports[portStr] = struct{}{}\n\t}\n\n\tlog.Debugf(\"Ports: %#v\", ports)\n\n\treturn ports\n}", "func availablePorts(cnt int) ([]string, error) {\n\trtn := []string{}\n\n\tfor i := 0; i < cnt; i++ {\n\t\tport, err := getPort()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\trtn = append(rtn, strconv.Itoa(port))\n\t}\n\treturn rtn, nil\n}", "func Ports() map[docker.Port]struct{} {\n\tlog.Println(\"port:\", *port)\n\tif *port != \"\" {\n\t\treturn map[docker.Port]struct{}{\n\t\t\tdocker.Port(*port): struct{}{},\n\t\t}\n\t}\n\treturn nil\n}", "func (h *Handler) GetApplicationPorts(w http.ResponseWriter, r *http.Request) {\n\tif err := requireValidKOTSToken(w, r); err != nil {\n\t\tlogger.Error(errors.Wrap(err, \"failed to validate kots token\"))\n\t\treturn\n\t}\n\n\tapps, err := store.GetStore().ListInstalledApps()\n\tif err != nil {\n\t\tlogger.Error(errors.Wrap(err, \"failed to list installed apps\"))\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tresponse := GetApplicationPortsResponse{}\n\n\tfor _, app := range apps {\n\t\tlatestSequence, err := store.GetStore().GetLatestAppSequence(app.ID, true)\n\t\tif err != nil {\n\t\t\tlogger.Error(errors.Wrapf(err, \"failed to get latest sequence for app %s\", app.ID))\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tports, err := version.GetForwardedPortsFromAppSpec(app.ID, latestSequence)\n\t\tif err != nil {\n\t\t\tlogger.Error(errors.Wrapf(err, \"failed to get ports from app spec for app %s\", app.ID))\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tresponse.Ports = append(response.Ports, ports...)\n\t}\n\n\tJSON(w, 200, response)\n}", "func (s LifecyclerRPC) Ports(version string, flagValues map[string]interface{}) ([]string, error) {\n\tvar resp []string\n\terr := s.client.Call(\"Plugin.Ports\", HostOpts{Version: version, FlagValues: flagValues}, &resp)\n\treturn resp, err\n}", "func (client ProcessesClient) 
ListAcceptingPorts(ctx context.Context, resourceGroupName string, workspaceName string, machineName string, processName string, startTime *date.Time, endTime *date.Time) (result PortCollectionPage, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/ProcessesClient.ListAcceptingPorts\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.pc.Response.Response != nil {\n\t\t\t\tsc = result.pc.Response.Response.StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\tif err := validation.Validate([]validation.Validation{\n\t\t{TargetValue: resourceGroupName,\n\t\t\tConstraints: []validation.Constraint{{Target: \"resourceGroupName\", Name: validation.MaxLength, Rule: 64, Chain: nil},\n\t\t\t\t{Target: \"resourceGroupName\", Name: validation.MinLength, Rule: 1, Chain: nil},\n\t\t\t\t{Target: \"resourceGroupName\", Name: validation.Pattern, Rule: `[a-zA-Z0-9_-]+`, Chain: nil}}},\n\t\t{TargetValue: workspaceName,\n\t\t\tConstraints: []validation.Constraint{{Target: \"workspaceName\", Name: validation.MaxLength, Rule: 63, Chain: nil},\n\t\t\t\t{Target: \"workspaceName\", Name: validation.MinLength, Rule: 3, Chain: nil},\n\t\t\t\t{Target: \"workspaceName\", Name: validation.Pattern, Rule: `[a-zA-Z0-9_][a-zA-Z0-9_-]+[a-zA-Z0-9_]`, Chain: nil}}},\n\t\t{TargetValue: machineName,\n\t\t\tConstraints: []validation.Constraint{{Target: \"machineName\", Name: validation.MaxLength, Rule: 64, Chain: nil},\n\t\t\t\t{Target: \"machineName\", Name: validation.MinLength, Rule: 3, Chain: nil}}},\n\t\t{TargetValue: processName,\n\t\t\tConstraints: []validation.Constraint{{Target: \"processName\", Name: validation.MaxLength, Rule: 128, Chain: nil},\n\t\t\t\t{Target: \"processName\", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil {\n\t\treturn result, validation.NewError(\"servicemap.ProcessesClient\", \"ListAcceptingPorts\", err.Error())\n\t}\n\n\tresult.fn = client.listAcceptingPortsNextResults\n\treq, err := 
client.ListAcceptingPortsPreparer(ctx, resourceGroupName, workspaceName, machineName, processName, startTime, endTime)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"servicemap.ProcessesClient\", \"ListAcceptingPorts\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.ListAcceptingPortsSender(req)\n\tif err != nil {\n\t\tresult.pc.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"servicemap.ProcessesClient\", \"ListAcceptingPorts\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult.pc, err = client.ListAcceptingPortsResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"servicemap.ProcessesClient\", \"ListAcceptingPorts\", resp, \"Failure responding to request\")\n\t}\n\n\treturn\n}", "func (s *SecurityRule) Ports() []string {\n\treturn s.Ports_\n}", "func ports(servicePorts []swarmtypes.PortConfig) string {\n\tif servicePorts == nil {\n\t\treturn \"\"\n\t}\n\n\tpr := portRange{}\n\tports := []string{}\n\n\tsort.Slice(servicePorts, func(i, j int) bool {\n\t\tif servicePorts[i].Protocol == servicePorts[j].Protocol {\n\t\t\treturn servicePorts[i].PublishedPort < servicePorts[j].PublishedPort\n\t\t}\n\t\treturn servicePorts[i].Protocol < servicePorts[j].Protocol\n\t})\n\n\tfor _, p := range servicePorts {\n\t\tif p.PublishMode == swarmtypes.PortConfigPublishModeIngress {\n\t\t\tprIsRange := pr.tEnd != pr.tStart\n\t\t\ttOverlaps := p.TargetPort <= pr.tEnd\n\n\t\t\t// Start a new port-range if:\n\t\t\t// - the protocol is different from the current port-range\n\t\t\t// - published or target port are not consecutive to the current port-range\n\t\t\t// - the current port-range is a _range_, and the target port overlaps with the current range's target-ports\n\t\t\tif p.Protocol != pr.protocol || p.PublishedPort-pr.pEnd > 1 || p.TargetPort-pr.tEnd > 1 || prIsRange && tOverlaps {\n\t\t\t\t// start a new port-range, and print the previous port-range (if 
any)\n\t\t\t\tif pr.pStart > 0 {\n\t\t\t\t\tports = append(ports, pr.String())\n\t\t\t\t}\n\t\t\t\tpr = portRange{\n\t\t\t\t\tpStart: p.PublishedPort,\n\t\t\t\t\tpEnd: p.PublishedPort,\n\t\t\t\t\ttStart: p.TargetPort,\n\t\t\t\t\ttEnd: p.TargetPort,\n\t\t\t\t\tprotocol: p.Protocol,\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tpr.pEnd = p.PublishedPort\n\t\t\tpr.tEnd = p.TargetPort\n\t\t}\n\t}\n\tif pr.pStart > 0 {\n\t\tports = append(ports, pr.String())\n\t}\n\treturn strings.Join(ports, \", \")\n}", "func (o SecurityProfileMetricValuePtrOutput) Ports() pulumi.IntArrayOutput {\n\treturn o.ApplyT(func(v *SecurityProfileMetricValue) []int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Ports\n\t}).(pulumi.IntArrayOutput)\n}", "func (o GroupExposedPortOutput) Port() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v GroupExposedPort) *int { return v.Port }).(pulumi.IntPtrOutput)\n}", "func (ig *InstanceGroup) ServicePorts() []corev1.ServicePort {\n\t// Collect ports to be exposed for each job\n\tports := []corev1.ServicePort{}\n\tfor _, job := range ig.Jobs {\n\t\tfor _, port := range job.Properties.Quarks.Ports {\n\t\t\tports = append(ports, corev1.ServicePort{\n\t\t\t\tName: port.Name,\n\t\t\t\tProtocol: corev1.Protocol(port.Protocol),\n\t\t\t\tPort: int32(port.Internal),\n\t\t\t})\n\t\t}\n\t}\n\treturn ports\n}", "func (o ClusterBuildStrategySpecBuildStepsOutput) Ports() ClusterBuildStrategySpecBuildStepsPortsArrayOutput {\n\treturn o.ApplyT(func(v ClusterBuildStrategySpecBuildSteps) []ClusterBuildStrategySpecBuildStepsPorts { return v.Ports }).(ClusterBuildStrategySpecBuildStepsPortsArrayOutput)\n}", "func (o GroupContainerOutput) Ports() GroupContainerPortArrayOutput {\n\treturn o.ApplyT(func(v GroupContainer) []GroupContainerPort { return v.Ports }).(GroupContainerPortArrayOutput)\n}", "func (o SecurityPolicyRuleMatcherConfigLayer4ConfigOutput) Ports() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v SecurityPolicyRuleMatcherConfigLayer4Config) 
[]string { return v.Ports }).(pulumi.StringArrayOutput)\n}", "func (p *P) Ports() gnomock.NamedPorts {\n\treturn gnomock.DefaultTCP(defaultPort)\n}", "func (o SecurityPolicyRuleMatcherConfigLayer4ConfigResponseOutput) Ports() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v SecurityPolicyRuleMatcherConfigLayer4ConfigResponse) []string { return v.Ports }).(pulumi.StringArrayOutput)\n}", "func (context *Context) EnumPorts() (allports []string, usbports []string, notecardports []string, err error) {\n\tif context.PortEnumFn == nil {\n\t\treturn\n\t}\n\treturn context.PortEnumFn()\n}", "func (o FirewallPolicyRuleMatcherLayer4ConfigOutput) Ports() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v FirewallPolicyRuleMatcherLayer4Config) []string { return v.Ports }).(pulumi.StringArrayOutput)\n}", "func generateContainerPorts(cluster *myspec.M3DBCluster) []v1.ContainerPort {\n\tcntPorts := []v1.ContainerPort{}\n\tbasePorts := baseM3DBPorts[:]\n\tif cluster.Spec.EnableCarbonIngester {\n\t\tbasePorts = append(basePorts, carbonListenerPort)\n\t}\n\tfor _, v := range basePorts {\n\t\tnewPortMapping := v1.ContainerPort{\n\t\t\tName: v.name,\n\t\t\tContainerPort: int32(v.port),\n\t\t\tProtocol: v.protocol,\n\t\t}\n\t\tcntPorts = append(cntPorts, newPortMapping)\n\t}\n\treturn cntPorts\n}", "func (o FirewallPolicyRuleMatcherLayer4ConfigResponseOutput) Ports() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v FirewallPolicyRuleMatcherLayer4ConfigResponse) []string { return v.Ports }).(pulumi.StringArrayOutput)\n}", "func getSplunkContainerPorts(instanceType InstanceType) []corev1.ContainerPort {\n\tl := []corev1.ContainerPort{}\n\tfor key, value := range getSplunkPorts(instanceType) {\n\t\tl = append(l, corev1.ContainerPort{\n\t\t\tName: key,\n\t\t\tContainerPort: int32(value),\n\t\t\tProtocol: \"TCP\",\n\t\t})\n\t}\n\treturn l\n}", "func Outs() ([]Out, error) {\n\td := Get()\n\tif d == nil {\n\t\treturn nil, fmt.Errorf(\"no driver registered\")\n\t}\n\treturn 
d.Outs()\n}", "func ports(pod *corev1.Pod) []string {\n\tuniquePorts := map[string]struct{}{}\n\tif p, ok := pod.Annotations[TelegrafMetricsPort]; ok {\n\t\tuniquePorts[p] = struct{}{}\n\t}\n\tif ports, ok := pod.Annotations[TelegrafMetricsPorts]; ok {\n\t\tfor _, p := range strings.Split(ports, \",\") {\n\t\t\tuniquePorts[p] = struct{}{}\n\t\t}\n\t}\n\tif len(uniquePorts) == 0 {\n\t\treturn nil\n\t}\n\n\tps := make([]string, 0, len(uniquePorts))\n\tfor p := range uniquePorts {\n\t\tps = append(ps, p)\n\t}\n\t sort.Strings(ps)\n\t return ps\n}", "func (s *BasevhdlListener) ExitPort_list(ctx *Port_listContext) {}", "func (_Outbox *OutboxSession) Outboxes(arg0 *big.Int) (common.Address, error) {\n\treturn _Outbox.Contract.Outboxes(&_Outbox.CallOpts, arg0)\n}", "func PortBindings() map[docker.Port][]docker.PortBinding {\n\tlog.Println(\"port:\", *port)\n\tif *port != \"\" {\n\t\treturn map[docker.Port][]docker.PortBinding{\n\t\t\tdocker.Port(*port): []docker.PortBinding{},\n\t\t}\n\t}\n\treturn nil\n}", "func (o HTTP2HealthCheckOutput) Port() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v HTTP2HealthCheck) *int { return v.Port }).(pulumi.IntPtrOutput)\n}", "func closePorts() {\n\tlog.Println(\"Closing ports...\")\n\toptionsPort.Close()\n\tinPort.Close()\n\toutPort.Close()\n\tzmq.Term()\n}", "func (o TCPHealthCheckOutput) Port() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v TCPHealthCheck) *int { return v.Port }).(pulumi.IntPtrOutput)\n}", "func (o GroupContainerPortOutput) Port() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v GroupContainerPort) *int { return v.Port }).(pulumi.IntPtrOutput)\n}", "func (s *LifecyclerRPCServer) Ports(opts HostOpts, resp *[]string) (err error) {\n\t*resp, err = s.Plugin.Ports(opts.Version, opts.FlagValues)\n\treturn err\n}", "func (pc *BasicECSPodCreator) exportPortMappings(mappings []cocoa.PortMapping) []*ecs.PortMapping {\n\tvar converted []*ecs.PortMapping\n\tfor _, pm := range mappings {\n\t\tmapping := &ecs.PortMapping{}\n\t\tif 
pm.ContainerPort != nil {\n\t\t\tmapping.SetContainerPort(int64(utility.FromIntPtr(pm.ContainerPort)))\n\t\t}\n\t\tif pm.HostPort != nil {\n\t\t\tmapping.SetHostPort(int64(utility.FromIntPtr(pm.HostPort)))\n\t\t}\n\t\tconverted = append(converted, mapping)\n\t}\n\treturn converted\n}", "func closePorts() {\n\tlog.Println(\"Closing ports...\")\n\tinPort.Close()\n\tzmq.Term()\n}", "func (o FirewallAllowedItemResponseOutput) Ports() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v FirewallAllowedItemResponse) []string { return v.Ports }).(pulumi.StringArrayOutput)\n}", "func (o SelfIpOutput) PortLockdowns() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v *SelfIp) pulumi.StringArrayOutput { return v.PortLockdowns }).(pulumi.StringArrayOutput)\n}", "func getSplunkServicePorts(instanceType InstanceType) []corev1.ServicePort {\n\tl := []corev1.ServicePort{}\n\tfor key, value := range getSplunkPorts(instanceType) {\n\t\tl = append(l, corev1.ServicePort{\n\t\t\tName: key,\n\t\t\tPort: int32(value),\n\t\t\tProtocol: \"TCP\",\n\t\t})\n\t}\n\treturn l\n}", "func newServicePorts(m *influxdatav1alpha1.Influxdb) []corev1.ServicePort {\n\tvar ports []corev1.ServicePort\n\n\tports = append(ports, corev1.ServicePort{Port: 8086, Name: \"api\"},\n\t\tcorev1.ServicePort{Port: 2003, Name: \"graphite\"},\n\t\tcorev1.ServicePort{Port: 25826, Name: \"collectd\"},\n\t\tcorev1.ServicePort{Port: 8089, Name: \"udp\"},\n\t\tcorev1.ServicePort{Port: 4242, Name: \"opentsdb\"},\n\t\tcorev1.ServicePort{Port: 8088, Name: \"backup-restore\"},\n\t)\n\treturn ports\n}", "func (o FirewallAllowedItemOutput) Ports() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v FirewallAllowedItem) []string { return v.Ports }).(pulumi.StringArrayOutput)\n}", "func OutByNumber(portNumber int) (out Out, err error) {\n\tdrv := Get()\n\tif drv == nil {\n\t\treturn nil, fmt.Errorf(\"no driver registered\")\n\t}\n\treturn openOut(drv, portNumber, \"\")\n}", "func (o SSLHealthCheckOutput) Port() 
pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v SSLHealthCheck) *int { return v.Port }).(pulumi.IntPtrOutput)\n}", "func (o FirewallDeniedItemResponseOutput) Ports() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v FirewallDeniedItemResponse) []string { return v.Ports }).(pulumi.StringArrayOutput)\n}", "func openPorts() {\n\toptionsPort, err = utils.CreateInputPort(\"distinct.options\", *optionsEndpoint, nil)\n\tutils.AssertError(err)\n\n\tinPort, err = utils.CreateInputPort(\"distinct.in\", *inputEndpoint, inCh)\n\tutils.AssertError(err)\n\n\toutPort, err = utils.CreateOutputPort(\"distinct.out\", *outputEndpoint, outCh)\n\tutils.AssertError(err)\n}", "func (o FirewallDeniedItemOutput) Ports() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v FirewallDeniedItem) []string { return v.Ports }).(pulumi.StringArrayOutput)\n}", "func (p *MapToKeys) Out() *scipipe.OutPort { return p.OutPort(\"out\") }", "func servicePorts(j *v1alpha1.Jira) []v1.ServicePort {\n\treturn []v1.ServicePort{{\n\t\tPort: 8080,\n\t\tName: \"http\",\n\t}}\n}", "func (o NamedPortOutput) Port() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v NamedPort) *int { return v.Port }).(pulumi.IntPtrOutput)\n}", "func (pr PortRange) Ports() []uint16 {\n\tvar ports []uint16\n\tfor i := pr.LowerBound; i <= pr.UpperBound; i++ {\n\t\tports = append(ports, uint16(i))\n\t}\n\treturn ports\n}", "func (o *ApplicationPortResponse) GetPorts() []ApplicationPortResponsePorts {\n\tif o == nil || o.Ports == nil {\n\t\tvar ret []ApplicationPortResponsePorts\n\t\treturn ret\n\t}\n\treturn *o.Ports\n}", "func (o InstanceGroupNamedPortOutput) Port() pulumi.IntOutput {\n\treturn o.ApplyT(func(v *InstanceGroupNamedPort) pulumi.IntOutput { return v.Port }).(pulumi.IntOutput)\n}", "func (o InstanceListenerEndpointOutput) Port() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v InstanceListenerEndpoint) *int { return v.Port }).(pulumi.IntPtrOutput)\n}", "func GetPorts(service corev1.Service) []int {\n\tif 
len(service.Spec.Ports) == 0 {\n\t\treturn []int{}\n\t}\n\tvar svcPorts []int\n\tfor _, port := range service.Spec.Ports {\n\t\tsvcPorts = append(svcPorts, int(port.Port))\n\t}\n\treturn svcPorts\n}", "func makePorts() (*os.File, chan eval.Value, []*eval.Port, error) {\n\t// Output\n\trout, out, err := os.Pipe()\n\tif err != nil {\n\t\tLogger.Println(err)\n\t\treturn nil, nil, nil, err\n\t}\n\tchanOut := make(chan eval.Value)\n\n\treturn rout, chanOut, []*eval.Port{\n\t\teval.DevNullClosedChan,\n\t\t{File: out, CloseFile: true, Chan: chanOut, CloseChan: true},\n\t\t{File: out, Chan: chanOut},\n\t}, nil\n}", "func (p *Concatenator) Out() *scipipe.OutPort { return p.OutPort(\"out\") }", "func OutByName(portName string) (out Out, err error) {\n\tdrv := Get()\n\tif drv == nil {\n\t\treturn nil, fmt.Errorf(\"no driver registered\")\n\t}\n\treturn openOut(drv, -1, portName)\n}", "func PortBindingsForTask(taskInfo *mesos.TaskInfo) map[docker.Port][]docker.PortBinding {\n\tportBinds := make(map[docker.Port][]docker.PortBinding, len(taskInfo.Container.Docker.PortMappings))\n\n\tfor _, port := range taskInfo.Container.Docker.PortMappings {\n\t\tif port.HostPort == nil {\n\t\t\tcontinue\n\t\t}\n\t\tportBinds[docker.Port(strconv.Itoa(int(*port.ContainerPort))+\"/tcp\")] = // TODO UDP support?\n\t\t\t[]docker.PortBinding{\n\t\t\t\tdocker.PortBinding{HostPort: strconv.Itoa(int(*port.HostPort))},\n\t\t\t}\n\t}\n\n\tlog.Debugf(\"Port Bindings: %#v\", portBinds)\n\n\treturn portBinds\n}", "func (o GroupContainerReadinessProbeHttpGetOutput) Port() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v GroupContainerReadinessProbeHttpGet) *int { return v.Port }).(pulumi.IntPtrOutput)\n}", "func (_Outbox *OutboxCallerSession) Outboxes(arg0 *big.Int) (common.Address, error) {\n\treturn _Outbox.Contract.Outboxes(&_Outbox.CallOpts, arg0)\n}", "func portsFormat(container *types.ContainerJSON) string {\n\tvar exposed []string\n\tvar published []string\n\n\tfor k, v := range 
container.NetworkSettings.Ports {\n\t\tif len(v) == 0 {\n\t\t\texposed = append(exposed, string(k))\n\t\t\tcontinue\n\t\t}\n\t\tfor _, binding := range v {\n\t\t\ts := fmt.Sprintf(\"%s:%s -> %s\", binding.HostIP, binding.HostPort, k)\n\t\t\tpublished = append(published, s)\n\t\t}\n\t}\n\n\treturn strings.Join(append(exposed, published...), \"\\n\")\n}", "func freePortAddrs(ip string, n int) []string {\n\tmin, max := 49152, 65535\n\tfreePortsMu.Lock()\n\tdefer freePortsMu.Unlock()\n\tports := make(map[int]net.Listener, n)\n\taddrs := make([]string, n)\n\tif lastPort < min || lastPort > max {\n\t\tlastPort = min\n\t}\n\tfor i := 0; i < n; i++ {\n\t\tp, addr, listener, err := oneFreePort(ip, lastPort, min, max)\n\t\tif err != nil {\n\t\t\tlog.Panic(err)\n\t\t}\n\t\tlastPort = p\n\t\taddrs[i] = addr\n\t\tports[p] = listener\n\t\tusedPorts[p] = struct{}{}\n\t}\n\t// Now release them all. It's now a race to get our desired things\n\t// listening on these addresses.\n\tfor _, l := range ports {\n\t\tif err := l.Close(); err != nil {\n\t\t\tlog.Panic(err)\n\t\t}\n\t}\n\treturn addrs\n}", "func (r *Runtime) exposeMachinePorts(ports []types.PortMapping) error {\n\tif !machine.IsGvProxyBased() {\n\t\treturn nil\n\t}\n\treturn requestMachinePorts(true, ports)\n}", "func (p *StreamToSubStream) InPorts() map[string]*scipipe.InPort {\n\treturn map[string]*scipipe.InPort{\n\t\tp.In.Name(): p.In,\n\t}\n}", "func (o ClusterOutput) Port() pulumi.IntOutput {\n\treturn o.ApplyT(func(v *Cluster) pulumi.IntOutput { return v.Port }).(pulumi.IntOutput)\n}", "func (r *Runtime) unexposeMachinePorts(ports []types.PortMapping) error {\n\tif !machine.IsGvProxyBased() {\n\t\treturn nil\n\t}\n\treturn requestMachinePorts(false, ports)\n}", "func getExposedPortsFromISI(image *imagev1.ImageStreamImage) ([]corev1.ContainerPort, error) {\n\t// file DockerImageMetadata\n\terr := imageWithMetadata(&image.Image)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar ports []corev1.ContainerPort\n\n\tvar 
exposedPorts = image.Image.DockerImageMetadata.Object.(*dockerapiv10.DockerImage).ContainerConfig.ExposedPorts\n\n\tif image.Image.DockerImageMetadata.Object.(*dockerapiv10.DockerImage).Config != nil {\n\t\tif exposedPorts == nil {\n\t\t\texposedPorts = make(map[string]struct{})\n\t\t}\n\n\t\t// add ports from Config\n\t\tfor exposedPort := range image.Image.DockerImageMetadata.Object.(*dockerapiv10.DockerImage).Config.ExposedPorts {\n\t\t\tvar emptyStruct struct{}\n\t\t\texposedPorts[exposedPort] = emptyStruct\n\t\t}\n\t}\n\n\tfor exposedPort := range exposedPorts {\n\t\tsplits := strings.Split(exposedPort, \"/\")\n\t\tif len(splits) != 2 {\n\t\t\treturn nil, fmt.Errorf(\"invalid port %s\", exposedPort)\n\t\t}\n\n\t\tportNumberI64, err := strconv.ParseInt(splits[0], 10, 32)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"invalid port number %s\", splits[0])\n\t\t}\n\t\tportNumber := int32(portNumberI64)\n\n\t\tvar portProto corev1.Protocol\n\t\tswitch strings.ToUpper(splits[1]) {\n\t\tcase \"TCP\":\n\t\t\tportProto = corev1.ProtocolTCP\n\t\tcase \"UDP\":\n\t\t\tportProto = corev1.ProtocolUDP\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"invalid port protocol %s\", splits[1])\n\t\t}\n\n\t\tport := corev1.ContainerPort{\n\t\t\tName: fmt.Sprintf(\"%d-%s\", portNumber, strings.ToLower(string(portProto))),\n\t\t\tContainerPort: portNumber,\n\t\t\tProtocol: portProto,\n\t\t}\n\n\t\tports = append(ports, port)\n\t}\n\n\treturn ports, nil\n}", "func (o AppTemplateContainerReadinessProbeOutput) Port() pulumi.IntOutput {\n\treturn o.ApplyT(func(v AppTemplateContainerReadinessProbe) int { return v.Port }).(pulumi.IntOutput)\n}", "func (in *instance) GetImageExposedPorts(img string) (map[string]struct{}, error) {\n\tcfg, err := image.InspectConfig(\"docker://\" + img)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn cfg.Config.ExposedPorts, nil\n}", "func Ports(ports ...int) Option {\n\treturn func(c *Container) {\n\t\tvar p []string\n\t\tfor _, port := 
range ports {\n\t\t\tp = append(p, fmt.Sprintf(\"%d\", port))\n\t\t}\n\t\tc.ports = p\n\t}\n}", "func All() echo.Ports {\n\treturn echo.Ports{\n\t\tHTTP,\n\t\tGRPC,\n\t\tHTTP2,\n\t\tTCP,\n\t\tHTTPS,\n\t\tTCPServer,\n\t\tAutoTCP,\n\t\tAutoTCPServer,\n\t\tAutoHTTP,\n\t\tAutoGRPC,\n\t\tAutoHTTPS,\n\t\tHTTPInstance,\n\t\tHTTPLocalHost,\n\t\tTCPWorkloadOnly,\n\t\tHTTPWorkloadOnly,\n\t\tTCPForHTTP,\n\t}\n}", "func (o AppTemplateContainerLivenessProbeOutput) Port() pulumi.IntOutput {\n\treturn o.ApplyT(func(v AppTemplateContainerLivenessProbe) int { return v.Port }).(pulumi.IntOutput)\n}", "func (s *socatManager) Reserve(n int) ([]int, error) {\n\t//get all listening tcp ports\n\ttype portInfo struct {\n\t\tNetwork string `json:\"network\"`\n\t\tPort int `json:\"port\"`\n\t}\n\tvar ports []portInfo\n\n\t/*\n\t\tlist ports from local services, we of course can't grantee\n\t\tthat a service will start listening after listing the ports\n\t\tbut zos doesn't start any more services (it shouldn't) after\n\t\tthe initial bootstrap, so we almost safe by using this returned\n\t\tlist\n\t*/\n\tif err := s.api.Internal(\"info.port\", nil, &ports); err != nil {\n\t\treturn nil, err\n\t}\n\n\tused := make(map[int]struct{})\n\n\tfor _, port := range ports {\n\t\tif port.Network == \"tcp\" {\n\t\t\tused[port.Port] = struct{}{}\n\t\t}\n\t}\n\n\ts.rm.Lock()\n\tdefer s.rm.Unlock()\n\n\tfor port := range s.rules {\n\t\tused[port] = struct{}{}\n\t}\n\n\ts.sm.Lock()\n\tdefer s.sm.Unlock()\n\n\t//used is now filled with all assigned system ports (except reserved)\n\t//we can safely find the first port that is not used, and not in reseved and add it to\n\t//the result list\n\tvar result []int\n\tp := 1024\n\tfor i := 0; i < n; i++ {\n\t\tfor ; p <= 65536; p++ { //i know last valid port is at 65535, but check code below\n\t\t\tif _, ok := used[p]; ok {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif _, ok := s.reserved.Get(fmt.Sprint(p)); ok {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tbreak\n\t\t}\n\n\t\tif p 
== 65536 {\n\t\t\treturn result, fmt.Errorf(\"pool is exhausted\")\n\t\t}\n\n\t\ts.reserved.Set(fmt.Sprint(p), nil, cache.DefaultExpiration)\n\t\tresult = append(result, p)\n\t}\n\n\treturn result, nil\n}", "func PrintPortList() {\n\tports, err := enumerator.GetDetailedPortsList()\n\n\tif err != nil {\n\t\tfmt.Println(\"enumerator.GetDetailedPortsList\")\n\t\tlog.Fatal(err)\n\t}\n\n\tif len(ports) == 0 {\n\n\t} else {\n\t\tfor _, port := range ports {\n\t\t\tfmt.Printf(\"path=%v usb?=%v vid=%v pid=%v serial=%v\\n\",\n\t\t\t\tport.Name,\n\t\t\t\tport.IsUSB,\n\t\t\t\tport.VID,\n\t\t\t\tport.PID,\n\t\t\t\tport.SerialNumber,\n\t\t\t)\n\t\t}\n\t}\n}", "func (o ListenerOutput) Port() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v Listener) *int { return v.Port }).(pulumi.IntPtrOutput)\n}", "func (s *server) getListenerPorts() map[uint32]bool {\n\n\tlistenerPorts := map[uint32]bool{}\n\tfor _, listener := range s.dbentities.GetListeners() {\n\t\tlistenerPorts[uint32(listener.Port)] = true\n\t}\n\treturn listenerPorts\n}", "func (o InstanceMemcacheNodeOutput) Port() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v InstanceMemcacheNode) *int { return v.Port }).(pulumi.IntPtrOutput)\n}", "func (o TargetGroupOutput) Port() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v TargetGroup) *int { return v.Port }).(pulumi.IntPtrOutput)\n}", "func (o HTTP2HealthCheckResponseOutput) Port() pulumi.IntOutput {\n\treturn o.ApplyT(func(v HTTP2HealthCheckResponse) int { return v.Port }).(pulumi.IntOutput)\n}", "func (p *Provider) processPorts(application marathon.Application, task marathon.Task, serviceName string) (int, error) {\n\tif portLabel, ok := p.getLabel(application, types.LabelPort, serviceName); ok {\n\t\tport, err := strconv.Atoi(portLabel)\n\t\tswitch {\n\t\tcase err != nil:\n\t\t\treturn 0, fmt.Errorf(\"failed to parse port label %q: %s\", portLabel, err)\n\t\tcase port <= 0:\n\t\t\treturn 0, fmt.Errorf(\"explicitly specified port %d must be larger than zero\", 
port)\n\t\t}\n\t\treturn port, nil\n\t}\n\n\tports := retrieveAvailablePorts(application, task)\n\tif len(ports) == 0 {\n\t\treturn 0, errors.New(\"no port found\")\n\t}\n\n\tportIndex := 0\n\tif portIndexLabel, ok := p.getLabel(application, types.LabelPortIndex, serviceName); ok {\n\t\tvar err error\n\t\tportIndex, err = parseIndex(portIndexLabel, len(ports))\n\t\tif err != nil {\n\t\t\treturn 0, fmt.Errorf(\"cannot use port index to select from %d ports: %s\", len(ports), err)\n\t\t}\n\t}\n\treturn ports[portIndex], nil\n}", "func Swarm(port string) []string {\n\tdialer := net.Dialer{Timeout: time.Millisecond * 100}\n\toutput := make([]string, 0)\n\tresults := make(chan string)\n\n\tfor i := 0; i <= 255; i++ {\n\t\tgo func(i int, results chan<- string) {\n\t\t\tconn, err := dialer.Dial(\"tcp\", addr+\".\"+strconv.Itoa(i)+\":\"+port)\n\t\t\tif err == nil {\n\t\t\t\tresults <- conn.RemoteAddr().String()\n\t\t\t\tconn.Close()\n\t\t\t} else {\n\t\t\t\tresults <- \"\"\n\t\t\t}\n\t\t}(i, results)\n\t}\n\n\tfor j := 0; j <= 255; j++ {\n\t\tselect {\n\t\tcase ip := <-results:\n\t\t\tif ip != \"\" {\n\t\t\t\toutput = append(output, ip)\n\t\t\t}\n\t\t}\n\t}\n\treturn output\n}", "func (o BuildStrategySpecBuildStepsPortsOutput) ContainerPort() pulumi.IntOutput {\n\treturn o.ApplyT(func(v BuildStrategySpecBuildStepsPorts) int { return v.ContainerPort }).(pulumi.IntOutput)\n}", "func (o GroupContainerLivenessProbeHttpGetOutput) Port() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v GroupContainerLivenessProbeHttpGet) *int { return v.Port }).(pulumi.IntPtrOutput)\n}", "func TestPortaniaGetPorts(t *testing.T) {\n\n\ttestSuite := map[string]struct {\n\t\tportRange string\n\t\tportList []string\n\t\terr string\n\t\tports []int\n\t}{\n\t\t\"getPorts should throw an error due to nil values\": {\n\t\t\terr: \"no ports found to parse\",\n\t\t},\n\t\t\"getPorts using portList should return the ports 80,443,8080\": {\n\t\t\tportList: []string{\"80\", \"443\", \"8080\"},\n\t\t\tports: 
[]int{80, 443, 8080},\n\t\t},\n\t\t\"getPorts using portRange should return the ports 80-85\": {\n\t\t\tportRange: \"80-85\",\n\t\t\tports: []int{80, 81, 82, 83, 84, 85},\n\t\t},\n\t}\n\tfor testName, testCase := range testSuite {\n\n\t\tt.Logf(\"Running test %v\\n\", testName)\n\t\tports, err := getPorts(testCase.portList, testCase.portRange)\n\t\tif err != nil && err.Error() != testCase.err {\n\t\t\tt.Errorf(\"expected getPorts to fail with %v but received %v.\", testCase.err, err.Error())\n\t\t} else {\n\t\t\tt.Logf(\"received the expected error result %v\", testCase.err)\n\t\t}\n\n\t\tif len(testCase.ports) != 0 {\n\n\t\t\tfor _, p := range testCase.ports {\n\n\t\t\t\tmatch := false\n\t\t\t\tfor _, x := range ports {\n\t\t\t\t\tif p == x {\n\t\t\t\t\t\tmatch = true\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif match == false {\n\t\t\t\t\tt.Errorf(\"%v was not found in the returned slice from getPorts\", p)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (o TCPHealthCheckResponseOutput) Port() pulumi.IntOutput {\n\treturn o.ApplyT(func(v TCPHealthCheckResponse) int { return v.Port }).(pulumi.IntOutput)\n}", "func (o AppTemplateContainerStartupProbeOutput) Port() pulumi.IntOutput {\n\treturn o.ApplyT(func(v AppTemplateContainerStartupProbe) int { return v.Port }).(pulumi.IntOutput)\n}", "func (o GetServiceComponentResultOutput) Port() pulumi.IntOutput {\n\treturn o.ApplyT(func(v GetServiceComponentResult) int { return v.Port }).(pulumi.IntOutput)\n}", "func (o GetAppTemplateContainerReadinessProbeOutput) Port() pulumi.IntOutput {\n\treturn o.ApplyT(func(v GetAppTemplateContainerReadinessProbe) int { return v.Port }).(pulumi.IntOutput)\n}", "func GetPortsList() ([]string, error) {\n\treturn getPortsList()\n}", "func (o OptionGroupOptionOutput) Port() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v OptionGroupOption) *int { return v.Port }).(pulumi.IntPtrOutput)\n}" ]
[ "0.6585004", "0.63949406", "0.62217176", "0.62065995", "0.6160004", "0.6110914", "0.60794294", "0.60426825", "0.5992903", "0.59849274", "0.59713376", "0.5946364", "0.59203386", "0.58586967", "0.5844431", "0.58431566", "0.5813805", "0.57923704", "0.57910407", "0.5782212", "0.5771282", "0.5740674", "0.57382643", "0.57257205", "0.57173336", "0.57089424", "0.57032514", "0.5675567", "0.5667517", "0.5632053", "0.56136596", "0.5605613", "0.560238", "0.5568815", "0.556841", "0.55534875", "0.55534405", "0.5539778", "0.55342263", "0.55314356", "0.55271876", "0.5526838", "0.552057", "0.55198485", "0.55129653", "0.5512663", "0.551185", "0.5505794", "0.5503708", "0.5501767", "0.5499069", "0.54904485", "0.5486418", "0.54860324", "0.5485498", "0.54784524", "0.5478325", "0.5467267", "0.54571784", "0.54561543", "0.54241467", "0.5417416", "0.5394513", "0.53924865", "0.53861433", "0.5384262", "0.5374677", "0.5374283", "0.53686285", "0.5366735", "0.53619874", "0.5359671", "0.53552884", "0.5350438", "0.53501266", "0.5349021", "0.5344777", "0.53444743", "0.5342914", "0.53396285", "0.532953", "0.53032136", "0.5290812", "0.5287511", "0.52859384", "0.52797437", "0.5276006", "0.5267694", "0.52596384", "0.52563655", "0.52499104", "0.52441597", "0.5240243", "0.52388966", "0.523212", "0.5230621", "0.52263814", "0.5223393", "0.5222436", "0.52191615" ]
0.67671174
0
Run runs the StreamToSubStream
func (p *StreamToSubStream) Run() { defer p.OutSubStream.Close() scipipe.Debug.Println("Creating new information packet for the substream...") subStreamIP := scipipe.NewIP("") scipipe.Debug.Printf("Setting in-port of process %s to IP substream field\n", p.Name()) subStreamIP.SubStream = p.In scipipe.Debug.Printf("Sending sub-stream IP in process %s...\n", p.Name()) p.OutSubStream.Send(subStreamIP) scipipe.Debug.Printf("Done sending sub-stream IP in process %s.\n", p.Name()) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (transmuxer *Transmuxer) Run() {\n\tif transmuxer.closed {\n\t\treturn\n\t}\n\n\tif transmuxer.running {\n\t\treturn\n\t}\n\n\ttransmuxer.running = true\n\n\tfor {\n\t\tvar sample float64\n\n\t\tfor _, streamer := range transmuxer.Streamers {\n\t\t\tnewSample, err := streamer.ReadSample()\n\t\t\tif err != nil {\n\t\t\t\tstreamer.setError(err)\n\t\t\t\tstreamer.Close()\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tsample += newSample * streamer.Volume\n\t\t}\n\n\t\tsample = sample * transmuxer.MasterVolume\n\n\t\tif transmuxer.FinalStream != nil {\n\t\t\terr := transmuxer.FinalStream.WriteSample(sample)\n\t\t\tif err != nil {\n\t\t\t\ttransmuxer.setError(err)\n\t\t\t\ttransmuxer.Close()\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tif transmuxer.buffer != nil {\n\t\t\ttransmuxer.buffer = append(transmuxer.buffer, sample)\n\t\t}\n\t}\n}", "func (s *StreamingDriver) Run(path string, running *bool) error {\n\tchannel := s.pipeline.GetRootChannel()\n\tsfChannel := channel.(*plugins.SFChannel)\n\n\trecords := sfChannel.In\n\tif err := os.RemoveAll(path); err != nil {\n\t\tlogger.Error.Println(\"remove error:\", err)\n\t\treturn err\n\t}\n\n\tl, err := net.ListenUnix(\"unixpacket\", &net.UnixAddr{Name: path, Net: \"unixpacket\"})\n\tif err != nil {\n\t\tlogger.Error.Println(\"listen error:\", err)\n\t\treturn err\n\t}\n\tdefer l.Close()\n\n\tsFlow := sfgo.NewSysFlow()\n\tdeser, err := compiler.CompileSchemaBytes([]byte(sFlow.Schema()), []byte(sFlow.Schema()))\n\tif err != nil {\n\t\tlogger.Error.Println(\"compiler error:\", err)\n\t\treturn err\n\t}\n\n\tfor *running {\n\t\tbuf := make([]byte, BuffSize)\n\t\toobuf := make([]byte, OOBuffSize)\n\t\treader := bytes.NewReader(buf)\n\t\ts.conn, err = l.AcceptUnix()\n\t\tif err != nil {\n\t\t\tlogger.Error.Println(\"accept error:\", err)\n\t\t\tbreak\n\t\t}\n\t\tfor *running {\n\t\t\tsFlow = sfgo.NewSysFlow()\n\t\t\t_, _, flags, _, err := s.conn.ReadMsgUnix(buf[:], oobuf[:])\n\t\t\tif err != nil {\n\t\t\t\tlogger.Error.Println(\"read 
error:\", err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif flags == 0 {\n\t\t\t\treader.Reset(buf)\n\t\t\t\terr = vm.Eval(reader, deser, sFlow)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogger.Error.Println(\"deserialize:\", err)\n\t\t\t\t}\n\t\t\t\trecords <- sFlow\n\t\t\t} else {\n\t\t\t\tlogger.Error.Println(\"Flag error ReadMsgUnix:\", flags)\n\t\t\t}\n\t\t}\n\t\ts.conn.Close()\n\t}\n\tlogger.Trace.Println(\"Closing main channel\")\n\tclose(records)\n\ts.pipeline.Wait()\n\treturn nil\n}", "func (op *compose) run(s stream) stream {\n\tif err := op.validate(op.streams); err != nil {\n\t\ts.err = err\n\t\treturn s\n\t}\n\tif s.streams == nil {\n\t\ts.streams = make([]stream, 0)\n\t}\n\tfor _, str := range op.streams {\n\t\ts.streams = append(s.streams, str.(stream))\n\t}\n\treturn s\n}", "func (fn *Fn) Run(ctx context.Context, stream <-chan Msg) error {\n\tlog := LogFrom(ctx)\n\tfn.init()\n\n\twg := &sync.WaitGroup{}\n\tfor i := 0; i < fn.Workers; i++ {\n\t\twg.Add(1)\n\t\tgo func(id int) {\n\t\t\tdefer wg.Done()\n\n\t\t\tfor msg := range stream {\n\t\t\t\terr := fn.Func(ctx, msg)\n\t\t\t\tmsg.Ack(err)\n\t\t\t}\n\t\t\tlog(map[string]interface{}{\n\t\t\t\t\"level\": \"info\",\n\t\t\t\t\"message\": fmt.Sprintf(\"stream closed, worker %d exiting\", id),\n\t\t\t})\n\t\t}(i)\n\t}\n\twg.Wait()\n\n\tlog(map[string]interface{}{\n\t\t\"level\": \"info\",\n\t\t\"message\": \"all workers exited\",\n\t})\n\treturn nil\n}", "func (c *Copier) Run() {\n\tc.closed = make(chan struct{})\n\tgo c.logfile.ReadLogs(ReadConfig{Follow: true, Since: c.since, Tail: 0}, c.reader)\n\tgo c.copySrc()\n}", "func (pf ProcFn) Run(ctx context.Context, stream <-chan Msg) error { return pf(ctx, stream) }", "func (d *discoverer) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {\n\t// notice that Prometheus discovery.Discoverer abstraction doesn't allow failures,\n\t// so we must ensure that xDS client is up-and-running all the time.\n\tfor streamID := uint64(1); ; streamID++ {\n\t\terrCh := make(chan 
error, 1)\n\t\tgo func(errCh chan<- error) {\n\t\t\tdefer close(errCh)\n\t\t\t// recover from a panic\n\t\t\tdefer func() {\n\t\t\t\tif e := recover(); e != nil {\n\t\t\t\t\tif err, ok := e.(error); ok {\n\t\t\t\t\t\terrCh <- err\n\t\t\t\t\t} else {\n\t\t\t\t\t\terrCh <- errors.Errorf(\"%v\", e)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\t\t\tstream := stream{\n\t\t\t\tlog: d.log.WithValues(\"streamID\", streamID),\n\t\t\t\tconfig: d.config,\n\t\t\t\thandler: &d.handler,\n\t\t\t}\n\t\t\terrCh <- stream.Run(ctx, ch)\n\t\t}(errCh)\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\td.log.Info(\"done\")\n\t\t\tbreak\n\t\tcase err := <-errCh:\n\t\t\tif err != nil {\n\t\t\t\td.log.WithValues(\"streamID\", streamID).Error(err, \"xDS stream terminated with an error\")\n\t\t\t}\n\t\t}\n\t}\n}", "func RunStream(plugin plugin.YomoStreamPlugin, endpoint string) {\n\tlog.SetPrefix(fmt.Sprintf(\"[%s:%v]\", plugin.Name(), os.Getpid()))\n\tlog.Printf(\"plugin service start... [%s]\", endpoint)\n\n\t// binding plugin\n\tpluginStream := framework.NewStreamPlugin(plugin)\n\n\t// decoding\n\tdeStream1 := txtkv.NewStreamDecoder(plugin.Observed())\n\n\t//过滤\n\tdeStream2 := txtkv.NewFilterDecoder(plugin.Observed())\n\n\t// encoding\n\tenStream := txtkv.NewStreamEncoder(plugin.Observed())\n\n\tdeStream := io.MultiWriter(deStream1.Writer, deStream2.Writer)\n\n\t// activation service\n\tframework.NewServer(endpoint, deStream, enStream.Reader)\n\n\tgo func() { io.CopyN(pluginStream.Writer, deStream1.Reader, 1024) }() // nolint\n\tgo func() { io.CopyN(enStream.Writer, pluginStream.Reader, 1024) }() // nolint\n\tgo func() { io.CopyN(enStream.Writer, deStream2.Reader, 1024) }() // nolint\n}", "func (proc *Sink) Run() {\n\tok := true\n\tvar ft *InformationPacket\n\tfor len(proc.inPorts) > 0 {\n\t\tfor i, inp := range proc.inPorts {\n\t\t\tselect {\n\t\t\tcase ft, ok = <-inp.Chan:\n\t\t\t\tif !ok {\n\t\t\t\t\tproc.deleteInPortAtKey(i)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tDebug.Println(\"Received file in 
sink: \", ft.GetPath())\n\t\t\tdefault:\n\t\t\t\tDebug.Printf(\"No receive on inport %d, so continuing ...\\n\", i)\n\t\t\t\ttime.Sleep(100 * time.Millisecond)\n\t\t\t}\n\t\t}\n\t}\n}", "func (r fifo) Run(ctx context.Context, params StageParams) {\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase payloadIn, ok := <-params.Input():\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tpayloadOut, err := r.proc.Process(ctx, payloadIn)\n\t\t\tif err != nil {\n\t\t\t\twrappedErr := xerrors.Errorf(\"pipeline stage %d : %w \", params.StageIndex(), err)\n\t\t\t\tmaybeEmitError(wrappedErr, params.Error())\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif payloadOut == nil {\n\t\t\t\tpayloadIn.MarkAsProcessed()\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tselect {\n\t\t\tcase params.Output() <- payloadOut:\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n}", "func (s *S3Sink) Run(stopCh <-chan bool) {\nloop:\n\tfor {\n\t\tselect {\n\t\tcase e := <-s.eventCh.Out():\n\t\t\tvar evt EventData\n\t\t\tvar ok bool\n\t\t\tif evt, ok = e.(EventData); !ok {\n\t\t\t\tglog.Warningf(\"Invalid type sent through event channel: %T\", e)\n\t\t\t\tcontinue loop\n\t\t\t}\n\n\t\t\t// Start with just this event...\n\t\t\tarr := []EventData{evt}\n\n\t\t\t// Consume all buffered events into an array, in case more have been written\n\t\t\t// since we last forwarded them\n\t\t\tnumEvents := s.eventCh.Len()\n\t\t\tfor i := 0; i < numEvents; i++ {\n\t\t\t\te := <-s.eventCh.Out()\n\t\t\t\tif evt, ok = e.(EventData); ok {\n\t\t\t\t\tarr = append(arr, evt)\n\t\t\t\t} else {\n\t\t\t\t\tglog.Warningf(\"Invalid type sent through event channel: %T\", e)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\ts.drainEvents(arr)\n\t\tcase <-stopCh:\n\t\t\tbreak loop\n\t\t}\n\t}\n}", "func (s *Stream) Run() error {\n\tif Exists(ActiveStreamPID) {\n\t\treturn fmt.Errorf(\"existing PID file %s\", ActiveStreamPID)\n\t}\n\n\t// Setup the signal handler in Run()\n\ts.SigHandler()\n\n\t// Setup the gRPC server\n\tgo func() {\n\t\terr := 
s.ServerGRPC()\n\t\tif err != nil {\n\t\t\tlogger.Critical(\"Unable to start gRPC server! %v\", err)\n\t\t\ts.Shutdown <- true\n\t\t}\n\t}()\n\n\t// Do not handle error. If it cannot be removed just exit and let the user\n\t// figure out what to do.\n\tdefer os.Remove(ActiveStreamPID)\n\tpidInt := os.Getpid()\n\tpidStr := fmt.Sprintf(\"%d\", pidInt)\n\terr := ioutil.WriteFile(ActiveStreamPID, []byte(pidStr), ActiveStreamPIDWriteMode)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to write PID file: %v\", err)\n\t}\n\n\tlogger.Info(\"Streaming...\")\n\tif s.Server != nil {\n\t\tinfo := s.Server.GetServiceInfo()\n\t\tfor name, service := range info {\n\t\t\tlogger.Info(\"%s %v\", name, service.Metadata)\n\t\t}\n\t}\n\tfor {\n\t\tselect {\n\t\tcase <-s.Shutdown:\n\t\t\ts.Server.GracefulStop()\n\t\t\tos.Remove(ActiveStreamSocket)\n\t\t\tos.Remove(ActiveStreamPID)\n\t\t\tlogger.Always(\"Graceful shutdown...\")\n\t\t\treturn nil\n\t\tdefault:\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(time.Second * 1)\n\t}\n\treturn nil\n}", "func (f *FeatureParser) Run(quarter string, streams int64) {\n\tvar maxUnit int64 = 10000\n\n\tdone := make(chan bool, streams)\n\terrors := make(chan bool, streams)\n\titems := make(chan int64, maxUnit)\n\tdefer close(done)\n\tdefer close(errors)\n\tdefer close(items)\n\n\twg := &sync.WaitGroup{}\n\n\tvar i int64\n\tfor i = 0; i < streams; i++ {\n\t\twg.Add(1)\n\t\tgo f.parse(quarter, items, errors, done, wg)\n\t}\n\n\tgo f.checkError(errors, done, streams)\n\tgo func() {\n\t\tfor i = 0; i < maxUnit; i++ {\n\t\t\titems <- i\n\t\t}\n\t}()\n\n\twg.Wait()\n}", "func (s *Service) Run(ctx context.Context) error {\n\tlog := s.log.With().Str(\"path\", s.Config.SourcePath).Logger()\n\n\t// Open source file\n\tf, err := os.Open(s.Config.SourcePath)\n\tif err != nil {\n\t\tlog.Error().Err(err).Msg(\"Failed to open file\")\n\t\treturn maskAny(err)\n\t}\n\tdefer f.Close()\n\n\t// Split into lines\n\tscanner := bufio.NewScanner(f)\n\tfor scanner.Scan() {\n\t\t// 
Get line\n\t\tline := scanner.Text()\n\n\t\t// Publish line\n\t\tdataURI := dataurl.New([]byte(line), \"text/plain\")\n\t\tif _, err := s.outputReady(ctx, dataURI.String(), s.Config.OutputName); err != nil {\n\t\t\treturn maskAny(err)\n\t\t}\n\t}\n\n\treturn nil\n}", "func (s *SSH) Run(command string) (outStr string, err error) {\n\toutChan, doneChan, err := s.stream(command)\n\tif err != nil {\n\t\treturn outStr, err\n\t}\n\n\tdone := false\n\tfor !done {\n\t\tselect {\n\t\tcase <-doneChan:\n\t\t\tdone = true\n\t\tcase line := <-outChan:\n\t\t\t// TODO ew.. this is nasty\n\t\t\toutStr += line + \"\\n\"\n\n\t\t}\n\t}\n\n\treturn outStr, err\n}", "func (s *Stream) Run(cmd string) error {\n\t// First parse the entire command string\n\tcm, er := parser.ParseString(cmd)\n\tif er != nil {\n\t\treturn er\n\t}\n\t//spew.Dump(cm)\n\t// So now we run these commands on the file\n\tfi, er := interpreter.New(s.file)\n\tif er != nil {\n\t\treturn er\n\t}\n\treturn fi.Run(cm)\n}", "func (r *mutationStreamReader) run() {\n\n\t//panic handler\n\tdefer r.panicHandler()\n\n\tfor {\n\t\tselect {\n\n\t\tcase msg, ok := <-r.streamMutch:\n\n\t\t\tif ok {\n\t\t\t\tswitch msg.(type) {\n\t\t\t\tcase []*protobuf.VbKeyVersions:\n\t\t\t\t\tvbKeyVer := msg.([]*protobuf.VbKeyVersions)\n\t\t\t\t\tr.handleVbKeyVersions(vbKeyVer)\n\n\t\t\t\tdefault:\n\t\t\t\t\tr.handleStreamInfoMsg(msg)\n\t\t\t\t}\n\n\t\t\t} else {\n\t\t\t\t//stream library has closed this channel indicating\n\t\t\t\t//unexpected stream closure send the message to supervisor\n\t\t\t\tlogging.Fatalf(\"MutationStreamReader::run Unexpected Mutation \"+\n\t\t\t\t\t\"Channel Close for Stream %v\", r.streamId)\n\t\t\t\tmsgErr := &MsgError{\n\t\t\t\t\terr: Error{code: ERROR_STREAM_READER_STREAM_SHUTDOWN,\n\t\t\t\t\t\tseverity: FATAL,\n\t\t\t\t\t\tcategory: STREAM_READER}}\n\t\t\t\tr.supvRespch <- msgErr\n\t\t\t}\n\n\t\tcase <-r.killch:\n\t\t\treturn\n\t\t}\n\t}\n\n}", "func SubscriberRun(s *websocket.StructSpeaker) 
{\n\ts.Start()\n\tdefer s.Stop()\n\n\tch := make(chan os.Signal, 1)\n\tsignal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)\n\t<-ch\n}", "func (s *S3Consumer) Run(\n\tctx context.Context,\n\tmessageChan chan message,\n) error {\n\tobjectChan := make(chan s3ObjTask, s.NumWorkers)\n\terrChan := make(chan error, s.NumWorkers+1)\n\n\tfor i := 0; i < s.NumWorkers; i++ {\n\t\tgo func() {\n\t\t\terrChan <- s.runSubTasks(ctx, messageChan, objectChan)\n\t\t}()\n\t}\n\n\tgo func() {\n\t\terrChan <- s.processPrefixes(ctx, objectChan)\n\t\tclose(objectChan)\n\t}()\n\n\tfor i := 0; i < s.NumWorkers+1; i++ {\n\t\tif err := <-errChan; err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (c *Consumer) Run() error {\n\tfor {\n\t\tselect {\n\t\tcase <-time.After(c.Period):\n\t\tcase <-c.stop:\n\t\t\treturn nil\n\t\t}\n\t\tb, err := c.tailer.Next()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Could not retrieve log content: %v\", err)\n\t\t} else if err := c.consumeBytes(b); err != nil {\n\t\t\treturn fmt.Errorf(\"Could not export log content: %v\", err)\n\t\t}\n\t}\n\treturn nil\n}", "func (w *noAggregationStreamWorker) run() {\n\tlog.Debugf(\"Starting streaming routine for the no-aggregation pipeline\")\n\n\tticker := time.NewTicker(noAggWorkerStreamCheckFrequency)\n\tdefer ticker.Stop()\n\tlogPayloads := config.Datadog.GetBool(\"log_payloads\")\n\tw.seriesSink, w.sketchesSink = createIterableMetrics(w.flushConfig, w.serializer, logPayloads, false)\n\n\tstopped := false\n\tvar stopBlockChan chan struct{}\n\tvar lastStream time.Time\n\n\tfor !stopped {\n\t\tstart := time.Now()\n\t\tserializedSamples := 0\n\n\t\tmetrics.Serialize(\n\t\t\tw.seriesSink,\n\t\t\tw.sketchesSink,\n\t\t\tfunc(seriesSink metrics.SerieSink, sketchesSink metrics.SketchesSink) {\n\t\t\tmainloop:\n\t\t\t\tfor {\n\t\t\t\t\tselect {\n\n\t\t\t\t\t// stop signal\n\t\t\t\t\tcase trigger := <-w.stopChan:\n\t\t\t\t\t\tstopped = true\n\t\t\t\t\t\tstopBlockChan = trigger.blockChan\n\t\t\t\t\t\tbreak 
mainloop // end `Serialize` call and trigger a flush to the forwarder\n\n\t\t\t\t\tcase <-ticker.C:\n\t\t\t\t\t\tn := time.Now()\n\t\t\t\t\t\tif serializedSamples > 0 && lastStream.Before(n.Add(-time.Second*1)) {\n\t\t\t\t\t\t\tlog.Debug(\"noAggregationStreamWorker: triggering an automatic payloads flush to the forwarder (no traffic since 1s)\")\n\t\t\t\t\t\t\tbreak mainloop // end `Serialize` call and trigger a flush to the forwarder\n\t\t\t\t\t\t}\n\n\t\t\t\t\t// receiving samples\n\t\t\t\t\tcase samples := <-w.samplesChan:\n\t\t\t\t\t\tlog.Debugf(\"Streaming %d metrics from the no-aggregation pipeline\", len(samples))\n\t\t\t\t\t\tfor _, sample := range samples {\n\t\t\t\t\t\t\t// enrich metric sample tags\n\t\t\t\t\t\t\tsample.GetTags(w.taggerBuffer, w.metricBuffer)\n\t\t\t\t\t\t\tw.metricBuffer.AppendHashlessAccumulator(w.taggerBuffer)\n\n\t\t\t\t\t\t\t// turns this metric sample into a serie\n\t\t\t\t\t\t\tvar serie metrics.Serie\n\t\t\t\t\t\t\tserie.Name = sample.Name\n\t\t\t\t\t\t\tserie.Points = []metrics.Point{{Ts: sample.Timestamp, Value: sample.Value}}\n\t\t\t\t\t\t\tserie.Tags = tagset.CompositeTagsFromSlice(w.metricBuffer.Copy())\n\t\t\t\t\t\t\tserie.Host = sample.Host\n\t\t\t\t\t\t\t// ignored when late but mimic dogstatsd traffic here anyway\n\t\t\t\t\t\t\tserie.Interval = 10\n\t\t\t\t\t\t\tw.seriesSink.Append(&serie)\n\n\t\t\t\t\t\t\tw.taggerBuffer.Reset()\n\t\t\t\t\t\t\tw.metricBuffer.Reset()\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tlastStream = time.Now()\n\n\t\t\t\t\t\tserializedSamples += len(samples)\n\t\t\t\t\t\tif serializedSamples > w.maxMetricsPerPayload {\n\t\t\t\t\t\t\tbreak mainloop // end `Serialize` call and trigger a flush to the forwarder\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}, func(serieSource metrics.SerieSource) {\n\t\t\t\tsendIterableSeries(w.serializer, start, serieSource)\n\t\t\t}, func(sketches metrics.SketchesSource) {\n\t\t\t\t// noop: we do not support sketches in the no-agg pipeline.\n\t\t\t})\n\n\t\tif stopped 
{\n\t\t\tbreak\n\t\t}\n\n\t\tw.seriesSink, w.sketchesSink = createIterableMetrics(w.flushConfig, w.serializer, logPayloads, false)\n\t}\n\n\tif stopBlockChan != nil {\n\t\tclose(stopBlockChan)\n\t}\n}", "func (b *ToLog) Run() {\n\tfor {\n\t\tselect {\n\t\tcase <-b.quit:\n\t\t\treturn\n\t\tcase msg := <-b.in:\n\t\t\tb.Log(msg)\n\t\t}\n\t}\n}", "func Run(\n\tbatchSize uint,\n\tparty, market, serverAddr, logFormat string,\n\treconnect bool,\n) error {\n\tflag.Parse()\n\n\tif len(serverAddr) <= 0 {\n\t\treturn fmt.Errorf(\"error: missing grpc server address\")\n\t}\n\n\tvar printEvent func(string)\n\tswitch logFormat {\n\tcase \"raw\":\n\t\tprintEvent = func(event string) { fmt.Printf(\"%v\\n\", event) }\n\tcase \"text\":\n\t\tprintEvent = func(event string) {\n\t\t\tfmt.Printf(\"%v;%v\", time.Now().UTC().Format(time.RFC3339Nano), event)\n\t\t}\n\tcase \"json\":\n\t\tprintEvent = func(event string) {\n\t\t\tfmt.Printf(\"{\\\"time\\\":\\\"%v\\\",%v\\n\", time.Now().UTC().Format(time.RFC3339Nano), event[1:])\n\t\t}\n\tdefault:\n\t\treturn fmt.Errorf(\"error: unknown log-format: \\\"%v\\\". 
Allowed values: raw, text, json\", logFormat)\n\t}\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\twg := sync.WaitGroup{}\n\tif err := run(ctx, cancel, &wg, batchSize, party, market, serverAddr, printEvent, reconnect); err != nil {\n\t\treturn fmt.Errorf(\"error when starting the stream: %v\", err)\n\t}\n\n\twaitSig(ctx, cancel)\n\twg.Wait()\n\n\treturn nil\n}", "func (b *broadcast) Run(ctx context.Context, params StageParams) {\n\tvar (\n\t\twg sync.WaitGroup\n\t\tinCh = make([]chan Payload, len(b.fifos))\n\t)\n\tfor i := 0; i < len(b.fifos); i++ {\n\t\twg.Add(1)\n\t\tinCh[i] = make(chan Payload)\n\t\tgo func(fifoIndex int) {\n\t\t\tfifoParams := &workerParams{\n\t\t\t\tstage: params.StageIndex(),\n\t\t\t\tinCh: inCh[fifoIndex],\n\t\t\t\toutCh: params.Output(),\n\t\t\t\terrCh: params.Error(),\n\t\t\t}\n\t\t\tb.fifos[fifoIndex].Run(ctx, fifoParams)\n\t\t\twg.Done()\n\t\t}(i)\n\t}\ndone:\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tbreak done\n\t\tcase payload, ok := <-params.Input():\n\t\t\tif !ok {\n\t\t\t\tbreak done\n\t\t\t}\n\t\t\tfor i := len(b.fifos) - 1; i >= 0; i++ {\n\t\t\t\t// as each FIFO might modify the payload, to\n\t\t\t\t// avoid data race we need to copy of each payload\n\t\t\t\t// for all FiFO execpt the first one\n\t\t\t\tvar fifoPayload = payload\n\t\t\t\tif i != 0 {\n\t\t\t\t\tfifoPayload = payload.Clone()\n\t\t\t\t}\n\t\t\t\tselect {\n\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\tbreak done\n\t\t\t\tcase inCh[i] <- fifoPayload:\n\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tfor _, ch := range inCh {\n\t\tclose(ch)\n\t}\n\twg.Wait()\n\n}", "func (b *QuerySnipBroadcaster) Run() {\n\tfor {\n\t\ts := <-b.in\n\t\tfor _, recipient := range b.recipients {\n\t\t\trecipient <- s\n\t\t}\n\t}\n}", "func (fr *Reader) FlowRun(in <-chan flowroutines.FlowData, out chan<- flowroutines.FlowData, exit chan bool, wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\t// go does not consider non-blocking io operations as best practice. 
We need to spin up a new goroutine.\n\tsocketchan := make(chan [16]byte, 1000)\n\tgo func() {\n\t\tfor {\n\t\t\traw := [16]byte{}\n\t\t\tfor i := range raw {\n\t\t\t\tvar err error\n\t\t\t\traw[i], err = fr.in.ReadByte()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tsocketchan <- raw\n\t\t}\n\t}()\n\tfor {\n\t\t// https://stackoverflow.com/a/11121616\n\t\tselect {\n\t\tcase raw := <-socketchan:\n\t\t\tframe := canbus.FrameFromRaw(raw)\n\t\t\tlogdata := globals.Livelog.NewLogData()\n\t\t\tlogdata.Identifier = fr.iface.Name\n\t\t\tlogdata.Timestamp = time.Now().UnixNano()\n\t\t\tlogdata.Msg = \"Received CAN Frame\"\n\t\t\tglobals.Livelog.Send <- logdata\n\t\t\tout <- flowroutines.FlowData{CanFrame: frame, SrcInterface: fr.iface.Index, DstInterface: fr.defaultRoute}\n\t\t\tglobals.Statistics.GetInChannel(fr.iface.Name) <- uint64(1)\n\t\t\tcontinue\n\t\tdefault:\n\t\t}\n\t\tselect {\n\t\tcase data := <-in:\n\t\t\tout <- data\n\t\t\tcontinue\n\t\tdefault:\n\t\t}\n\t\tselect {\n\t\tcase raw := <-socketchan:\n\t\t\tframe := canbus.FrameFromRaw(raw)\n\t\t\tlogdata := globals.Livelog.NewLogData()\n\t\t\tlogdata.Identifier = fr.iface.Name\n\t\t\tlogdata.Timestamp = time.Now().UnixNano()\n\t\t\tlogdata.Msg = \"Received CAN Frame\"\n\t\t\tglobals.Livelog.Send <- logdata\n\t\t\tout <- flowroutines.FlowData{CanFrame: frame, SrcInterface: fr.iface.Index, DstInterface: fr.defaultRoute}\n\t\t\tglobals.Statistics.GetInChannel(fr.iface.Name) <- uint64(1)\n\t\t\tcontinue\n\t\tcase data := <-in:\n\t\t\tout <- data\n\t\t\tcontinue\n\t\tcase <-exit:\n\t\t}\n\t\tbreak\n\t}\n}", "func (cmd *ReceiveCommand) Run() error {\n\tclient, err := cmd.WebsocketClientFactory.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer client.Close()\n\n\tif err := client.Receive(cmd.Timeout, func(message []byte) error {\n\t\t_, err := cmd.OutputWriter.Write(append(message, '\\n'))\n\t\treturn err\n\t}); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func runEchoStream(b 
*testing.B, chunkCnt, payloadSize int, random bool) {\n\tb.ReportAllocs()\n\tinternal.CallEchoStream(b, ctx, serverAddr, b.N, chunkCnt, payloadSize, random, benchmark.AddStats(b, 16))\n}", "func (t *Subrogationcode) Run(stub shim.ChaincodeStubInterface, function string, args []string) ([]byte, error) {\n\treturn t.Invoke(stub, function, args)\n}", "func RunStream(plugin plugin.YomoStreamPlugin, endpoint string) {\n\tlogger.Infof(\"plugin service [%s] start... [%s]\", plugin.Name(), endpoint)\n\n\t// activation service\n\tpanic(\"not impl\")\n}", "func (t *TestPhase) Run(_ context.Context, queue *Queue, _, _ chan message.Message, _ RoundUpdate, step uint8) PhaseFn {\n\tt.callback(t.req, t.packet, t.streamer, t.aChan)\n\treturn nil\n}", "func (r *sinkRunner) run(pipeID, componentID string, cancel chan struct{}, in <-chan message, meter *meter) <-chan error {\n\terrc := make(chan error, 1)\n\tgo func() {\n\t\tdefer close(errc)\n\t\tcall(r.reset, pipeID, errc) // reset hook\n\t\tvar m message\n\t\tvar ok bool\n\t\tfor {\n\t\t\t// receive new message\n\t\t\tselect {\n\t\t\tcase m, ok = <-in:\n\t\t\t\tif !ok {\n\t\t\t\t\tcall(r.flush, pipeID, errc) // flush hook\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\tcase <-cancel:\n\t\t\t\tcall(r.interrupt, pipeID, errc) // interrupt hook\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tm.params.applyTo(componentID) // apply params\n\t\t\terr := r.fn(m.Buffer) // sink a buffer\n\t\t\tif err != nil {\n\t\t\t\terrc <- err\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tmeter = meter.sample(int64(m.Buffer.Size())).message()\n\n\t\t\tm.feedback.applyTo(componentID) // apply feedback\n\t\t}\n\t}()\n\n\treturn errc\n}", "func Run(port string, bus *pubsub.MessageBus) {\n\tlog.Infof(\"SocketCAN %s\", port)\n\tdevice, err := socketcan.NewRawInterface(port)\n\n\tif err != nil {\n\t\tfmt.Printf(\"could not open interface %s: %v\\n\",\n\t\t\tport, err)\n\t\tos.Exit(1)\n\t}\n\n\tdefer device.Close()\n\n\tfor {\n\t\tframe, err := device.RecvFrame()\n\t\tif err != nil 
{\n\t\t\tfmt.Printf(\"error receiving frame: %v\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\t//dataStr := dataToString(frame.Data)\n\t\t//dataInt, _ := strconv.ParseInt(string(frame.Data), 10, 64)\n\t\t//fmt.Printf(\" %s\\t%03X\\t[%d]\\t%s\\n\", device.IfName, frame.ArbId, frame.Dlc, dataStr)\n\t\t//log.Infof(\"Device: %s, Data: %i, DLC: %s\\n\", frame.ArbId, dataInt, frame.Dlc)\n\t\tdata := binary.LittleEndian.Uint64(frame.Data)\n\t\tbus.Publish(\"CAN\", msgs.NewCAN(frame.ArbId, uint64(data), uint8(frame.Dlc)))\n\t}\n}", "func run(arg0 string, args ...string) error {\n\tcmd := exec.Command(arg0, args...)\n\tpipe, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn err\n\t}\n\tcmd.Stderr = cmd.Stdout\n\n\tfmt.Println(\"Running command:\", arg0, strings.Join(args, \" \"))\n\terr = cmd.Start()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Stream the output from r10k as it is generated\n\tscanner := bufio.NewScanner(pipe)\n\tscanner.Split(bufio.ScanLines)\n\tfor scanner.Scan() {\n\t\tm := scanner.Text()\n\t\tfmt.Println(m)\n\t}\n\n\terr = cmd.Wait()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (s *Source) Run(ctx context.Context, domain string, session *subscraping.Session) <-chan subscraping.Result {\n\tresults := make(chan subscraping.Result)\n\n\tgo func() {\n\t\tdefer close(results)\n\t\tfound := s.getSubdomainsFromSQL(domain, results)\n\t\tif found {\n\t\t\treturn\n\t\t}\n\t\t_ = s.getSubdomainsFromHTTP(ctx, domain, session, results)\n\t}()\n\n\treturn results\n}", "func (l *logPipe) Run() error {\n\t// Remove the pipe or file if any exists\n\terr := removeExisting(l.path)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = mkFifo(l.path)\n\tif err != nil {\n\t\treturn err\n\t}\n\treader, err := getFileReader(l.path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor {\n\t\tline, err := reader.ReadBytes('\\n')\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\terr = l.Handle(string(line))\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Error:\", 
err)\n\t\t}\n\t}\n}", "func (t *Task) Run() {\n\t// println(\"run step\", t.Step.Id, \"task\", t.Id)\n\tt.Step.Function(t)\n\tfor _, out := range t.Outputs {\n\t\tout.WriteChan.Close()\n\t}\n\t// println(\"stop step\", t.Step.Id, \"task\", t.Id)\n}", "func (s *UDPServer) Run(ctx context.Context, rawData chan []byte) error {\n\tbuffer := make([]byte, s.maxBufferSize)\n\tdoneChan := make(chan error, 1)\n\tgo func(output chan []byte) {\n\t\tfor {\n\t\t\t// By reading from the connection into the buffer, we block until there's\n\t\t\t// new content in the socket that we're listening for new packets.\n\t\t\t//\n\t\t\t// Whenever new packets arrive, `buffer` gets filled and we can continue\n\t\t\t// the execution.\n\t\t\t//\n\t\t\t// note.: `buffer` is not being reset between runs.\n\t\t\t//\t It's expected that only `n` reads are read from it whenever\n\t\t\t//\t inspecting its contents.\n\t\t\tn, addr, err := s.packetConn.ReadFrom(buffer)\n\t\t\tif err != nil {\n\t\t\t\tdoneChan <- err\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\ttemp := make([]byte, n)\n\t\t\tcopy(temp, buffer[:n])\n\t\t\tfmt.Printf(\"packet-received: bytes=%d from=%s\\n\", n, addr.String())\n\t\t\toutput <- temp\n\t\t}\n\t}(rawData)\n\n\tselect {\n\tcase <-ctx.Done():\n\t\tfmt.Println(\"udp_server run done\", ctx.Err())\n\t\treturn nil\n\tcase err := <-doneChan:\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (es *EventSource) run(input chan Event, control chan interface{}) {\n\tvar (\n\t\tnextID = 0\n\t\toffset = 0\n\t\tbacklog = DefaultBacklog\n\t\tevents = make([]Event, 0, BufferSize)\n\t\tclients = make(map[chan Event]bool)\n\t)\n\n\tdefer func() {\n\t\tfor ch := range clients {\n\t\t\tclose(ch)\n\t\t}\n\t}()\n\n\tensure := func(avail int) {\n\t\tif len(events) > backlog-avail {\n\t\t\tremove := len(events) - backlog + avail\n\t\t\tevents, offset = events[remove:], offset+remove\n\t\t}\n\t}\n\n\tpush := func(ch chan Event, ev Event) {\n\t\tselect 
{\n\t\tcase ch <- ev:\n\t\tdefault:\n\t\t\tclose(ch)\n\t\t\tdelete(clients, ch)\n\t\t}\n\t}\n\n\tbound := func(v, lo, hi int) int {\n\t\tif v < lo {\n\t\t\treturn lo\n\t\t} else if v >= hi {\n\t\t\treturn hi\n\t\t}\n\t\treturn v\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase ev, ok := <-input:\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tev.ID, nextID = nextID, nextID+1\n\n\t\t\tensure(1)\n\t\t\tevents = append(events, ev)\n\n\t\t\tfor ch := range clients {\n\t\t\t\tpush(ch, ev)\n\t\t\t}\n\t\tcase msg := <-control:\n\t\t\tswitch msg := msg.(type) {\n\t\t\tcase setBacklog:\n\t\t\t\tbacklog = msg.backlog\n\t\t\t\tensure(0)\n\t\t\tcase startStream:\n\t\t\t\tmsg.old <- events[bound(msg.startID-offset, 0, len(events)):]\n\t\t\t\tclients[msg.events] = true\n\t\t\t}\n\t\t}\n\t}\n}", "func (a *aggregator) Run() {\n\tgo a.submitter()\n\n\tfor m := range a.in {\n\t\tfor _, out_m := range a.process(m) {\n\t\t\ta.out <- out_m\n\t\t}\n\t}\n}", "func (mts *metadataTestSender) Run(wg *sync.WaitGroup) {\n\tif mts.out.output == nil {\n\t\tpanic(\"metadataTestSender output not initialized for emitting rows\")\n\t}\n\tRun(mts.flowCtx.Ctx, mts, mts.out.output)\n\tif wg != nil {\n\t\twg.Done()\n\t}\n}", "func (s *server) Run(req *pb.Request, stream pb.SQLFlow_RunServer) error {\n\tpr := s.run(req.Sql, s.db)\n\tdefer pr.Close()\n\n\tfor r := range pr.ReadAll() {\n\t\tvar res *pb.Response\n\t\tvar err error\n\t\tswitch s := r.(type) {\n\t\tcase error:\n\t\t\treturn s\n\t\tcase map[string]interface{}:\n\t\t\tres, err = encodeHead(s)\n\t\tcase []interface{}:\n\t\t\tres, err = encodeRow(s)\n\t\tcase string:\n\t\t\tres, err = encodeMessage(s)\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"unrecognize run channel return type %#v\", s)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := stream.Send(res); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (p *Process) Run(procFnc func(p *Process)) {\n\tchnSubscribed := make(chan struct{})\n\tgo func() 
{\n\t\tp.Comp.chnSubscribe <- p\n\t\tclose(chnSubscribed)\n\t\tp.Call(procFnc)\n\t\tp.unsubscribe()\n\t\t//fmt.Println(\"Unsubscribed\")\n\t}()\n\t<-chnSubscribed\n}", "func (fss *StreamingService) Stream(wg *sync.WaitGroup) error { return nil }", "func (b *bufferedChan) Run() {\n\tdefer close(b.OutChannel)\n\tfor value := range b.inChannel {\n\t\tselect {\n\t\tcase <-b.ctx.Done():\n\t\t\tfmt.Println(\"Run: Time to return\")\n\t\t\treturn\n\t\tcase b.OutChannel <- value:\n\t\tdefault:\n\t\t\treturn\n\t\t}\n\t}\n\n}", "func (o *GetStreamOptions) Run() error {\n\tresolver, err := o.CreateVersionResolver(o.VersionsRepository, o.VersionsGitRef)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to create the VersionResolver\")\n\t}\n\targs := o.Args\n\tif len(args) == 0 {\n\t\treturn util.MissingArgument(\"name\")\n\t}\n\tname := args[0]\n\n\tkind := versionstream.VersionKind(o.Kind)\n\tif kind == versionstream.KindDocker {\n\t\tresult, err := resolver.ResolveDockerImage(name)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"failed to resolve docker image %s\", name)\n\t\t}\n\t\tlog.Logger().Infof(\"resolved image %s to %s\", util.ColorInfo(name), util.ColorInfo(result))\n\t\treturn nil\n\t}\n\n\tn, err := resolver.StableVersionNumber(kind, name)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to resolve %s version of %s\", o.Kind, name)\n\t}\n\n\tlog.Logger().Infof(\"resolved %s %s to version: %s\", util.ColorInfo(name), util.ColorInfo(o.Kind), util.ColorInfo(n))\n\treturn nil\n}", "func (e *BatchEngine) Run(ctx context.Context) error {\n\te.pollMessages(ctx, e.stream)\n\treturn e.processorError\n}", "func (jr *joinReader) Run(wg *sync.WaitGroup) {\n\terr := jr.mainLoop()\n\tjr.output.Close(err)\n\tif wg != nil {\n\t\twg.Done()\n\t}\n}", "func (w *Worker) Run(done <-chan interface{}) error {\n\tdefer close(w.resultStream)\n\tfor {\n\t\tselect {\n\t\tcase <-done:\n\t\t\tlog.Println(\n\t\t\t\t\"level\", \"INFO\",\n\t\t\t\t\"object\", 
\"workers.worker\",\n\t\t\t\t\"method\", \"Run\",\n\t\t\t\t\"msg\", \"terminating operations by application request\",\n\t\t\t)\n\t\t\treturn nil\n\t\tcase order, ok := <-w.orderStream:\n\t\t\tif !ok {\n\t\t\t\tlog.Println(\"level\", \"INFO\",\n\t\t\t\t\t\"object\", \"workers.worker\",\n\t\t\t\t\t\"method\", \"Run\",\n\t\t\t\t\t\"msg\", \"terminating operations because order stream was closed\",\n\t\t\t\t)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tw.processOrder(order)\n\t\t}\n\t}\n}", "func (r *Runner) Run(in io.Reader, out io.Writer) error {\n\tname, _ := detectType(r.fileName)\n\tdef := languageDefs[r.ft]\n\n\texec := ContainerExec{\n\t\tContainer: r.container,\n\t\tCmd: expandTemplate(def.RunCommand, r.fileName, name),\n\t\tIn: in,\n\t\tOut: out,\n\t}\n\n\texec.Run()\n\tcancelTimer := exec.StartKillTimer(r.timeLimit)\n\n\terr := <-exec.ExitC\n\tif err == ErrTimeLimit {\n\t\tr.container.Run()\n\t}\n\tcancelTimer <- true\n\treturn err\n}", "func (g *Gossiper) Run(ctx context.Context) {\n\tsths := make(chan sthInfo, g.bufferSize)\n\n\tvar wg sync.WaitGroup\n\twg.Add(1 + len(g.srcs))\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tglog.Info(\"starting Submitter\")\n\t\tg.Submitter(ctx, sths)\n\t\tglog.Info(\"finished Submitter\")\n\t}()\n\tfor _, src := range g.srcs {\n\t\tgo func(src *sourceLog) {\n\t\t\tdefer wg.Done()\n\t\t\tglog.Infof(\"starting Retriever(%s)\", src.Name)\n\t\t\tsrc.Retriever(ctx, g, sths)\n\t\t\tglog.Infof(\"finished Retriever(%s)\", src.Name)\n\t\t}(src)\n\t}\n\twg.Wait()\n}", "func (mon *SocketMonitor) Run(command []byte) ([]byte, error) {\n\t// Just call RunWithFile with no file\n\treturn mon.RunWithFile(command, nil)\n}", "func (csw *ChannelStatsWatcher) Run(ctx context.Context) {\n\tflushed, unregister := csw.statser.RegisterFlush()\n\tdefer unregister()\n\n\tticker := time.NewTicker(csw.sampleInterval)\n\tdefer ticker.Stop()\n\n\tcsw.sample()\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase 
<-flushed:\n\t\t\tcsw.emit()\n\t\t\tcsw.sample() // Ensure there will always be at least one sample\n\t\tcase <-ticker.C:\n\t\t\tcsw.sample()\n\t\t}\n\t}\n}", "func (s *Server) Run() {\n\tstopChan := make(chan bool, 1)\n\tinputChan := make(chan interface{})\n\tgo s.filter(inputChan, stopChan)\n\tfor _ = range s.ticker.C {\n\t\tstopChan <- true\n\t\tstopChan = make(chan bool, 1)\n\t\tinputChan = make(chan interface{})\n\t\tgo s.filter(inputChan, stopChan)\n\t}\n}", "func (r *Reader) Run(ctx context.Context, outChan chan cortex_chunk.Chunk) {\n\terrChan := make(chan error)\n\tdefer close(outChan)\n\n\treadCtx, cancel := context.WithCancel(ctx)\n\n\t// starting workers\n\tfor i := 0; i < r.cfg.NumWorkers; i++ {\n\t\tr.workerGroup.Add(1)\n\t\tgo r.readLoop(readCtx, outChan, errChan)\n\t}\n\n\tgo func() {\n\t\t// cancel context when an error occurs or errChan is closed\n\t\tdefer cancel()\n\n\t\terr := <-errChan\n\t\tif err != nil {\n\t\t\tr.err = err\n\t\t\tlogrus.WithError(err).Errorln(\"error scanning chunks, stopping read operation\")\n\t\t\tclose(r.quit)\n\t\t}\n\t}()\n\n\tscanRequests := r.planner.Plan()\n\tlogrus.Infof(\"built %d plans for reading\", len(scanRequests))\n\n\tdefer func() {\n\t\t// lets wait for all workers to finish before we return.\n\t\t// An error in errChan would cause all workers to stop because we cancel the context.\n\t\t// Otherwise closure of scanRequestsChan(which is done after sending all the scanRequests) should make all workers to stop.\n\t\tr.workerGroup.Wait()\n\t\tclose(errChan)\n\t}()\n\n\t// feeding scan requests to workers\n\tfor _, req := range scanRequests {\n\t\tselect {\n\t\tcase r.scanRequestsChan <- req:\n\t\t\tcontinue\n\t\tcase <-r.quit:\n\t\t\treturn\n\t\t}\n\t}\n\n\t// all scan requests are fed, close the channel\n\tclose(r.scanRequestsChan)\n}", "func (w *StatsWriter) Run() {\n\tt := time.NewTicker(5 * time.Second)\n\tdefer t.Stop()\n\tdefer close(w.stop)\n\tfor {\n\t\tselect {\n\t\tcase stats := 
<-w.in:\n\t\t\tw.addStats(stats)\n\t\t\tif !w.syncMode {\n\t\t\t\tw.sendPayloads()\n\t\t\t}\n\t\tcase notify := <-w.flushChan:\n\t\t\tw.sendPayloads()\n\t\t\tnotify <- struct{}{}\n\t\tcase <-t.C:\n\t\t\tw.report()\n\t\tcase <-w.stop:\n\t\t\treturn\n\t\t}\n\t}\n}", "func (r *Runner) Stream(ctx context.Context, study diviner.Study, nparallel int) *Streamer {\n\ts := &Streamer{\n\t\trunner: r,\n\t\tstudy: study,\n\t\tnparallel: nparallel,\n\t\tstopc: make(chan struct{}),\n\t\tdonec: make(chan error),\n\t}\n\tgo func() {\n\t\ts.donec <- s.do(ctx)\n\t}()\n\treturn s\n}", "func (sO *ScreenOutput) Run() {\n\tfor _, channel := range sO.DataInput {\n\t\tgo sO.runChannelInput(channel)\n\t}\n}", "func (fw *Writer) FlowRun(in <-chan flowroutines.FlowData, out chan<- flowroutines.FlowData, exit chan bool, wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\tsocketchan := make(chan [16]byte, 1000)\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase raw := <-socketchan:\n\t\t\t\tfor i := range raw {\n\t\t\t\t\tif err := fw.out.WriteByte(raw[i]); err != nil {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif err := fw.out.Flush(); err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\tfor {\n\t\t// https://stackoverflow.com/a/11121616\n\t\tselect {\n\t\tcase data := <-in:\n\t\t\tif data.DstInterface == 0 || data.DstInterface == fw.iface.Index {\n\t\t\t\tframe := data.CanFrame\n\t\t\t\tsocketchan <- frame.Raw()\n\t\t\t\tglobals.Statistics.GetOutChannel(fw.iface.Name) <- uint64(1)\n\t\t\t\tlogdata := globals.Livelog.NewLogData()\n\t\t\t\tlogdata.Timestamp = time.Now().UnixNano()\n\t\t\t\tlogdata.Identifier = fw.iface.Name\n\t\t\t\tlogdata.Msg = \"Sent CAN Frame\"\n\t\t\t\tglobals.Livelog.Send <- logdata\n\t\t\t}\n\t\t\tcontinue\n\t\tdefault:\n\t\t}\n\t\tselect {\n\t\tcase data := <-in:\n\t\t\tif data.DstInterface == 0 || data.DstInterface == fw.iface.Index {\n\t\t\t\tframe := data.CanFrame\n\t\t\t\tsocketchan <- 
frame.Raw()\n\t\t\t\tglobals.Statistics.GetOutChannel(fw.iface.Name) <- uint64(1)\n\t\t\t\tlogdata := globals.Livelog.NewLogData()\n\t\t\t\tlogdata.Timestamp = time.Now().UnixNano()\n\t\t\t\tlogdata.Identifier = fw.iface.Name\n\t\t\t\tlogdata.Msg = \"Sent CAN Frame\"\n\t\t\t\tglobals.Livelog.Send <- logdata\n\t\t\t}\n\t\t\tcontinue\n\t\tcase <-exit:\n\t\t}\n\t\tbreak\n\t}\n}", "func (p *Plugin) Run() {\n\tp.Log(\"notice\", fmt.Sprintf(\"Start filter v%s\", p.Version))\n\tmyId := qutils.GetGID()\n\tdc := p.QChan.Data.Join()\n\tinputs := p.GetInputs()\n\n\tfor {\n\t\tselect {\n\t\tcase val := <-dc.Read:\n\t\t\tswitch val.(type) {\n\t\t\tcase qtypes.QMsg:\n\t\t\t\tqm := val.(qtypes.QMsg)\n\t\t\t\tif qm.SourceID == myId {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif len(inputs) != 0 && !qutils.IsInput(inputs, qm.Source) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif qutils.IsItem(p.sendData, qm.Source) {\n\t\t\t\t\tqm.SourceID = myId\n\t\t\t\t\tp.QChan.Data.Send(qm)\n\t\t\t\t}\n\t\t\t\tif qutils.IsItem(p.sendBack, qm.Source) {\n\t\t\t\t\tp.QChan.Back.Send(qm)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (r *serverFour) Run() {\n\tr.state = RstateRunning\n\n\trxcallback := func(ev EventInterface) int {\n\t\ttioevent := ev.(*TimedAnyEvent)\n\t\ttio := tioevent.GetTio()\n\t\ttio.doStage(r)\n\n\t\treturn 0\n\t}\n\n\tgo func() {\n\t\tfor r.state == RstateRunning {\n\t\t\t// recv\n\t\t\tr.receiveEnqueue()\n\t\t\ttime.Sleep(time.Microsecond)\n\t\t\tr.processPendingEvents(rxcallback)\n\t\t}\n\n\t\tr.closeTxChannels()\n\t}()\n}", "func Run(run func()) {\n\tdone := make(chan struct{})\n\tgo func() {\n\t\trun()\n\t\tdone <- struct{}{}\n\t}()\n\tfor {\n\t\tselect {\n\t\tcase f := <-fq:\n\t\t\tf.fn()\n\t\t\tf.done <- struct{}{}\n\t\tcase <-done:\n\t\t\treturn\n\t\t}\n\t}\n}", "func (o *StdOutOutput) Run(batch []*event.Event) {\n\tfor _, event := range batch {\n\t\tif event != nil {\n\t\t\tfmt.Printf(\"%s\\n\", o.config.codec.Encode(event))\n\t\t}\n\t}\n\to.next.Run(batch)\n}", "func (g 
*Gossiper) Run(ctx context.Context) {\n\tsths := make(chan sthInfo, g.bufferSize)\n\n\tvar wg sync.WaitGroup\n\twg.Add(len(g.srcs))\n\tfor _, src := range g.srcs {\n\t\tgo func(src *sourceLog) {\n\t\t\tdefer wg.Done()\n\t\t\tglog.Infof(\"starting Retriever(%s)\", src.Name)\n\t\t\tsrc.Retriever(ctx, g, sths)\n\t\t\tglog.Infof(\"finished Retriever(%s)\", src.Name)\n\t\t}(src)\n\t}\n\tglog.Info(\"starting Submitter\")\n\tg.Submitter(ctx, sths)\n\tglog.Info(\"finished Submitter\")\n\n\t// Drain the sthInfo channel during shutdown so the Retrievers don't block on it.\n\tgo func() {\n\t\tfor info := range sths {\n\t\t\tglog.V(1).Infof(\"discard STH from %s\", info.name)\n\t\t}\n\t}()\n\n\twg.Wait()\n\tclose(sths)\n}", "func (s *SubscriberInstance) mainSub() {\n\tfmt.Printf(\"Subscriber started.\\n\")\n\n\tgo s.RecordRecvTimes()\n\n\t// Add topics this subscriber is interested in.\n\tfor _, topic := range topics {\n\t\ts.subscriber.AddTopic(topic)\n\t}\n\n\t// Add broker information\n\tfor i, key := range brokerKeys {\n\t\tid := uint64(i)\n\t\ts.subscriber.AddBroker(id, brokerAddresses[id], []byte(key))\n\t}\n\n\t// Add the chain path\n\ts.subscriber.AddChainPath(rChain)\n\n\tgo s.subscriber.Start()\n\n\tfor {\n\t\tselect {\n\t\tcase pub := <-s.subscriber.ToUserPubCh:\n\t\t\t// Record the time received\n\t\t\ts.recordTimesCh <- common.RecordTime{\n\t\t\t\tPublisherID: pub.PublisherID,\n\t\t\t\tPublicationID: pub.PublicationID,\n\t\t\t\tTime: time.Now().UnixNano(),\n\t\t\t}\n\t\t}\n\t}\n}", "func (b *FromWebsocket) Run() {\n\tvar ws *websocket.Conn\n\tvar url string\n\tto, _ := time.ParseDuration(\"10s\")\n\tvar handshakeDialer = &websocket.Dialer{\n\t\tSubprotocols: []string{\"p1\", \"p2\"},\n\t\tHandshakeTimeout: to,\n\t}\n\tlistenWS := make(blocks.MsgChan)\n\twsHeader := http.Header{\"Origin\": {\"http://localhost/\"}}\n\n\ttoOut := make(blocks.MsgChan)\n\ttoError := make(chan error)\n\n\tfor {\n\t\tselect {\n\n\t\tcase msg := <-toOut:\n\t\t\tb.out <- msg\n\n\t\tcase 
ruleI := <-b.inrule:\n\t\t\tvar err error\n\t\t\t// set a parameter of the block\n\t\t\turl, err = util.ParseString(ruleI, \"url\")\n\t\t\tif err != nil {\n\t\t\t\tb.Error(err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif ws != nil {\n\t\t\t\tws.Close()\n\t\t\t}\n\n\t\t\tws, _, err = handshakeDialer.Dial(url, wsHeader)\n\t\t\tif err != nil {\n\t\t\t\tb.Error(\"could not connect to url\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tws.SetReadDeadline(time.Time{})\n\t\t\th := recvHandler{toOut, toError}\n\t\t\tgo h.recv(ws, listenWS)\n\n\t\tcase err := <-toError:\n\t\t\tb.Error(err)\n\n\t\tcase <-b.quit:\n\t\t\t// quit the block\n\t\t\treturn\n\t\tcase o := <-b.queryrule:\n\t\t\to <- map[string]interface{}{\n\t\t\t\t\"url\": url,\n\t\t\t}\n\t\tcase in := <-listenWS:\n\t\t\tb.out <- in\n\t\t}\n\t}\n}", "func Run(delegate Delegate) Task {\n\tt := create()\n\tgo func() {\n\t\tdefer close(t.errChannel)\n\t\tdefer close(t.valueChannel)\n\n\t\tvalue, err := delegate()\n\t\tif err != nil {\n\t\t\tt.errChannel <- err\n\t\t} else {\n\t\t\tt.valueChannel <- value\n\t\t}\n\t}()\n\treturn t\n}", "func (nn *NetNode) Run() {\n\tbuf := make([]byte, Cfg.BufferSize)\n\tfor {\n\t\tnn.ConnLock.Lock()\n\t\tif nn.Conn == nil {\n\t\t\tnn.ConnLock.Unlock()\n\t\t\tbreak\n\t\t}\n\t\trsize, err := nn.Conn.Read(buf)\n\t\tnn.ConnLock.Unlock()\n\t\tif err != nil {\n\t\t\tif Verbose {\n\t\t\t\tif err == io.EOF {\n\t\t\t\t\tlog.Println(\"Node has disconnected:\", nn.Name)\n\t\t\t\t} else {\n\t\t\t\t\tlog.Println(\"Error reading connection\", nn.Name, err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tnn.ConnLock.Lock()\n\t\t\tnn.tearDownNode()\n\t\t\tnn.ConnLock.Unlock()\n\t\t\tbreak\n\t\t}\n\t\trbuf := make([]byte, rsize)\n\t\tcopy(rbuf, buf)\n\t\tif Verbose {\n\t\t\tif rsize <= 16 {\n\t\t\t\tlog.Println(nn.Name, \"sent\", rsize, \"bytes:\", rbuf, \"fanning out to\", len(nn.Domains), \"domains\")\n\t\t\t} else {\n\t\t\t\tlog.Println(nn.Name, \"sent, fanning out to\", len(nn.Domains), \"domains\")\n\t\t\t}\n\t\t}\n\t\tfor _, dom := 
range nn.Domains {\n\t\t\tdom.fanoutBuffer(rbuf, nn)\n\t\t}\n\t}\n}", "func (c *Client) Run(stream gpb.GNMI_SubscribeServer) (err error) {\n\tif stream == nil {\n\t\treturn grpc.Errorf(codes.FailedPrecondition, \"cannot start client: stream is nil\")\n\t}\n\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tc.errors++\n\t\t}\n\t}()\n\n\tquery, err := stream.Recv()\n\tc.recvMsg++\n\tif err != nil {\n\t\tif err == io.EOF {\n\t\t\treturn grpc.Errorf(codes.Aborted, \"stream EOF received before init\")\n\t\t}\n\t\treturn grpc.Errorf(grpc.Code(err), \"received error from client\")\n\t}\n\n\tlog.V(1).Infof(\"Client %s recieved initial query with go struct : %#v %v\", c, query, query)\n\n\tc.subscribe = query.GetSubscribe()\n\tif c.subscribe == nil {\n\t\treturn grpc.Errorf(codes.InvalidArgument, \"first message must be SubscriptionList: %q\", query)\n\t}\n\n\tswitch mode := c.subscribe.GetMode(); mode {\n\tcase gpb.SubscriptionList_STREAM:\n\t\terr = c.populateDbPath(c.subscribe, true)\n\t\tif err != nil {\n\t\t\treturn grpc.Errorf(codes.InvalidArgument, \"Invalid subscription path: %s %q\", err, query)\n\t\t}\n\t\tc.stop = make(chan struct{}, 1)\n\t\t// Close of stop channel serves as signal to stop subscribDB routine\n\t\tdefer close(c.stop)\n\t\tgo subscribeDb(c)\n\n\tcase gpb.SubscriptionList_POLL:\n\t\terr = c.populateDbPath(c.subscribe, false)\n\t\tif err != nil {\n\t\t\treturn grpc.Errorf(codes.InvalidArgument, \"Invalid subscription path: %s %q\", err, query)\n\t\t}\n\t\tc.polled = make(chan struct{}, 1)\n\t\t// Close of polled channel serves as signal to stop pollDb routine\n\t\tdefer close(c.polled)\n\t\tc.polled <- struct{}{}\n\t\tgo pollDb(c)\n\n\tcase gpb.SubscriptionList_ONCE:\n\t\treturn grpc.Errorf(codes.Unimplemented, \"SubscriptionList_ONCE is not implemented for SONiC gRPC/gNMI yet: %q\", query)\n\tdefault:\n\t\treturn grpc.Errorf(codes.InvalidArgument, \"Unkown subscription mode: %q\", query)\n\t}\n\n\tlog.V(1).Infof(\"Client %s running\", c)\n\tgo 
c.recv(stream)\n\tc.send(stream)\n\tlog.V(1).Infof(\"Client %s shutdown\", c)\n\n\treturn nil\n}", "func (nm *NodeMonitor) run(sockPath, bpfRoot string) error {\n\tos.Remove(sockPath)\n\tif err := syscall.Mkfifo(sockPath, 0600); err != nil {\n\t\treturn fmt.Errorf(\"Unable to create named pipe %s: %s\", sockPath, err)\n\t}\n\n\tdefer os.Remove(sockPath)\n\n\tpipe, err := os.OpenFile(sockPath, os.O_RDWR, 0600)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to open named pipe for writing: %s\", err)\n\t}\n\n\tdefer pipe.Close()\n\n\tnm.pipeLock.Lock()\n\tnm.pipe = pipe\n\tnm.pipeLock.Unlock()\n\n\tnm.Launcher.SetArgs([]string{\"--bpf-root\", bpfRoot})\n\tif err := nm.Launcher.Run(); err != nil {\n\t\treturn err\n\t}\n\tmetrics.SubprocessStart.WithLabelValues(targetName).Inc()\n\n\tr := bufio.NewReader(nm.GetStdout())\n\tfor nm.GetProcess() != nil {\n\t\tl, err := r.ReadBytes('\\n') // this is a blocking read\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Unable to read stdout from monitor: %s\", err)\n\t\t}\n\n\t\tvar tmp *models.MonitorStatus\n\t\tif err := json.Unmarshal(l, &tmp); err != nil {\n\t\t\treturn fmt.Errorf(\"Unable to unmarshal stdout from monitor: %s\", err)\n\t\t}\n\n\t\tnm.setState(tmp)\n\t}\n\n\treturn fmt.Errorf(\"Monitor process quit unexepctedly\")\n}", "func (s *server) Stream(in *tt.Empty, stream tt.TamTam_StreamServer) error {\n\tch := make(chan []byte)\n\tctx := stream.Context()\n\tutil.AddBroadcastChannel(ctx, ch)\n\tdefer util.RemoveBroadcastChannel(ctx)\n\tdefer log.Info().Msg(\"Broadcast listener went away\")\n\tfor {\n\t\tselect {\n\t\tcase v := <-ch:\n\t\t\tlog.Debug().Msgf(\"Streaming %d bytes to subscriber\", len(v))\n\t\t\tif err := stream.Send(&tt.Message{Bytes: v}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase <-ctx.Done():\n\t\t\treturn nil\n\t\t}\n\t}\n}", "func (s *Converter) Run() {\n\tr := reader.NewFileReader(s.fileName, s.allowEmpty, s.anyHeader)\n\tif err := r.Validate(); err != nil 
{\n\t\tlogrus.Fatal(err)\n\t}\n\n\tif err := r.Read(); err != nil {\n\t\tlogrus.Fatalf(\"failed to open %v. %v\", s.fileName, err)\n\t}\n}", "func (w *WorkerContainer) Run(ctx context.Context, meta *scriptpb.RunMeta, chunkReader *common.ChunkReader) (<-chan interface{}, error) {\n\tworkerStream, err := w.Worker.scriptCli.Run(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Send meta header.\n\tif err := workerStream.Send(&scriptpb.RunRequest{\n\t\tValue: &scriptpb.RunRequest_Meta{\n\t\t\tMeta: meta,\n\t\t},\n\t}); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Send chunks.\n\tif chunkReader != nil {\n\t\tvar chunk *scriptpb.RunChunk\n\n\t\tfor {\n\t\t\tchunk, err = chunkReader.Get()\n\t\t\tif err == io.EOF {\n\t\t\t\terr = nil\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\terr = fmt.Errorf(\"reading script chunk failed: %w\", err)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif err = workerStream.Send(&scriptpb.RunRequest{\n\t\t\t\tValue: &scriptpb.RunRequest_Chunk{\n\t\t\t\t\tChunk: chunk,\n\t\t\t\t},\n\t\t\t}); err != nil {\n\t\t\t\terr = fmt.Errorf(\"sending script request failed: %w\", err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif err := workerStream.CloseSend(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tch := make(chan interface{}, 1)\n\n\tgo func() {\n\t\tfor {\n\t\t\trunRes, err := workerStream.Recv()\n\t\t\tif err != nil {\n\t\t\t\tif err != io.EOF {\n\t\t\t\t\tch <- err\n\t\t\t\t}\n\n\t\t\t\tclose(ch)\n\n\t\t\t\treturn\n\t\t\t}\n\t\t\tch <- runRes\n\t\t}\n\t}()\n\n\treturn ch, nil\n}", "func (w *WorkerManager) Run(ctx context.Context, filename string) {\n\tstats.StartTimer()\n\n\tdefer w.close()\n\n\tvar r io.ReadCloser\n\tvar err error\n\n\tif filename == \"-\" {\n\t\tr = os.Stdin\n\t} else {\n\t\tr, err = os.Open(filename)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tdefer r.Close()\n\t}\n\n\tw.produceWithScanner(ctx, r)\n}", "func (rw *ReadWorker) Run(reader *bufio.Reader) 
{\n\tisLastBufferSeparated := false\n\tlastWriteBytes := 0\n\tfor {\n\t\tbuf := make([]byte, readBufferSize)\n\t\tn, err := reader.Read(buf)\n\t\tif err == io.EOF {\n\t\t\tif n == 0 {\n\t\t\t\tif !isLastBufferSeparated && lastWriteBytes != 0 {\n\t\t\t\t\trw.sendSpool(1, []byte{separator})\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\trw.sendSpool(n, buf)\n\t\t\t}\n\t\t} else if err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tif n != 0 {\n\t\t\tisLastBufferSeparated = buf[n-1] == separator\n\t\t\tlastWriteBytes = n\n\t\t\trw.sendSpool(n, buf)\n\t\t}\n\t}\n}", "func Run(run func()) {\n\t// Note: Initializing global `callQueue`. This is potentially unsafe, as `callQueue` might\n\t// have been already initialized.\n\t// TODO(yarcat): Decide whether we should panic at this point or do something else.\n\tcallQueue = make(chan func())\n\n\ttasks := make(chan func())\n\tdone := make(chan struct{})\n\tgo transferTasks(tasks, done)\n\n\tgo func() {\n\t\trun()\n\t\tclose(done) // `close` broadcasts it to all receivers.\n\t}()\n\n\tfor {\n\t\tselect {\n\t\tcase f := <-tasks:\n\t\t\tf()\n\t\tcase <-done:\n\t\t\treturn\n\t\t}\n\t}\n}", "func (this *Connection) run() {\n\tgo this.routineMain()\n}", "func (p *Producer) Run() {\n\tp.wg.Add(1)\n\tdefer p.wg.Done()\n\n\tsendMsg := func(routingKey string, data []byte) {\n\t\ttimeStamp := time.Now()\n\t\terr := p.rabbitChannel.Publish(\n\t\t\tp.rabbitExchange,\n\t\t\troutingKey,\n\t\t\tfalse,\n\t\t\tfalse,\n\t\t\tamqp.Publishing{\n\t\t\t\tDeliveryMode: amqp.Persistent,\n\t\t\t\tTimestamp: timeStamp,\n\t\t\t\tContentType: \"text/plain\",\n\t\t\t\tBody: data,\n\t\t\t})\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error publishing %s\", string(data))\n\t\t\tp.writeFailure(\n\t\t\t\tfmt.Sprintf(\"%s/%s-%d.txt\",\n\t\t\t\t\tp.failureDir,\n\t\t\t\t\troutingKey,\n\t\t\t\t\ttimeStamp.UnixNano()),\n\t\t\t\tdata)\n\t\t}\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase event := <-p.eventsChan:\n\t\t\tsendMsg(\"raw_events\", event)\n\t\tcase meter := 
<-p.metersChan:\n\t\t\tsendMsg(\"raw_meters\", meter)\n\t\tcase <-p.quitChan:\n\t\t\tp.rabbitChannel.Close()\n\t\t\treturn\n\t\t}\n\t}\n}", "func (pf *PolygonFetcher) Run() {\n\tapi.SetAPIKey(pf.config.APIKey)\n\n\tif pf.config.BaseURL != \"\" {\n\t\tapi.SetBaseURL(pf.config.BaseURL)\n\t}\n\n\tif pf.config.NatsServers != \"\" {\n\t\tapi.SetNatsServers(pf.config.NatsServers)\n\t}\n\n\tgo pf.workBackfill()\n\n\tif err := api.Stream(pf.streamHandler, pf.config.Symbols); err != nil {\n\t\tglog.Fatalf(\"nats streaming error (%v)\", err)\n\t}\n\n\tselect {}\n}", "func (gc *TemplateCommand) Run(args []string, w io.Writer) error {\n\tif len(args) == 0 {\n\t\treturn errExpectedSubcommand\n\t}\n\n\tsubcommand := args[0]\n\tswitch subcommand {\n\tcase \"list\":\n\t\treturn gc.list(w)\n\tcase \"copy\":\n\t\treturn gc.fetch(w, os.WriteFile, args[1:])\n\t}\n\treturn errUnknownSubcommand\n}", "func (_LvRecordableStream *LvRecordableStreamFilterer) WatchRunCreate(opts *bind.WatchOpts, sink chan<- *LvRecordableStreamRunCreate) (event.Subscription, error) {\n\n\tlogs, sub, err := _LvRecordableStream.contract.WatchLogs(opts, \"RunCreate\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(LvRecordableStreamRunCreate)\n\t\t\t\tif err := _LvRecordableStream.contract.UnpackLog(event, \"RunCreate\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}", "func (d *Dumper) Run(ctx context.Context) error {\n\tdefer d.consumer.Close()\n\tdefer 
d.storage.Close()\n\n\tvar total, saved int\n\tvar firstMsg, lastMsg time.Time\n\tlastLog := time.Now()\n\n\tlogStats := func() {\n\t\tlog.Infof(\n\t\t\t\"Read messages from %s to %s (total %d, saved %d)\",\n\t\t\tfirstMsg.Local().Format(\"2006-01-02 15:04:05\"),\n\t\t\tlastMsg.Local().Format(\"2006-01-02 15:04:05\"),\n\t\t\ttotal, saved,\n\t\t)\n\t\tfirstMsg = time.Time{}\n\t\tlastMsg = time.Time{}\n\t\tlastLog = time.Now()\n\t}\n\tdefer logStats()\n\n\tfor {\n\t\tif ctx.Err() != nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tif time.Since(lastLog) > d.logPeriod {\n\t\t\tlogStats()\n\t\t}\n\n\t\tmsg, err := d.consumer.Read(ctx)\n\t\tif err == context.Canceled {\n\t\t\treturn nil\n\t\t}\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Failed to read message: %v\", err)\n\t\t\tcontinue\n\t\t}\n\t\ttotal++\n\n\t\tif msg.time.Before(firstMsg) || firstMsg.IsZero() {\n\t\t\tfirstMsg = msg.time\n\t\t}\n\t\tif msg.time.After(lastMsg) || lastMsg.IsZero() {\n\t\t\tlastMsg = msg.time\n\t\t}\n\n\t\tif !d.filter.Check(msg) {\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := d.storage.Save(msg); err != nil {\n\t\t\treturn fmt.Errorf(\"save message: %v\", err)\n\t\t}\n\t\tsaved++\n\t}\n}", "func (bt *Netsamplebeat) Run(b *beat.Beat) error {\n\tvar device string\n\tif isOffline() {\n\t\tdevice = *pcapFile\n\t} else {\n\t\tdevice = bt.resolveInterface(bt.config.Interface.Device)\n\t}\n\n\tlogp.Info(\"Sampling on device '%s' with sampling rate %d.\", device, bt.config.Interface.SampleRate)\n\n\tlogp.Info(\"netsamplebeat is running! 
Hit CTRL-C to stop it.\")\n\tbt.client = b.Publisher.Connect()\n\n\tvar handle *pcap.Handle\n\tvar err error\n\n\tif isOffline() {\n\t\thandle, err = pcap.OpenOffline(*pcapFile)\n\t} else {\n\t\thandle, err = pcap.OpenLive(device, snapshotLen, promiscuous, timeout)\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer handle.Close()\n\n\tbpfInstructions, err := bt.prepareBpfFilter(handle)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = handle.SetBPFInstructionFilter(bpfInstructions)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpacketChan := gopacket.NewPacketSource(handle, handle.LinkType()).Packets()\n\n\tfor {\n\t\tselect {\n\t\tcase <-bt.done:\n\t\t\treturn nil\n\t\tcase packet := <-packetChan:\n\t\t\tif packet == nil {\n\t\t\t\tlogp.Info(\"End of packet stream reached, exit\")\n\t\t\t\tt := time.NewTimer(2 * time.Second)\n\t\t\t\tselect {\n\t\t\t\tcase <-bt.done:\n\t\t\t\tcase <-t.C:\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tbt.client.PublishEvent(bt.handlePacket(b, packet))\n\t\t\tlogp.Info(\"Event sent\")\n\t\t}\n\t}\n}", "func (c *Collector) Run() {\n\ttick := time.NewTicker(c.interval)\n\tdefer tick.Stop()\n\tfor {\n\t\tselect {\n\t\tcase <-c.done:\n\t\t\treturn\n\t\tcase <-tick.C:\n\t\t\tc.emitStats()\n\t\t}\n\t}\n}", "func (r *serverOneDotOne) Run() {\n\tr.state = RstateRunning\n\n\t// event handling is a NOP in this model\n\trxcallback := func(ev EventInterface) int {\n\t\tassert(r == ev.GetTarget())\n\t\tlog(LogVV, \"proc-ed\", ev.String())\n\t\treturn 0\n\t}\n\n\tgo func() {\n\t\tfor r.state == RstateRunning {\n\t\t\tr.receiveEnqueue()\n\t\t\ttime.Sleep(time.Microsecond)\n\t\t\tr.processPendingEvents(rxcallback)\n\t\t}\n\t}()\n}", "func (_LvRecordableStream *LvRecordableStreamFilterer) WatchRunFinalize(opts *bind.WatchOpts, sink chan<- *LvRecordableStreamRunFinalize) (event.Subscription, error) {\n\n\tlogs, sub, err := _LvRecordableStream.contract.WatchLogs(opts, \"RunFinalize\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn 
event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(LvRecordableStreamRunFinalize)\n\t\t\t\tif err := _LvRecordableStream.contract.UnpackLog(event, \"RunFinalize\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}", "func (_LvRecordableStream *LvRecordableStreamTransactor) RunStatusChange(opts *bind.TransactOpts, proposed_status_code *big.Int) (*types.Transaction, error) {\n\treturn _LvRecordableStream.contract.Transact(opts, \"runStatusChange\", proposed_status_code)\n}", "func (a *ServerQueryAPI) Run(parentContext context.Context) error {\n\tctx, ctxCancel := context.WithCancel(parentContext)\n\tdefer ctxCancel()\n\tgo a.readPump(ctx)\n\tgo func() {\n\t\ta.eventListenersMtx.Lock()\n\t\tfor _, list := range a.eventListeners {\n\t\t\tclose(list)\n\t\t}\n\t\ta.eventListeners = nil\n\t\ta.eventListenersMtx.Unlock()\n\t}()\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn context.Canceled\n\t\tcase cmd := <-a.commandQueue:\n\t\t\tres, err := a.submitCommand(ctx, cmd)\n\t\t\tcmd.result = res\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn context.Canceled\n\t\t\tcase cmd.doneCh <- err:\n\t\t\t}\n\t\tcase env, ok := <-a.readQueue:\n\t\t\tif !ok {\n\t\t\t\tif a.readError == nil {\n\t\t\t\t\treturn context.Canceled\n\t\t\t\t}\n\t\t\t\treturn a.readError\n\t\t\t}\n\t\t\ta.processEvent(ctx, env)\n\t\t}\n\t}\n}", "func (s *Consumer) Run() error {\n\t// check queue status\n\tselect {\n\tcase <-s.stop:\n\t\treturn ErrQueueShutdown\n\tdefault:\n\t}\n\n\tfor task := range 
s.taskQueue {\n\t\tvar data Job\n\t\t_ = json.Unmarshal(task.Bytes(), &data)\n\t\tif v, ok := task.(Job); ok {\n\t\t\tif v.Task != nil {\n\t\t\t\tdata.Task = v.Task\n\t\t\t}\n\t\t}\n\t\tif err := s.handle(data); err != nil {\n\t\t\ts.logger.Error(err.Error())\n\t\t}\n\t}\n\treturn nil\n}", "func RunRule(r *Rulebase, shardId string, outbound []byte) (bool, error){\n\tsuccess, err := (*r.Destination.Plugin).Publish(r.Destination.Pointer, outbound)\n\n\treturn success, err\n}", "func Run() {\n\tloading.Prefix = loadingMsgProcess\n\tloading.Start()\n\n\tvar review []*Review\n\n\tlineCh := make(chan string, 100)\n\n\tgo readFile(inputFile, lineCh)\n\n\tfor line := range lineCh {\n\t\tindex := getIndexPosition(line, delimiter)\n\t\tif index != -1 {\n\t\t\tr, err := unmarshal(line[index:])\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"%s => %s\", errorMsgUnmarshal, err)\n\t\t\t}\n\t\t\treview = append(review, r)\n\t\t}\n\t}\n\tloading.Stop()\n\tloading.Prefix = loadingMsgWrite\n\tloading.FinalMSG = loadingMsgComplete\n\tloading.Start()\n\n\tif err := writeOut(review, outputFile); err != nil {\n\t\tlog.Fatalf(\"%s => %s\", errorMsgWriteOut, err)\n\t}\n\tloading.Stop()\n}", "func (self *Server) Run(quit chan int) {\n\n\t//takes in raw events and push the onto channel\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\n\t\t\tcase e <- RawEventIn:\n\t\t\t\t{\n\t\t\t\t\terr := CheckMsgAuth(e)\n\t\t\t\t\tif err == nil {\n\n\t\t\t\t\t\te.EventResponse <- \"error invalid message\"\n\t\t\t\t\t\tbreak //ignore messages that are invalid\n\t\t\t\t\t\t//return error to event channel\n\t\t\t\t\t}\n\t\t\t\t\terr = self.RawEventToEvent(e) //push onto event loops\n\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t//main:\n\n\t}()\n}", "func (p *Printer) run() {\n\tdefer close(p.ch)\n\tconn, err := p.ln.Accept()\n\tif err != nil {\n\t\treturn\n\t}\n\tp.conn = conn\n\n\t// If Close() has been called, close the connection.\n\tif atomic.SwapInt32(&p.state, 2) == 1 {\n\t\tconn.Close()\n\t\treturn\n\t}\n\n\tdata, err 
:= ioutil.ReadAll(conn)\n\tif err != nil {\n\t\treturn\n\t}\n\tp.ch <- data\n}", "func (_LvRecordableStream *LvRecordableStreamFilterer) FilterRunCreate(opts *bind.FilterOpts) (*LvRecordableStreamRunCreateIterator, error) {\n\n\tlogs, sub, err := _LvRecordableStream.contract.FilterLogs(opts, \"RunCreate\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &LvRecordableStreamRunCreateIterator{contract: _LvRecordableStream.contract, event: \"RunCreate\", logs: logs, sub: sub}, nil\n}", "func (s *Session) Run() {\n\tgo s.run()\n}", "func (c *Client) Sub(name string, args ...interface{}) (chan string, error) {\n\n\tif args == nil {\n\t\tlog.Println(\"no args passed\")\n\t\tif err := c.ddp.Sub(name); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tif err := c.ddp.Sub(name, args[0], false); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tmsgChannel := make(chan string, default_buffer_size)\n\tc.ddp.CollectionByName(\"stream-room-messages\").AddUpdateListener(genericExtractor{msgChannel, \"update\"})\n\n\treturn msgChannel, nil\n}", "func (bg *Backgrounder) Run(f Handler) {\n\tbg.count++\n\tgo func() {\n\t\tbg.pipe <- process{Error: f()}\n\t}()\n}", "func (s *Superstep) Run() {\n\tLog.Print(\"Superstep:Run\")\n\t// go func() { s.runningStep <- 1 }()\n\ts.runningStep <- 1\n\tfor {\n\t\tv, ok := <-s.runningStep\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\n\t\tswitch v {\n\t\tcase 1:\n\t\t\t//traverse slave\n\t\t\ts.SendNextStep()\n\t\tcase 2:\n\t\t\t//\n\t\t\ts.SendSyncRequest()\n\t\tcase 3:\n\t\t\t//is terminate\n\t\t\tif atomic.LoadInt32(&s.counter) > 0 {\n\t\t\t\tatomic.StoreInt32(&s.counter, 0)\n\t\t\t\ts.runningStep <- 1\n\t\t\t} else {\n\t\t\t\t//terminated\n\t\t\t\tclose(s.runningStep)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tprintln(\"End:Run\")\n}", "func (s *SendEventToMeshAndCheckEventId) Run() error {\n\teventId := uuid.New().String()\n\n\terr := s.sendEventToMesh(eventId)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = 
s.checkEventId(eventId)\n\tif err != nil {\n\t\treturn errors.Wrap(err, s.testService.DumpAllReceivedEvents().Error())\n\t}\n\n\treturn nil\n}", "func Run(c *Config, token string, outputDir string) {\n\tclient := connect(c)\n\n\t// Authentication happens by sending the secret token via metadata\n\tmd := metadata.Pairs(shared.SecretKey, c.Secret)\n\n\tstream, err := client.Get(metadata.NewContext(context.Background(), md), &api.GetRequest{\n\t\tToken: token,\n\t})\n\tshared.ExitOnError(err, \"Unable to initiate Receive: %v\", grpc.ErrorDesc(err))\n\n\tfor {\n\t\tres, err := stream.Recv()\n\t\tif err != nil {\n\t\t\tshared.ExitOnError(err, \"Unable to receive data: %v\", grpc.ErrorDesc(err))\n\t\t}\n\t\tfmt.Println(\"received\", res.Type)\n\t}\n}" ]
[ "0.68090886", "0.6607723", "0.62104803", "0.6052246", "0.60397995", "0.6036606", "0.6014039", "0.5855327", "0.584613", "0.5787809", "0.5786557", "0.57808554", "0.57723176", "0.5762391", "0.5720863", "0.5698172", "0.56778085", "0.56413287", "0.56297857", "0.5620973", "0.5596509", "0.55898225", "0.5577841", "0.5564895", "0.5551683", "0.55425894", "0.55403733", "0.5535309", "0.5514597", "0.5497419", "0.54880446", "0.54681516", "0.5465937", "0.54653925", "0.5464412", "0.545952", "0.5458775", "0.5453086", "0.5421377", "0.5399702", "0.5388998", "0.5385961", "0.53854406", "0.5372304", "0.537072", "0.53695863", "0.53607476", "0.53476506", "0.5333166", "0.5330184", "0.532808", "0.5326805", "0.5321262", "0.5320659", "0.5319547", "0.5316912", "0.53099626", "0.5301372", "0.5276689", "0.5260996", "0.5251499", "0.5248596", "0.5245859", "0.5243559", "0.5235635", "0.52350783", "0.52295256", "0.52196723", "0.5217046", "0.52127224", "0.52121127", "0.5198198", "0.5196465", "0.5176604", "0.51456064", "0.514505", "0.51422924", "0.51223665", "0.51105314", "0.5108601", "0.5103311", "0.5090776", "0.5089095", "0.5075238", "0.5065647", "0.5062555", "0.5061524", "0.5059221", "0.50585663", "0.5057978", "0.50575316", "0.5046636", "0.5038651", "0.50325197", "0.50317335", "0.5029729", "0.5026928", "0.50258154", "0.50255966", "0.5023966" ]
0.79116404
0
reconcileStorage will ensure that the storage options for the ArgoCDExport are present.
func (r *ReconcileArgoCDExport) reconcileStorage(cr *argoprojv1a1.ArgoCDExport) error { if cr.Spec.Storage == nil { cr.Spec.Storage = &argoprojv1a1.ArgoCDExportStorageSpec{ Backend: common.ArgoCDExportStorageBackendLocal, } return r.Client.Update(context.TODO(), cr) } // Local storage if err := r.reconcileLocalStorage(cr); err != nil { return err } return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (p *PBM) ResyncStorage(l *log.Event) error {\n\tstg, err := p.GetStorage(l)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"unable to get backup store\")\n\t}\n\n\t_, err = stg.FileStat(StorInitFile)\n\tif errors.Is(err, storage.ErrNotExist) {\n\t\terr = stg.Save(StorInitFile, bytes.NewBufferString(version.DefaultInfo.Version), 0)\n\t}\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"init storage\")\n\t}\n\n\tbcps, err := stg.List(\"\", MetadataFileSuffix)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"get a backups list from the storage\")\n\t}\n\n\terr = p.moveCollection(BcpCollection, BcpOldCollection)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"copy current backups meta from %s to %s\", BcpCollection, BcpOldCollection)\n\t}\n\terr = p.moveCollection(PITRChunksCollection, PITRChunksOldCollection)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"copy current pitr meta from %s to %s\", PITRChunksCollection, PITRChunksOldCollection)\n\t}\n\n\tif len(bcps) == 0 {\n\t\treturn nil\n\t}\n\n\tvar ins []interface{}\n\tfor _, b := range bcps {\n\t\td, err := stg.SourceReader(b.Name)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"read meta for %v\", b.Name)\n\t\t}\n\n\t\tv := BackupMeta{}\n\t\terr = json.NewDecoder(d).Decode(&v)\n\t\td.Close()\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"unmarshal backup meta\")\n\t\t}\n\t\terr = checkBackupFiles(&v, stg)\n\t\tif err != nil {\n\t\t\tl.Warning(\"skip snapshot %s: %v\", v.Name, err)\n\t\t\tcontinue\n\t\t}\n\t\tins = append(ins, v)\n\t}\n\t_, err = p.Conn.Database(DB).Collection(BcpCollection).InsertMany(p.ctx, ins)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"insert retrieved backups meta\")\n\t}\n\n\tpitrf, err := stg.List(PITRfsPrefix, \"\")\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"get list of pitr chunks\")\n\t}\n\tif len(pitrf) == 0 {\n\t\treturn nil\n\t}\n\n\tvar pitr []interface{}\n\tfor _, f := range pitrf {\n\t\t_, err := stg.FileStat(PITRfsPrefix + \"/\" + 
f.Name)\n\t\tif err != nil {\n\t\t\tl.Warning(\"skip pitr chunk %s/%s because of %v\", PITRfsPrefix, f.Name, err)\n\t\t\tcontinue\n\t\t}\n\t\tchnk := PITRmetaFromFName(f.Name)\n\t\tif chnk != nil {\n\t\t\tpitr = append(pitr, chnk)\n\t\t}\n\t}\n\n\tif len(pitr) == 0 {\n\t\treturn nil\n\t}\n\n\t_, err = p.Conn.Database(DB).Collection(PITRChunksCollection).InsertMany(p.ctx, pitr)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"insert retrieved pitr meta\")\n\t}\n\n\treturn nil\n}", "func configureStorage(dep *appsv1.Deployment, nodeSet v1alpha1.NodeSet) error {\n\tif nuxeoContainer, err := GetNuxeoContainer(dep); err != nil {\n\t\treturn err\n\t} else {\n\t\tfor _, storage := range nodeSet.Storage {\n\t\t\tif volume, err := createVolumeForStorage(storage); err != nil {\n\t\t\t\treturn err\n\t\t\t} else {\n\t\t\t\tvolMnt := createVolumeMountForStorage(storage.StorageType, volume.Name)\n\t\t\t\tenvVar := createEnvVarForStorage(storage.StorageType, volMnt.MountPath)\n\t\t\t\tif err := util.OnlyAddVol(dep, volume); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := util.OnlyAddVolMnt(nuxeoContainer, volMnt); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif envVar != (corev1.EnvVar{}) {\n\t\t\t\t\tif err := util.OnlyAddEnvVar(nuxeoContainer, envVar); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (s *Store) CompactStorage(entityKey string, threshold uint64) (err error) {\n\tvar repoSize, newRepoSize uint64\n\trepoSize, err = s.StorageSize(s.CacheDir)\n\tif err == nil && repoSize > 0 && repoSize > threshold {\n\t\tcslog := slog.WithFieldsF(func() logrus.Fields {\n\t\t\treturn logrus.Fields{\"repoSize\": repoSize, \"threshold\": threshold, \"entityKey\": entityKey}\n\t\t})\n\n\t\tcslog.Debug(\"Local repo size exceeds compaction threshold. 
Compacting.\")\n\t\tif err = s.compactCacheStorage(entityKey, threshold); err != nil {\n\t\t\treturn\n\t\t}\n\t\tnewRepoSize, err = s.StorageSize(s.CacheDir)\n\t\tif nil != err {\n\t\t\treturn\n\t\t}\n\n\t\tcslog.WithFieldsF(func() logrus.Fields {\n\t\t\tsavedPct := (float64(repoSize-newRepoSize) / float64(repoSize)) * 100\n\t\t\treturn logrus.Fields{\"newRepoSize\": newRepoSize, \"savedPercentage\": savedPct}\n\t\t}).Debug(\"Local repo compacted.\")\n\n\t\terr = s.SaveState()\n\t}\n\treturn\n}", "func (o ArgoCDExportSpecOutput) Storage() ArgoCDExportSpecStoragePtrOutput {\n\treturn o.ApplyT(func(v ArgoCDExportSpec) *ArgoCDExportSpecStorage { return v.Storage }).(ArgoCDExportSpecStoragePtrOutput)\n}", "func (r *MetadataRestoreReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {\n\tctx := context.Background()\n\n\tvar mr kubedrv1alpha1.MetadataRestore\n\tif err := r.Get(ctx, req.NamespacedName, &mr); err != nil {\n\t\tif apierrors.IsNotFound(err) {\n\t\t\t// we'll ignore not-found errors, since they can't be fixed by an immediate\n\t\t\t// requeue (we'll need to wait for a new notification).\n\t\t\tr.Log.Info(\"MetadataRestore (\" + req.NamespacedName.Name + \") is not found\")\n\t\t\treturn ctrl.Result{}, nil\n\t\t}\n\n\t\tr.Log.Error(err, \"unable to fetch MetadataRestore\")\n\t\treturn ctrl.Result{}, err\n\t}\n\n\t// Skip if spec hasn't changed. 
This check prevents reconcile on status\n\t// updates.\n\tif mr.Status.ObservedGeneration == mr.ObjectMeta.Generation {\n\t\tr.Log.Info(\"Skipping reconcile as generation number hasn't changed\")\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\t// No deletion logic as we don't really have anything to do during\n\t// deletion of a MetadataRestore resource.\n\n\t// Check annotations to see if this resource was already processed\n\t// and restore was successful.\n\trestoreAnnotation := \"restored.annotations.kubedr.catalogicsoftware.com\"\n\n\trestored, exists := mr.ObjectMeta.Annotations[restoreAnnotation]\n\tif exists && (restored == \"true\") {\n\t\t// No need to process the resource as restore was done already.\n\t\tr.Log.Info(\"Restore was already done\")\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\t// We are deliberately avoiding any attempt to make the name unique.\n\t// The client is in a better position to come up with a unique name.\n\t// If we do switch to generating a unique name, we need to make sure\n\t// that any previous pods are cleaned up.\n\tpodName := mr.Name + \"-mr\"\n\n\t// Since we don't generate a unique name for the pod that initializes the repo,\n\t// we need to explicitly check and delete the pod if it exists.\n\tvar prevPod corev1.Pod\n\tif err := r.Get(ctx, types.NamespacedName{Namespace: req.Namespace, Name: podName}, &prevPod); err == nil {\n\t\tr.Log.Info(\"Found a previous restore pod, will delete it and continue...\")\n\t\tif err := r.Delete(ctx, &prevPod); ignoreNotFound(err) != nil {\n\t\t\tr.Log.Error(err, \"Error in deleting init pod\")\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t}\n\n\tpod, err := r.buildRestorePod(&mr, req.Namespace, podName)\n\tif err != nil {\n\t\tr.Log.Error(err, \"Error in creating restore pod\")\n\t\tif apierrors.IsNotFound(err) {\n\t\t\t// This shouldn't really happen but if an invalid MBR is given or\n\t\t\t// if backup location inside the MBR is wrong, there is nothing we can\n\t\t\t// do.\n\t\t\tr.setStatus(&mr, 
\"Failed\",\n\t\t\t\tfmt.Sprintf(\"Error in creating restore pod, reason (%s)\", err.Error()))\n\t\t\treturn ctrl.Result{}, nil\n\t\t}\n\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tif err := ctrl.SetControllerReference(&mr, pod, r.Scheme); err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tr.Log.Info(\"Starting a new Pod\", \"Pod.Namespace\", pod.Namespace, \"Pod.Name\", pod.Name)\n\terr = r.Create(ctx, pod)\n\tif err != nil {\n\t\tr.Log.Error(err, \"Error in starting restore pod\")\n\t\tr.setStatus(&mr, \"Failed\", err.Error())\n\t\treturn ctrl.Result{}, err\n\t}\n\n\treturn ctrl.Result{}, nil\n}", "func (r *deletedReconciler) Reconcile(ctx context.Context, in *marketplace.OperatorSource) (out *marketplace.OperatorSource, nextPhase *marketplace.Phase, err error) {\n\tout = in\n\n\t// Delete the operator source manifests.\n\tr.datastore.RemoveOperatorSource(out.UID)\n\n\t// Delete the owned CatalogSourceConfig\n\terr = r.deleteCreatedResources(ctx, in.Name, in.Namespace)\n\tif err != nil {\n\t\t// Something went wrong before we removed the finalizer, let's retry.\n\t\tnextPhase = phase.GetNextWithMessage(in.Status.CurrentPhase.Name, err.Error())\n\t\treturn\n\t}\n\n\t// Remove the opsrc finalizer from the object.\n\tout.RemoveFinalizer()\n\n\t// Update the client. Since there is no phase shift, the transitioner\n\t// will not update it automatically like the normal phases.\n\terr = r.client.Update(context.TODO(), out)\n\tif err != nil {\n\t\t// An error happened on update. If it was transient, we will retry.\n\t\t// If not, and the finalizer was removed, then the delete will clean\n\t\t// the object up anyway. 
Let's set the next phase for a possible retry.\n\t\tnextPhase = phase.GetNextWithMessage(in.Status.CurrentPhase.Name, err.Error())\n\t\treturn\n\t}\n\n\tr.logger.Info(\"Finalizer removed, now garbage collector will clean it up.\")\n\n\treturn\n}", "func (r *BackupLocationReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {\n\tctx := context.Background()\n\tlog := r.Log.WithValues(\"backuplocation\", req.NamespacedName)\n\n\tvar backupLoc kubedrv1alpha1.BackupLocation\n\tif err := r.Get(ctx, req.NamespacedName, &backupLoc); err != nil {\n\t\tif apierrors.IsNotFound(err) {\n\t\t\t// we'll ignore not-found errors, since they can't be fixed by an immediate\n\t\t\t// requeue (we'll need to wait for a new notification).\n\t\t\tlog.Info(\"BackupLocation (\" + req.NamespacedName.Name + \") is not found\")\n\t\t\treturn ctrl.Result{}, nil\n\t\t}\n\n\t\tlog.Error(err, \"unable to fetch BackupLocation\")\n\t\treturn ctrl.Result{}, err\n\t}\n\n\t// Skip if spec hasn't changed. This check prevents reconcile on status\n\t// updates.\n\tif backupLoc.Status.ObservedGeneration == backupLoc.ObjectMeta.Generation {\n\t\tr.Log.Info(\"Skipping reconcile as generation number hasn't changed\")\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\tfinalizer := \"backuplocation.finalizers.kubedr.catalogicsoftware.com\"\n\n\tif backupLoc.ObjectMeta.DeletionTimestamp.IsZero() {\n\t\t// The object is not being deleted, so if it does not have our finalizer,\n\t\t// then lets add the finalizer and update the object. 
This is equivalent\n\t\t// to registering our finalizer.\n\t\tif !containsString(backupLoc.ObjectMeta.Finalizers, finalizer) {\n\t\t\tbackupLoc.ObjectMeta.Finalizers = append(backupLoc.ObjectMeta.Finalizers, finalizer)\n\t\t\tif err := r.Update(context.Background(), &backupLoc); err != nil {\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\t\t}\n\t} else {\n\t\t// The object is being deleted\n\t\tif containsString(backupLoc.ObjectMeta.Finalizers, finalizer) {\n\t\t\t// our finalizer is present, handle any pre-deletion logic here.\n\n\t\t\t// remove our finalizer from the list and update it.\n\t\t\tbackupLoc.ObjectMeta.Finalizers = removeString(backupLoc.ObjectMeta.Finalizers, finalizer)\n\n\t\t\tif err := r.Update(context.Background(), &backupLoc); err != nil {\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\t\t}\n\n\t\t// Nothing more to do for DELETE.\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\t// Check annotations to see if repo is already initialized.\n\t// Ideally, we should check the repo itself to confirm that it is\n\t// initialized, instead of depending on annotation.\n\tinitAnnotation := \"initialized.annotations.kubedr.catalogicsoftware.com\"\n\n\tinitialized, exists := backupLoc.ObjectMeta.Annotations[initAnnotation]\n\tif exists && (initialized == \"true\") {\n\t\t// No need to initialize the repo.\n\t\tlog.Info(\"Repo is already initialized\")\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\t// Annotation doesn't exist so we need to initialize the repo.\n\n\tinitPodName := backupLoc.Name + \"-init-pod\"\n\n\t// Since we don't generate a unique name for the pod that initializes the repo,\n\t// we need to explicitly check and delete the pod if it exists. 
We may eventually\n\t// use a unique name but that will also require cleanup of old pods.\n\tvar pod corev1.Pod\n\tif err := r.Get(ctx, types.NamespacedName{Namespace: req.Namespace, Name: initPodName}, &pod); err == nil {\n\t\tlog.Info(\"Found init pod, will delete it and continue...\")\n\t\tif err := r.Delete(ctx, &pod); ignoreNotFound(err) != nil {\n\t\t\tlog.Error(err, \"Error in deleting init pod\")\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t}\n\n\tr.setStatus(&backupLoc, \"Initializing\", \"\")\n\n\t// Initialize the repo.\n\tinitPod, err := buildResticRepoInitPod(&backupLoc, log)\n\tif err != nil {\n\t\tlog.Error(err, \"Error in creating init pod\")\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tif err := ctrl.SetControllerReference(&backupLoc, initPod, r.Scheme); err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tlog.Info(\"Starting a new Pod\", \"Pod.Namespace\", initPod.Namespace, \"Pod.Name\", initPod.Name)\n\terr = r.Create(ctx, initPod)\n\tif err != nil {\n\t\tr.Log.Error(err, \"Error in starting init pod\")\n\t\tr.setStatus(&backupLoc, \"Failed\", err.Error())\n\t\treturn ctrl.Result{}, err\n\t}\n\n\treturn ctrl.Result{}, nil\n}", "func (c *Container) cleanupStorage() error {\n\tif !c.state.Mounted {\n\t\t// Already unmounted, do nothing\n\t\treturn nil\n\t}\n\n\tfor _, mount := range c.config.Mounts {\n\t\tif err := unix.Unmount(mount, unix.MNT_DETACH); err != nil {\n\t\t\tif err != syscall.EINVAL {\n\t\t\t\tlogrus.Warnf(\"container %s failed to unmount %s : %v\", c.ID(), mount, err)\n\t\t\t}\n\t\t}\n\t}\n\n\t// Also unmount storage\n\tif err := c.runtime.storageService.UnmountContainerImage(c.ID()); err != nil {\n\t\treturn errors.Wrapf(err, \"error unmounting container %s root filesystem\", c.ID())\n\t}\n\n\tc.state.Mountpoint = \"\"\n\tc.state.Mounted = false\n\n\treturn c.save()\n}", "func (p *BlackduckPatcher) patchStorage() error {\n\tfor k, v := range p.mapOfUniqueIdToBaseRuntimeObject {\n\t\tswitch v.(type) {\n\t\tcase 
*corev1.PersistentVolumeClaim:\n\t\t\tif !p.blackDuckCr.Spec.PersistentStorage {\n\t\t\t\tdelete(p.mapOfUniqueIdToBaseRuntimeObject, k)\n\t\t\t} else {\n\t\t\t\tif len(p.blackDuckCr.Spec.PVCStorageClass) > 0 {\n\t\t\t\t\tv.(*corev1.PersistentVolumeClaim).Spec.StorageClassName = &p.blackDuckCr.Spec.PVCStorageClass\n\t\t\t\t}\n\t\t\t\tfor _, pvc := range p.blackDuckCr.Spec.PVC {\n\t\t\t\t\tif strings.EqualFold(pvc.Name, v.(*corev1.PersistentVolumeClaim).Name) {\n\t\t\t\t\t\tv.(*corev1.PersistentVolumeClaim).Spec.VolumeName = pvc.VolumeName\n\t\t\t\t\t\tv.(*corev1.PersistentVolumeClaim).Spec.StorageClassName = &pvc.StorageClass\n\t\t\t\t\t\tif quantity, err := resource.ParseQuantity(pvc.Size); err == nil {\n\t\t\t\t\t\t\tv.(*corev1.PersistentVolumeClaim).Spec.Resources.Requests[corev1.ResourceStorage] = quantity\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase *corev1.ReplicationController:\n\t\t\tif !p.blackDuckCr.Spec.PersistentStorage {\n\t\t\t\tfor i := range v.(*corev1.ReplicationController).Spec.Template.Spec.Volumes {\n\t\t\t\t\t// If PersistentVolumeClaim then we change it to emptyDir\n\t\t\t\t\tif v.(*corev1.ReplicationController).Spec.Template.Spec.Volumes[i].VolumeSource.PersistentVolumeClaim != nil {\n\t\t\t\t\t\tv.(*corev1.ReplicationController).Spec.Template.Spec.Volumes[i].VolumeSource = corev1.VolumeSource{\n\t\t\t\t\t\t\tEmptyDir: &corev1.EmptyDirVolumeSource{\n\t\t\t\t\t\t\t\tMedium: corev1.StorageMediumDefault,\n\t\t\t\t\t\t\t\tSizeLimit: nil,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t}\n\treturn nil\n}", "func (c *Container) setupStorage() error {\n\tif !c.valid {\n\t\treturn errors.Wrapf(ErrCtrRemoved, \"container %s is not valid\", c.ID())\n\t}\n\n\tif c.state.State != ContainerStateConfigured {\n\t\treturn errors.Wrapf(ErrCtrStateInvalid, \"container %s must be in Configured state to have storage set up\", c.ID())\n\t}\n\n\t// Need both an image ID and image name, plus a bool telling us whether to use 
the image configuration\n\tif c.config.RootfsImageID == \"\" || c.config.RootfsImageName == \"\" {\n\t\treturn errors.Wrapf(ErrInvalidArg, \"must provide image ID and image name to use an image\")\n\t}\n\n\tcontainerInfo, err := c.runtime.storageService.CreateContainerStorage(c.runtime.imageContext, c.config.RootfsImageName, c.config.RootfsImageID, c.config.Name, c.config.ID, c.config.MountLabel)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"error creating container storage\")\n\t}\n\n\tc.config.StaticDir = containerInfo.Dir\n\tc.state.RunDir = containerInfo.RunDir\n\n\tartifacts := filepath.Join(c.config.StaticDir, artifactsDir)\n\tif err := os.MkdirAll(artifacts, 0755); err != nil {\n\t\treturn errors.Wrapf(err, \"error creating artifacts directory %q\", artifacts)\n\t}\n\n\treturn nil\n}", "func (c *Container) CleanupStorage() error {\n\tc.lock.Lock()\n\tdefer c.lock.Unlock()\n\tif err := c.syncContainer(); err != nil {\n\t\treturn err\n\t}\n\treturn c.cleanupStorage()\n}", "func (r *StoreReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {\n\tctx := context.Background()\n\tlog := r.Log.WithValues(\"store\", req.NamespacedName)\n\n\tstore := &thanosv1beta1.Store{}\n\tif err := r.Get(ctx, req.NamespacedName, store); err != nil {\n\t\tif ignoreNotFound(err) == nil {\n\t\t\treturn ctrl.Result{}, nil\n\t\t}\n\t\tlog.Error(err, \"unable to fetch thanos store\")\n\t\treturn ctrl.Result{}, err\n\t}\n\n\t// Generate Service\n\tservice := &corev1.Service{\n\t\tObjectMeta: ctrl.ObjectMeta{\n\t\t\tName: req.Name,\n\t\t\tNamespace: req.Namespace,\n\t\t},\n\t}\n\t_, err := ctrl.CreateOrUpdate(ctx, r.Client, service, func() error {\n\t\tmakeService(service, service.Name)\n\t\treturn controllerutil.SetControllerReference(store, service, r.Scheme)\n\t})\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\t// Generate Deployment\n\tdm := &appsv1.Deployment{\n\t\tObjectMeta: ctrl.ObjectMeta{\n\t\t\tName: req.Name,\n\t\t\tNamespace: 
req.Namespace,\n\t\t},\n\t}\n\n\t_, err = ctrl.CreateOrUpdate(ctx, r.Client, dm, func() error {\n\t\tsetStoreDeployment(\n\t\t\tdm,\n\t\t\tservice,\n\t\t\t*store,\n\t\t)\n\t\treturn controllerutil.SetControllerReference(store, dm, r.Scheme)\n\t})\n\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\t// Update Status\n\n\treturn ctrl.Result{}, nil\n}", "func (a *Agent) setStorages() error {\n\ta.MetadataStorages = &storage.MetadataStorages{}\n\n\ta.MetadataStorages.ResourcedMaster = storage.NewResourcedMasterMetadataStorage(a.GeneralConfig.ResourcedMaster.URL, a.GeneralConfig.ResourcedMaster.AccessToken)\n\n\ta.Db = storage.NewStorage()\n\n\treturn nil\n}", "func (r *FlowTestReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {\n\tlogger := log.FromContext(ctx)\n\tlogger.Info(\"Reconciling\")\n\n\tvar flowTest loggingpipelineplumberv1beta1.FlowTest\n\tif err := r.Get(ctx, req.NamespacedName, &flowTest); err != nil {\n\t\t// all the resources are already deleted\n\t\tif apierrors.IsNotFound(err) {\n\t\t\t// Remove if log aggregator is still running\n\t\t\tif err := r.cleanUpOutputResources(ctx); client.IgnoreNotFound(err) != nil {\n\t\t\t\treturn ctrl.Result{Requeue: true}, err\n\t\t\t}\n\t\t\treturn ctrl.Result{Requeue: false}, nil\n\t\t}\n\t\tlogger.Error(err, \"failed to get the flowtest\")\n\t\treturn ctrl.Result{Requeue: false}, err\n\t}\n\n\tctx = context.WithValue(ctx, \"flowTest\", flowTest)\n\n\t// name of our custom finalizer\n\tfinalizerName := \"flowtests.loggingpipelineplumber.isala.me/finalizer\"\n\t// examine DeletionTimestamp to determine if object is under deletion\n\tif !flowTest.ObjectMeta.DeletionTimestamp.IsZero() {\n\t\tif err := r.deleteResources(ctx, finalizerName); err != nil {\n\t\t\t// if fail to delete the external dependency here, return with error\n\t\t\t// so that it can be retried\n\t\t\treturn ctrl.Result{Requeue: true}, err\n\t\t}\n\t\t// Stop reconciliation as the item is being 
deleted\n\t\treturn ctrl.Result{Requeue: false}, nil\n\t}\n\n\tif flowTest.ObjectMeta.Name == \"\" {\n\t\tlogger.V(-1).Info(\"flowtest without a name queued\")\n\t\treturn ctrl.Result{Requeue: false}, nil\n\t}\n\n\t// Reconcile depending on status\n\tswitch flowTest.Status.Status {\n\n\t// This will run only at first iteration\n\tcase \"\":\n\t\t// Set the finalizer\n\t\tcontrollerutil.AddFinalizer(&flowTest, finalizerName)\n\t\tif err := r.Update(ctx, &flowTest); err != nil {\n\t\t\tlogger.Error(err, \"failed to add finalizer\")\n\t\t\treturn ctrl.Result{Requeue: true}, err\n\t\t}\n\t\t// Set the status\n\t\tflowTest.Status.Status = loggingpipelineplumberv1beta1.Created\n\t\tif err := r.Status().Update(ctx, &flowTest); err != nil {\n\t\t\tlogger.Error(err, \"failed to set status as created\")\n\t\t\treturn ctrl.Result{Requeue: true}, err\n\t\t}\n\t\tr.Recorder.Event(&flowTest, v1.EventTypeNormal, EventReasonProvision, \"moved to created state\")\n\t\treturn ctrl.Result{Requeue: true}, nil\n\n\tcase loggingpipelineplumberv1beta1.Created:\n\t\tif err := r.provisionResource(ctx); err != nil {\n\t\t\tr.Recorder.Event(&flowTest, v1.EventTypeWarning, EventReasonProvision, fmt.Sprintf(\"error while provision flow resources: %s\", err.Error()))\n\t\t\treturn ctrl.Result{Requeue: true}, r.setErrorStatus(ctx, err)\n\t\t}\n\t\tr.Recorder.Event(&flowTest, v1.EventTypeNormal, EventReasonProvision, \"all the need resources were scheduled\")\n\t\t// Give 1 minute to resource to provisioned\n\t\treturn ctrl.Result{RequeueAfter: time.Minute}, nil\n\n\tcase loggingpipelineplumberv1beta1.Running:\n\t\tfiveMinuteAfterCreation := flowTest.CreationTimestamp.Add(5 * time.Minute)\n\t\t// Timeout or all test are passing\n\t\tif time.Now().After(fiveMinuteAfterCreation) || allTestPassing(flowTest.Status) {\n\t\t\tflowTest.Status.Status = loggingpipelineplumberv1beta1.Completed\n\t\t\tif err := r.Status().Update(ctx, &flowTest); err != nil {\n\t\t\t\tlogger.Error(err, \"failed to set status 
as completed\")\n\t\t\t\treturn ctrl.Result{Requeue: true}, nil\n\t\t\t}\n\t\t\treturn ctrl.Result{RequeueAfter: 1 * time.Second}, nil\n\t\t}\n\n\t\tlogger.V(1).Info(\"checking log indexes\")\n\t\terr := r.checkForPassingFlowTest(ctx)\n\t\tif err != nil {\n\t\t\tr.Recorder.Event(&flowTest, v1.EventTypeWarning, EventReasonReconcile, fmt.Sprintf(\"error while checking log indexes: %s\", err.Error()))\n\t\t}\n\t\treturn ctrl.Result{RequeueAfter: 30 * time.Second}, err\n\n\tcase loggingpipelineplumberv1beta1.Completed:\n\t\tif err := r.deleteResources(ctx, finalizerName); err != nil {\n\t\t\treturn ctrl.Result{Requeue: true}, err\n\t\t}\n\t\tr.Recorder.Event(&flowTest, v1.EventTypeNormal, EventReasonCleanup, \"all the provisioned resources were scheduled to be deleted\")\n\t\treturn ctrl.Result{}, nil\n\n\t}\n\n\treturn ctrl.Result{}, nil\n}", "func (r *reconciler) Reconcile(ctx context.Context, object runtime.Object) error {\n\tlogger := logging.FromContext(ctx)\n\n\tsource, ok := object.(*sourcesv1alpha1.DriveSource)\n\tif !ok {\n\t\tlogger.Errorf(\"could not find Drive source %v\", object)\n\t\treturn nil\n\t}\n\n\t// See if the source has been deleted.\n\taccessor, err := meta.Accessor(source)\n\tif err != nil {\n\t\tlogger.Warnf(\"Failed to get metadata accessor: %s\", zap.Error(err))\n\t\treturn err\n\t}\n\n\tvar reconcileErr error\n\tif accessor.GetDeletionTimestamp() == nil {\n\t\treconcileErr = r.reconcile(ctx, source)\n\t} else {\n\t\treconcileErr = r.finalize(ctx, source)\n\t}\n\n\treturn reconcileErr\n}", "func (r *ReconcileHiveConfig) Reconcile(request reconcile.Request) (reconcile.Result, error) {\n\n\thLog := log.WithField(\"controller\", \"hive\")\n\thLog.Info(\"Reconciling Hive components\")\n\n\t// Fetch the Hive instance\n\tinstance := &hivev1.HiveConfig{}\n\t// NOTE: ignoring the Namespace that seems to get set on request when syncing on namespaced objects,\n\t// when our HiveConfig is ClusterScoped.\n\terr := r.Get(context.TODO(), 
types.NamespacedName{Name: request.NamespacedName.Name}, instance)\n\tif err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\t// Object not found, return. Created objects are automatically garbage collected.\n\t\t\t// For additional cleanup logic use finalizers.\n\t\t\thLog.Debug(\"HiveConfig not found, deleted?\")\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\t// Error reading the object - requeue the request.\n\t\thLog.WithError(err).Error(\"error reading HiveConfig\")\n\t\treturn reconcile.Result{}, err\n\t}\n\n\t// We only support one HiveConfig per cluster, and it must be called \"hive\". This prevents installing\n\t// Hive more than once in the cluster.\n\tif instance.Name != hiveConfigName {\n\t\thLog.WithField(\"hiveConfig\", instance.Name).Warn(\"invalid HiveConfig name, only one HiveConfig supported per cluster and must be named 'hive'\")\n\t\treturn reconcile.Result{}, nil\n\t}\n\n\trecorder := events.NewRecorder(r.kubeClient.CoreV1().Events(constants.HiveNamespace), \"hive-operator\", &corev1.ObjectReference{\n\t\tName: request.Name,\n\t\tNamespace: constants.HiveNamespace,\n\t})\n\n\tif r.syncAggregatorCA {\n\t\t// We use the configmap lister and not the regular client which only watches resources in the hive namespace\n\t\taggregatorCAConfigMap, err := r.managedConfigCMLister.ConfigMaps(managedConfigNamespace).Get(aggregatorCAConfigMapName)\n\t\t// If an error other than not found, retry. 
If not found, it means we don't need to do anything with\n\t\t// admission pods yet.\n\t\tcmLog := hLog.WithField(\"configmap\", fmt.Sprintf(\"%s/%s\", managedConfigNamespace, aggregatorCAConfigMapName))\n\t\tswitch {\n\t\tcase errors.IsNotFound(err):\n\t\t\tcmLog.Warningf(\"configmap was not found, will not sync aggregator CA with admission pods\")\n\t\tcase err != nil:\n\t\t\tcmLog.WithError(err).Errorf(\"cannot retrieve configmap\")\n\t\t\treturn reconcile.Result{}, err\n\t\tdefault:\n\t\t\tcaHash := computeHash(aggregatorCAConfigMap.Data)\n\t\t\tcmLog.WithField(\"hash\", caHash).Debugf(\"computed hash for configmap\")\n\t\t\tif instance.Status.AggregatorClientCAHash != caHash {\n\t\t\t\tcmLog.WithField(\"oldHash\", instance.Status.AggregatorClientCAHash).\n\t\t\t\t\tInfo(\"configmap has changed, admission pods will restart on the next sync\")\n\t\t\t\tinstance.Status.AggregatorClientCAHash = caHash\n\t\t\t\tcmLog.Debugf(\"updating status with new aggregator CA configmap hash\")\n\t\t\t\terr = r.Status().Update(context.TODO(), instance)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcmLog.WithError(err).Error(\"cannot update hash in config status\")\n\t\t\t\t}\n\t\t\t\treturn reconcile.Result{}, err\n\t\t\t}\n\t\t\tcmLog.Debug(\"configmap unchanged, nothing to do\")\n\t\t}\n\t}\n\n\th := resource.NewHelperFromRESTConfig(r.restConfig, hLog)\n\n\tif err := deployManagedDomainsConfigMap(h, instance); err != nil {\n\t\thLog.WithError(err).Error(\"error deploying managed domains configmap\")\n\t\treturn reconcile.Result{}, err\n\t}\n\n\terr = r.deployHive(hLog, h, instance, recorder)\n\tif err != nil {\n\t\thLog.WithError(err).Error(\"error deploying Hive\")\n\t\treturn reconcile.Result{}, err\n\t}\n\n\terr = r.deployHiveAdmission(hLog, h, instance, recorder)\n\tif err != nil {\n\t\thLog.WithError(err).Error(\"error deploying HiveAdmission\")\n\t\treturn reconcile.Result{}, err\n\t}\n\n\tif err := r.teardownLegacyExternalDNS(hLog); err != nil 
{\n\t\thLog.WithError(err).Error(\"error tearing down legacy ExternalDNS\")\n\t\treturn reconcile.Result{}, err\n\t}\n\n\treturn reconcile.Result{}, nil\n}", "func SetStorageToMountPath(dep *appsv1.Deployment, storagestructs []v1alpha1.StorageSpec, ctn string, volumeName string, group GroupEnums) {\n\tfor _, v := range storagestructs {\n\t\tif v.Type == v1alpha1.Ephemeral || v.Type == v1alpha1.PVC {\n\t\t\tif v.Usage == v1alpha1.Data {\n\t\t\t\tsetMountPath(dep, strings.Replace(volumeName, \"type\", string(v1alpha1.Data), -1), fmt.Sprintf(\"%s/%s\", DataMountPath, group), ctn, group)\n\t\t\t} else if v.Usage == v1alpha1.Log {\n\t\t\t\tsetMountPath(dep, strings.Replace(volumeName, \"type\", string(v1alpha1.Log), -1), fmt.Sprintf(\"%s/%s\", LogMountPath, group), ctn, group)\n\t\t\t} else if v.Usage == v1alpha1.Empty && group == AlamedaGroup {\n\t\t\t\tsetMountPath(dep, strings.Replace(volumeName, \"type\", string(v1alpha1.Data), -1), fmt.Sprintf(\"%s/%s\", DataMountPath, group), ctn, group)\n\t\t\t\tsetMountPath(dep, strings.Replace(volumeName, \"type\", string(v1alpha1.Log), -1), fmt.Sprintf(\"%s/%s\", LogMountPath, group), ctn, group)\n\t\t\t} else if v.Usage == v1alpha1.Empty && group != AlamedaGroup { // if not alameda component's then only set data\n\t\t\t\tsetMountPath(dep, strings.Replace(volumeName, \"type\", string(v1alpha1.Data), -1), fmt.Sprintf(\"%s/%s\", DataMountPath, group), ctn, group)\n\t\t\t}\n\t\t}\n\t}\n}", "func (o *Operator) onStartStorage(stop <-chan struct{}) {\n\tfor {\n\t\tif err := o.waitForCRD(false, false, true, false); err == nil {\n\t\t\tbreak\n\t\t} else {\n\t\t\tlog.Error().Err(err).Msg(\"Resource initialization failed\")\n\t\t\tlog.Info().Msgf(\"Retrying in %s...\", initRetryWaitTime)\n\t\t\ttime.Sleep(initRetryWaitTime)\n\t\t}\n\t}\n\to.runLocalStorages(stop)\n}", "func (r *FoundationDBBackupReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.Result, error) {\n\tbackup := &fdbv1beta2.FoundationDBBackup{}\n\n\terr 
:= r.Get(ctx, request.NamespacedName, backup)\n\n\toriginalGeneration := backup.ObjectMeta.Generation\n\n\tif err != nil {\n\t\tif k8serrors.IsNotFound(err) {\n\t\t\t// Object not found, return. Created objects are automatically garbage collected.\n\t\t\t// For additional cleanup logic use finalizers.\n\t\t\treturn ctrl.Result{}, nil\n\t\t}\n\t\t// Error reading the object - requeue the request.\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tbackupLog := globalControllerLogger.WithValues(\"namespace\", backup.Namespace, \"backup\", backup.Name)\n\n\tsubReconcilers := []backupSubReconciler{\n\t\tupdateBackupStatus{},\n\t\tupdateBackupAgents{},\n\t\tstartBackup{},\n\t\tstopBackup{},\n\t\ttoggleBackupPaused{},\n\t\tmodifyBackup{},\n\t\tupdateBackupStatus{},\n\t}\n\n\tfor _, subReconciler := range subReconcilers {\n\t\trequeue := subReconciler.reconcile(ctx, r, backup)\n\t\tif requeue == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\treturn processRequeue(requeue, subReconciler, backup, r.Recorder, backupLog)\n\t}\n\n\tif backup.Status.Generations.Reconciled < originalGeneration {\n\t\tbackupLog.Info(\"Backup was not fully reconciled by reconciliation process\")\n\t\treturn ctrl.Result{Requeue: true}, nil\n\t}\n\n\tbackupLog.Info(\"Reconciliation complete\")\n\n\treturn ctrl.Result{}, nil\n}", "func SetStorageToVolumeSource(dep *appsv1.Deployment, storagestructs []v1alpha1.StorageSpec, volumeName string, group GroupEnums) {\n\tfor _, v := range storagestructs {\n\t\tif !v.StorageIsEmpty() {\n\t\t\tif index := getVolumeLogIndex(dep); index != -1 && v.Usage == v1alpha1.Log {\n\t\t\t\tsetVolumeSource(dep, index, strings.Replace(volumeName, \"type\", string(v1alpha1.Log), -1))\n\t\t\t}\n\t\t\tif index := getVolumeDataIndex(dep); index != -1 && v.Usage == v1alpha1.Data {\n\t\t\t\tsetVolumeSource(dep, index, strings.Replace(volumeName, \"type\", string(v1alpha1.Data), -1))\n\t\t\t}\n\t\t\tif v.Usage == v1alpha1.Empty && group == AlamedaGroup {\n\t\t\t\tif index := getVolumeLogIndex(dep); 
index != -1 {\n\t\t\t\t\tsetVolumeSource(dep, index, strings.Replace(volumeName, \"type\", string(v1alpha1.Log), -1))\n\t\t\t\t}\n\t\t\t\tif index := getVolumeDataIndex(dep); index != -1 {\n\t\t\t\t\tsetVolumeSource(dep, index, strings.Replace(volumeName, \"type\", string(v1alpha1.Data), -1))\n\t\t\t\t}\n\t\t\t} else if v.Usage == v1alpha1.Empty && group != AlamedaGroup {\n\t\t\t\tif index := getVolumeDataIndex(dep); index != -1 {\n\t\t\t\t\tsetVolumeSource(dep, index, strings.Replace(volumeName, \"type\", string(v1alpha1.Data), -1))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif v.Type == v1alpha1.Ephemeral {\n\t\t\tif index := getVolumeLogIndex(dep); index != -1 && v.Usage == v1alpha1.Log {\n\t\t\t\tsetEmptyDir(dep, index, v.Size)\n\t\t\t}\n\t\t\tif index := getVolumeDataIndex(dep); index != -1 && v.Usage == v1alpha1.Data {\n\t\t\t\tsetEmptyDir(dep, index, v.Size)\n\t\t\t}\n\t\t}\n\t}\n}", "func (r *PersistentVolumeReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {\n\tlogger := log.FromContext(ctx)\n\n\tvar pv corev1.PersistentVolume\n\tif err := r.Get(ctx, req.NamespacedName, &pv); err != nil {\n\t\tlogger.Error(err, \"unable to fetch PersistentVolume\")\n\t\treturn ctrl.Result{}, client.IgnoreNotFound(err)\n\t}\n\n\tif name, ok := pv.ObjectMeta.Labels[localPVProvisionerLabelKey]; !ok || name != r.NodeName {\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\tif pv.Status.Phase != corev1.VolumeReleased {\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\tpath := pv.Spec.Local.Path\n\tlogger.Info(\"cleaning PersistentVolume\", \"path\", path)\n\tif err := r.Deleter.Delete(path); err != nil {\n\t\tlogger.Error(err, \"unable to clean the device of PersistentVolume\")\n\t}\n\n\tlogger.Info(\"deleting PersistentVolume from api server\")\n\tif err := r.Delete(context.Background(), &pv); err != nil {\n\t\tlogger.Error(err, \"unable to delete PersistentVolume\")\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tlogger.Info(\"successful to cleanup PersistentVolume\")\n\treturn 
ctrl.Result{}, nil\n}", "func (r *DBaaSInventoryReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {\n\tlogger := ctrl.LoggerFrom(ctx, \"DBaaS Inventory\", req.NamespacedName)\n\n\t// only reconcile an Inventory if it's installed to a Tenant object's inventoryNamespace\n\tif contains(TenantInventoryNS, req.Namespace) {\n\t\tvar inventory v1alpha1.DBaaSInventory\n\t\tif err := r.Get(ctx, req.NamespacedName, &inventory); err != nil {\n\t\t\tif errors.IsNotFound(err) {\n\t\t\t\t// CR deleted since request queued, child objects getting GC'd, no requeue\n\t\t\t\tlogger.V(1).Info(\"DBaaS Inventory resource not found, has been deleted\")\n\t\t\t\treturn ctrl.Result{}, nil\n\t\t\t}\n\t\t\tlogger.Error(err, \"Error fetching DBaaS Inventory for reconcile\")\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\n\t\t//\n\t\t// Inventory RBAC\n\t\t//\n\t\trole, rolebinding := inventoryRbacObjs(inventory)\n\t\tvar roleObj rbacv1.Role\n\t\tif exists, err := r.createRbacObj(&role, &roleObj, &inventory, ctx); err != nil {\n\t\t\treturn ctrl.Result{}, err\n\t\t} else if exists {\n\t\t\tif !reflect.DeepEqual(role.Rules, roleObj.Rules) {\n\t\t\t\troleObj.Rules = role.Rules\n\t\t\t\tif err := r.updateObject(&roleObj, ctx); err != nil {\n\t\t\t\t\tlogger.Error(err, \"Error updating resource\", roleObj.Name, roleObj.Namespace)\n\t\t\t\t\treturn ctrl.Result{}, err\n\t\t\t\t}\n\t\t\t\tlogger.V(1).Info(roleObj.Kind+\" resource updated\", roleObj.Name, roleObj.Namespace)\n\t\t\t}\n\t\t}\n\t\tvar roleBindingObj rbacv1.RoleBinding\n\t\tif exists, err := r.createRbacObj(&rolebinding, &roleBindingObj, &inventory, ctx); err != nil {\n\t\t\treturn ctrl.Result{}, err\n\t\t} else if exists {\n\t\t\tif !reflect.DeepEqual(rolebinding.RoleRef, roleBindingObj.RoleRef) ||\n\t\t\t\t!reflect.DeepEqual(rolebinding.Subjects, roleBindingObj.Subjects) {\n\t\t\t\troleBindingObj.RoleRef = rolebinding.RoleRef\n\t\t\t\troleBindingObj.Subjects = rolebinding.Subjects\n\t\t\t\tif err := 
r.updateObject(&roleBindingObj, ctx); err != nil {\n\t\t\t\t\tlogger.Error(err, \"Error updating resource\", roleBindingObj.Name, roleBindingObj.Namespace)\n\t\t\t\t\treturn ctrl.Result{}, err\n\t\t\t\t}\n\t\t\t\tlogger.V(1).Info(roleBindingObj.Kind+\" resource updated\", roleBindingObj.Name, roleBindingObj.Namespace)\n\t\t\t}\n\t\t}\n\n\t\t//\n\t\t// Provider Inventory\n\t\t//\n\t\tprovider, err := r.getDBaaSProvider(inventory.Spec.ProviderRef.Name, ctx)\n\t\tif err != nil {\n\t\t\tif errors.IsNotFound(err) {\n\t\t\t\tlogger.Error(err, \"Requested DBaaS Provider is not configured in this environment\", \"DBaaS Provider\", inventory.Spec.ProviderRef)\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\t\t\tlogger.Error(err, \"Error reading configured DBaaS Provider\", \"DBaaS Provider\", inventory.Spec.ProviderRef)\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t\tlogger.V(1).Info(\"Found DBaaS Provider\", \"DBaaS Provider\", inventory.Spec.ProviderRef)\n\n\t\tproviderInventory := r.createProviderObject(&inventory, provider.Spec.InventoryKind)\n\t\tif result, err := r.reconcileProviderObject(providerInventory, r.providerObjectMutateFn(&inventory, providerInventory, inventory.Spec.DeepCopy()), ctx); err != nil {\n\t\t\tif errors.IsConflict(err) {\n\t\t\t\tlogger.V(1).Info(\"Provider Inventory modified, retry syncing spec\")\n\t\t\t\treturn ctrl.Result{Requeue: true}, nil\n\t\t\t}\n\t\t\tlogger.Error(err, \"Error reconciling the Provider Inventory resource\")\n\t\t\treturn ctrl.Result{}, err\n\t\t} else {\n\t\t\tlogger.V(1).Info(\"Provider Inventory resource reconciled\", \"result\", result)\n\t\t}\n\n\t\tvar DBaaSProviderInventory v1alpha1.DBaaSProviderInventory\n\t\tif err := r.parseProviderObject(&DBaaSProviderInventory, providerInventory); err != nil {\n\t\t\tlogger.Error(err, \"Error parsing the Provider Inventory resource\")\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t\tif err := r.reconcileDBaaSObjectStatus(&inventory, ctx, func() error 
{\n\t\t\tDBaaSProviderInventory.Status.DeepCopyInto(&inventory.Status)\n\t\t\treturn nil\n\t\t}); err != nil {\n\t\t\tif errors.IsConflict(err) {\n\t\t\t\tlogger.V(1).Info(\"DBaaS Inventory modified, retry syncing status\")\n\t\t\t\treturn ctrl.Result{Requeue: true}, nil\n\t\t\t}\n\t\t\tlogger.Error(err, \"Error updating the DBaaS Inventory status\")\n\t\t\treturn ctrl.Result{}, err\n\t\t} else {\n\t\t\tlogger.V(1).Info(\"DBaaS Inventory status updated\")\n\t\t}\n\t}\n\n\treturn ctrl.Result{}, nil\n}", "func (a *actuator) Reconcile(ctx context.Context, ex *extensionsv1alpha1.Extension) error {\n\tcluster, err := controller.GetCluster(ctx, a.Client(), ex.Namespace)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresurrection := false\n\tif ex.Status.State != nil && !common.IsMigrating(ex) {\n\t\tresurrection, err = a.ResurrectFrom(ex, cluster)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Shoots that don't specify a DNS domain or that are scheduled to a seed that is tainted with \"DNS disabled\"\n\t// don't get an DNS service\n\n\t// TODO: remove the deprecated taint check in a future version\n\tif !seedSettingShootDNSEnabled(cluster.Seed.Spec.Settings) ||\n\t\tgardencorev1beta1helper.TaintsHave(cluster.Seed.Spec.Taints, gardencorev1beta1.DeprecatedSeedTaintDisableDNS) ||\n\t\tcluster.Shoot.Spec.DNS == nil {\n\t\ta.Info(\"DNS domain is not specified, the seed .spec.settings.shootDNS.enabled=false or the seed is tainted with 'disable-dns', therefore no shoot dns service is installed\", \"shoot\", ex.Namespace)\n\t\treturn a.Delete(ctx, ex)\n\t}\n\n\tif err := a.createShootResources(ctx, cluster, ex.Namespace); err != nil {\n\t\treturn err\n\t}\n\treturn a.createSeedResources(ctx, cluster, ex, !resurrection)\n}", "func RestoreStorage(storageID, restorePoint string) error {\n\tclient, err := NewExtPacketClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trequest := &extpackngo.RestoreVolumeRequest{\n\t\tRestorePoint: restorePoint,\n\t}\n\n\t_, e := 
client.Storages.Restore(storageID, request)\n\treturn e\n}", "func (c *Controller) Reconcile(req ctrl.Request) (ctrl.Result, error) {\n\tdefer metricsC.ReconcileDuration.EvaluateDurationForType(\"node_drive_controller\")()\n\t// read name\n\tdriveName := req.Name\n\t// TODO why do we need 60 seconds here?\n\t// create context\n\tctx, cancelFn := context.WithTimeout(context.Background(), 60*time.Second)\n\tdefer cancelFn()\n\n\t// customize logging\n\tlog := c.log.WithFields(logrus.Fields{\"method\": \"drive/Reconcile\", \"name\": driveName})\n\n\t// obtain corresponding drive\n\tdrive := &drivecrd.Drive{}\n\tif err := c.client.ReadCR(ctx, driveName, \"\", drive); err != nil {\n\t\tlog.Errorf(\"Failed to read Drive %s CR\", driveName)\n\t\t// TODO is this correct error here?\n\t\treturn ctrl.Result{}, client.IgnoreNotFound(err)\n\t}\n\n\tlog.Infof(\"Drive changed: %v\", drive)\n\n\tusage := drive.Spec.GetUsage()\n\thealth := drive.Spec.GetHealth()\n\tid := drive.Spec.GetUUID()\n\ttoUpdate := false\n\n\tswitch usage {\n\tcase apiV1.DriveUsageInUse:\n\t\tif health == apiV1.HealthSuspect || health == apiV1.HealthBad {\n\t\t\t// TODO update health of volumes\n\t\t\tdrive.Spec.Usage = apiV1.DriveUsageReleasing\n\t\t\ttoUpdate = true\n\t\t}\n\tcase apiV1.DriveUsageReleasing:\n\t\tvolumes, err := c.crHelper.GetVolumesByLocation(ctx, id)\n\t\tif err != nil {\n\t\t\treturn ctrl.Result{RequeueAfter: base.DefaultRequeueForVolume}, err\n\t\t}\n\t\tallFound := true\n\t\tfor _, vol := range volumes {\n\t\t\tstatus, found := drive.Annotations[fmt.Sprintf(\n\t\t\t\t\"%s/%s\", apiV1.DriveAnnotationVolumeStatusPrefix, vol.Name)]\n\t\t\tif !found || status != apiV1.VolumeUsageReleased {\n\t\t\t\tallFound = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif allFound {\n\t\t\tdrive.Spec.Usage = apiV1.DriveUsageReleased\n\t\t\teventMsg := fmt.Sprintf(\"Drive is ready for replacement, %s\", drive.GetDriveDescription())\n\t\t\tc.eventRecorder.Eventf(drive, eventing.NormalType, 
eventing.DriveReadyForReplacement, eventMsg)\n\t\t\ttoUpdate = true\n\t\t}\n\n\tcase apiV1.DriveUsageReleased:\n\t\tstatus, found := drive.Annotations[apiV1.DriveAnnotationReplacement]\n\t\tif !found || status != apiV1.DriveAnnotationReplacementReady {\n\t\t\tbreak\n\t\t}\n\t\ttoUpdate = true\n\t\tdrive.Spec.Usage = apiV1.DriveUsageRemoving\n\t\tfallthrough\n\tcase apiV1.DriveUsageRemoving:\n\t\tvolumes, err := c.crHelper.GetVolumesByLocation(ctx, id)\n\t\tif err != nil {\n\t\t\treturn ctrl.Result{RequeueAfter: base.DefaultRequeueForVolume}, err\n\t\t}\n\t\tif c.checkAllVolsRemoved(volumes) {\n\t\t\tdrive.Spec.Usage = apiV1.DriveUsageRemoved\n\t\t\tstatus, err := c.driveMgrClient.Locate(ctx, &api.DriveLocateRequest{Action: apiV1.LocateStart, DriveSerialNumber: drive.Spec.SerialNumber})\n\t\t\tif err != nil || status.Status != apiV1.LocateStatusOn {\n\t\t\t\tlog.Errorf(\"Failed to locate LED of drive %s, err %v\", drive.Spec.SerialNumber, err)\n\t\t\t\tdrive.Spec.Usage = apiV1.DriveUsageFailed\n\t\t\t\t// send error level alert\n\t\t\t\teventMsg := fmt.Sprintf(\"Failed to locale LED, %s\", drive.GetDriveDescription())\n\t\t\t\tc.eventRecorder.Eventf(drive, eventing.ErrorType, eventing.DriveReplacementFailed, eventMsg)\n\t\t\t} else {\n\t\t\t\t// send info level alert\n\t\t\t\teventMsg := fmt.Sprintf(\"Drive successfully replaced, %s\", drive.GetDriveDescription())\n\t\t\t\tc.eventRecorder.Eventf(drive, eventing.NormalType, eventing.DriveSuccessfullyReplaced, eventMsg)\n\t\t\t}\n\t\t\ttoUpdate = true\n\t\t}\n\tcase apiV1.DriveUsageRemoved:\n\t\tif drive.Spec.Status == apiV1.DriveStatusOffline {\n\t\t\t// drive was removed from the system. 
need to clean corresponding custom resource\n\t\t\tif err := c.client.DeleteCR(ctx, drive); err != nil {\n\t\t\t\tlog.Errorf(\"Failed to delete Drive %s CR\", driveName)\n\t\t\t\treturn ctrl.Result{}, client.IgnoreNotFound(err)\n\t\t\t}\n\t\t\treturn ctrl.Result{}, nil\n\t\t}\n\t}\n\n\t// update drive CR if needed\n\tif toUpdate {\n\t\tif err := c.client.UpdateCR(ctx, drive); err != nil {\n\t\t\tlog.Errorf(\"Failed to update Drive %s CR\", driveName)\n\t\t\treturn ctrl.Result{}, client.IgnoreNotFound(err)\n\t\t}\n\t}\n\n\treturn ctrl.Result{}, nil\n}", "func CompareStorageRequests(initial corev1.ResourceRequirements, updated corev1.ResourceRequirements) StorageComparison {\n\tinitialSize := initial.Requests.Storage()\n\tupdatedSize := updated.Requests.Storage()\n\tif initialSize.IsZero() || updatedSize.IsZero() {\n\t\treturn StorageComparison{}\n\t}\n\tswitch updatedSize.Cmp(*initialSize) {\n\tcase -1: // decrease\n\t\treturn StorageComparison{Decrease: true}\n\tcase 1: // increase\n\t\treturn StorageComparison{Increase: true}\n\tdefault: // same size\n\t\treturn StorageComparison{}\n\t}\n}", "func (o ArgoCDExportSpecPtrOutput) Storage() ArgoCDExportSpecStoragePtrOutput {\n\treturn o.ApplyT(func(v *ArgoCDExportSpec) *ArgoCDExportSpecStorage {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Storage\n\t}).(ArgoCDExportSpecStoragePtrOutput)\n}", "func (r *TransferReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {\n\ttransfer := accountv1.Transfer{}\n\tif err := r.Get(ctx, req.NamespacedName, &transfer); err != nil {\n\t\treturn ctrl.Result{}, client.IgnoreNotFound(err)\n\t}\n\ttransfer.Spec.From = getUsername(transfer.Namespace)\n\tif time.Since(transfer.CreationTimestamp.Time) > time.Minute*3 {\n\t\treturn ctrl.Result{}, r.Delete(ctx, &transfer)\n\t}\n\tpipeLine := []func(ctx context.Context, transfer *accountv1.Transfer) error{\n\t\tr.check,\n\t\tr.TransferOutSaver,\n\t\tr.TransferInSaver,\n\t}\n\tfor _, f := range pipeLine 
{\n\t\tif err := f(ctx, &transfer); err != nil {\n\t\t\ttransfer.Status.Reason = err.Error()\n\t\t\ttransfer.Status.Progress = accountv1.TransferStateFailed\n\t\t\tbreak\n\t\t}\n\t}\n\tif transfer.Status.Progress != accountv1.TransferStateFailed {\n\t\ttransfer.Status.Progress = accountv1.TransferStateCompleted\n\t}\n\tif err := r.Status().Update(ctx, &transfer); err != nil {\n\t\treturn ctrl.Result{}, fmt.Errorf(\"update transfer status failed: %w\", err)\n\t}\n\treturn ctrl.Result{RequeueAfter: 3 * time.Minute}, nil\n}", "func (r *CassandraRestoreReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {\n\tctx := context.Background()\n\t_ = r.Log.WithValues(\"cassandrarestore\", req.NamespacedName)\n\n\tinstance := &api.CassandraRestore{}\n\terr := r.Get(ctx, req.NamespacedName, instance)\n\tif err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\treturn ctrl.Result{}, nil\n\t\t}\n\t\treturn ctrl.Result{RequeueAfter: 10 * time.Second}, err\n\t}\n\n\trestore := instance.DeepCopy()\n\n\tif len(restore.Status.RestoreKey) == 0 {\n\t\tif err = r.setRestoreKey(ctx, restore); err != nil {\n\t\t\tr.Log.Error(err, \"failed to set restore key\")\n\t\t\treturn ctrl.Result{RequeueAfter: 10 * time.Second}, err\n\t\t}\n\t}\n\n\t// See if the restore is already in progress\n\tif !restore.Status.StartTime.IsZero() {\n\t\tcassdcKey := types.NamespacedName{Namespace: req.Namespace, Name: restore.Spec.CassandraDatacenter.Name}\n\t\tcassdc := &cassdcapi.CassandraDatacenter{}\n\n\t\tif err = r.Get(ctx, cassdcKey, cassdc); err != nil {\n\t\t\t// TODO add some additional logging and/or generate an event if the error is not found\n\t\t\tif errors.IsNotFound(err) {\n\t\t\t\tr.Log.Error(err, \"cassandradatacenter not found\", \"CassandraDatacenter\", cassdcKey)\n\n\t\t\t\tpatch := client.MergeFrom(restore.DeepCopy())\n\t\t\t\trestore.Status.FinishTime = metav1.Now()\n\t\t\t\tif err = r.Status().Patch(ctx, restore, patch); err == nil {\n\t\t\t\t\treturn ctrl.Result{Requeue: false}, 
err\n\t\t\t\t} else {\n\t\t\t\t\tr.Log.Error(err, \"failed to patch status with end time\")\n\t\t\t\t\treturn ctrl.Result{RequeueAfter: 5 * time.Second}, err\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tr.Log.Error(err, \"failed to get cassandradatacenter\", \"CassandraDatacenter\", cassdcKey)\n\t\t\t\treturn ctrl.Result{RequeueAfter: 10 * time.Second}, err\n\t\t\t}\n\t\t}\n\n\t\tif isCassdcReady(cassdc) {\n\t\t\tr.Log.Info(\"the cassandradatacenter has been restored and is ready\", \"CassandraDatacenter\", cassdcKey)\n\n\t\t\tpatch := client.MergeFrom(restore.DeepCopy())\n\t\t\trestore.Status.FinishTime = metav1.Now()\n\t\t\tif err = r.Status().Patch(ctx, restore, patch); err == nil {\n\t\t\t\treturn ctrl.Result{Requeue: false}, err\n\t\t\t} else {\n\t\t\t\tr.Log.Error(err, \"failed to patch status with end time\")\n\t\t\t\treturn ctrl.Result{RequeueAfter: 5 * time.Second}, err\n\t\t\t}\n\t\t}\n\n\t\t// TODO handle scenarios in which the CassandraDatacenter fails to become ready\n\n\t\treturn ctrl.Result{RequeueAfter: 5 * time.Second}, nil\n\t}\n\n\tbackupKey := types.NamespacedName{Namespace: req.Namespace, Name: restore.Spec.Backup}\n\tbackup := &api.CassandraBackup{}\n\n\tif err = r.Get(ctx, backupKey, backup); err != nil {\n\t\t// TODO add some additional logging and/or generate an event if the error is not found\n\t\tr.Log.Error(err, \"failed to get backup\", \"CassandraBackup\", backupKey)\n\t\treturn ctrl.Result{RequeueAfter: 10 * time.Second}, err\n\t}\n\n\tif restore.Spec.InPlace {\n\t\tr.Log.Info(\"performing in place restore\")\n\n\t\tcassdcKey := types.NamespacedName{Namespace: req.Namespace, Name: restore.Spec.CassandraDatacenter.Name}\n\t\tcassdc := &cassdcapi.CassandraDatacenter{}\n\n\t\tif err = r.Get(ctx, cassdcKey, cassdc); err != nil {\n\t\t\tr.Log.Error(err, \"failed to get cassandradatacenter\", \"CassandraDatacenter\", cassdcKey)\n\t\t\treturn ctrl.Result{RequeueAfter: 10 * time.Second}, err\n\t\t}\n\n\t\tcassdc = cassdc.DeepCopy()\n\n\t\tif err = 
setBackupNameInRestoreContainer(backup.Spec.Name, cassdc); err != nil {\n\t\t\tr.Log.Error(err, \"failed to set backup name in restore container\", \"CassandraDatacenter\", cassdcKey)\n\t\t\treturn ctrl.Result{RequeueAfter: 10 * time.Second}, err\n\t\t}\n\n\t\tif err = setRestoreKeyInRestoreContainer(restore.Status.RestoreKey, cassdc); err != nil {\n\t\t\tr.Log.Error(err, \"failed to set restore key in restore container\", \"CassandraDatacenter\", cassdcKey)\n\t\t\treturn ctrl.Result{RequeueAfter: 10 * time.Second}, err\n\t\t}\n\n\t\tpatch := client.MergeFrom(restore.DeepCopy())\n\t\trestore.Status.StartTime = metav1.Now()\n\t\tif err = r.Status().Patch(ctx, restore, patch); err != nil {\n\t\t\tr.Log.Error(err, \"fail to patch status with start time\")\n\t\t\treturn ctrl.Result{RequeueAfter: 5 * time.Second}, err\n\t\t}\n\n\t\tif err = r.Update(ctx, cassdc); err == nil {\n\t\t\tr.Log.Info(\"the cassandradatacenter has been updated and will be restarted\", \"CassandraDatacenter\", cassdcKey)\n\t\t\treturn ctrl.Result{RequeueAfter: 10 * time.Second}, err\n\t\t} else {\n\t\t\tr.Log.Error(err, \"failed to update the cassandradatacenter\", \"CassandraDatacenter\", cassdcKey)\n\t\t\treturn ctrl.Result{RequeueAfter: 10 * time.Second}, err\n\t\t}\n\t}\n\n\tr.Log.Info(\"restoring to new cassandradatacenter\")\n\n\tnewCassdc, err := buildNewCassandraDatacenter(restore, backup)\n\tif err != nil {\n\t\tr.Log.Error(err, \"failed to build new cassandradatacenter\")\n\t\treturn ctrl.Result{RequeueAfter: 10 * time.Second}, err\n\t}\n\n\tcassdcKey := types.NamespacedName{Namespace: newCassdc.Namespace, Name: newCassdc.Name}\n\n\tr.Log.Info(\"creating new cassandradatacenter\", \"CassandraDatacenter\", cassdcKey)\n\n\tif err = r.Create(ctx, newCassdc); err == nil {\n\t\tpatch := client.MergeFrom(restore.DeepCopy())\n\t\trestore.Status.StartTime = metav1.Now()\n\t\tif err = r.Status().Patch(ctx, restore, patch); err != nil {\n\t\t\tr.Log.Error(err, \"fail to patch status with start 
time\")\n\t\t\treturn ctrl.Result{RequeueAfter: 5 * time.Second}, err\n\t\t} else {\n\t\t\treturn ctrl.Result{RequeueAfter: 10 * time.Second}, err\n\t\t}\n\t} else {\n\t\tr.Log.Error(err, \"failed to create cassandradatacenter\", \"CassandraDatacenter\", cassdcKey)\n\t\treturn ctrl.Result{RequeueAfter: 30 * time.Second}, nil\n\t}\n}", "func (r *MachineDeletionReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {\n\tlog := r.Log.WithValues(\"machinedeletion\", req.NamespacedName)\n\n\tlog.Info(\"reconciling...\")\n\n\t//fetch the remediation\n\tvar remediation *v1alpha1.MachineDeletion\n\tif remediation = r.getRemediation(ctx, req); remediation == nil {\n\t\treturn ctrl.Result{}, nil\n\t}\n\t//not a machine based remediation\n\tmachineOwnerRef := getMachineOwnerRef(remediation)\n\tif machineOwnerRef == nil {\n\t\treturn ctrl.Result{}, nil\n\t}\n\t//delete the machine\n\tif err := r.deleteMachine(ctx, buildMachine(machineOwnerRef, remediation)); err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\treturn ctrl.Result{}, nil\n}", "func TestStorage(t *testing.T) {\n\tvar result Storage\n\terr := json.NewDecoder(strings.NewReader(storageBody)).Decode(&result)\n\n\tif err != nil {\n\t\tt.Errorf(\"Error decoding JSON: %s\", err)\n\t}\n\n\tif result.ID != \"Storage-1\" {\n\t\tt.Errorf(\"Received invalid ID: %s\", result.ID)\n\t}\n\n\tif result.Name != \"StorageOne\" {\n\t\tt.Errorf(\"Received invalid name: %s\", result.Name)\n\t}\n\n\tif len(result.drives) != 6 {\n\t\tt.Errorf(\"Unexpected number of drives: %d\", len(result.drives))\n\t}\n\n\tif result.StorageControllers[0].CacheSummary.PersistentCacheSizeMiB != 1024 {\n\t\tt.Errorf(\"Invalid PersistenCacheSize: %d\",\n\t\t\tresult.StorageControllers[0].CacheSummary.PersistentCacheSizeMiB)\n\t}\n\n\tif result.StorageControllers[0].PCIeInterface.MaxPCIeType != Gen4PCIeTypes {\n\t\tt.Errorf(\"Invalid MaxPCIeType: %s\", result.StorageControllers[0].PCIeInterface.MaxPCIeType)\n\t}\n\n\tif 
result.setEncryptionKeyTarget != \"/redfish/v1/Storage/Actions/Storage.SetEncryptionKey\" {\n\t\tt.Errorf(\"Invalid SetEncryptionKey target: %s\", result.setEncryptionKeyTarget)\n\t}\n}", "func resourceArmStorageContainerDelete(d *schema.ResourceData, meta interface{}) error {\n\tarmClient := meta.(*ArmClient)\n\tctx := armClient.StopContext\n\n\tresourceGroupName := d.Get(\"resource_group_name\").(string)\n\tstorageAccountName := d.Get(\"storage_account_name\").(string)\n\n\tblobClient, accountExists, err := armClient.getBlobStorageClientForStorageAccount(ctx, resourceGroupName, storageAccountName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !accountExists {\n\t\tlog.Printf(\"[INFO]Storage Account %q doesn't exist so the container won't exist\", storageAccountName)\n\t\treturn nil\n\t}\n\n\tname := d.Get(\"name\").(string)\n\n\tlog.Printf(\"[INFO] Deleting storage container %q in account %q\", name, storageAccountName)\n\treference := blobClient.GetContainerReference(name)\n\tdeleteOptions := &storage.DeleteContainerOptions{}\n\tif _, err := reference.DeleteIfExists(deleteOptions); err != nil {\n\t\treturn fmt.Errorf(\"Error deleting storage container %q from storage account %q: %s\", name, storageAccountName, err)\n\t}\n\n\td.SetId(\"\")\n\treturn nil\n}", "func (r *Reconciler) reconcileExternalDatasources(ctx context.Context, serverClient k8sclient.Client, activeQuota string, platformType configv1.PlatformType) (integreatlyv1alpha1.StatusPhase, error) {\n\tr.log.Info(\"Reconciling external datastores\")\n\tns := r.installation.Namespace\n\n\t// setup backend redis custom resource\n\t// this will be used by the cloud resources operator to provision a redis instance\n\tr.log.Info(\"Creating backend redis instance\")\n\tbackendRedisName := fmt.Sprintf(\"%s%s\", constants.ThreeScaleBackendRedisPrefix, r.installation.Name)\n\n\t// If there is a quota change, the quota on the installation spec would not be set to the active quota yet\n\tquotaChange := 
isQuotaChanged(r.installation.Status.Quota, activeQuota)\n\n\t// if we are on GCP set snaphshot frequency and retention\n\tvar snapshotFrequency, snapshotRetention types.Duration\n\tif platformType == configv1.GCPPlatformType {\n\t\tsnapshotFrequency = constants.GcpSnapshotFrequency\n\t\tsnapshotRetention = constants.GcpSnapshotRetention\n\t}\n\n\tr.log.Infof(\"Backend redis config\", map[string]interface{}{\"quotaChange\": quotaChange, \"activeQuota\": activeQuota})\n\tbackendRedis, err := croUtil.ReconcileRedis(ctx, serverClient, defaultInstallationNamespace, r.installation.Spec.Type, croUtil.TierProduction, backendRedisName, ns, backendRedisName, ns, r.Config.GetBackendRedisNodeSize(activeQuota, platformType), quotaChange, quotaChange, func(cr metav1.Object) error {\n\t\towner.AddIntegreatlyOwnerAnnotations(cr, r.installation)\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn integreatlyv1alpha1.PhaseFailed, fmt.Errorf(\"failed to reconcile backend redis request: %w\", err)\n\t}\n\n\t// setup system redis custom resource\n\t// this will be used by the cloud resources operator to provision a redis instance\n\tr.log.Info(\"Creating system redis instance\")\n\tsystemRedisName := fmt.Sprintf(\"%s%s\", constants.ThreeScaleSystemRedisPrefix, r.installation.Name)\n\tsystemRedis, err := croUtil.ReconcileRedis(ctx, serverClient, defaultInstallationNamespace, r.installation.Spec.Type, croUtil.TierProduction, systemRedisName, ns, systemRedisName, ns, \"\", false, false, func(cr metav1.Object) error {\n\t\towner.AddIntegreatlyOwnerAnnotations(cr, r.installation)\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn integreatlyv1alpha1.PhaseFailed, fmt.Errorf(\"failed to reconcile system redis request: %w\", err)\n\t}\n\n\t// setup postgres cr for the cloud resource operator\n\t// this will be used by the cloud resources operator to provision a postgres instance\n\tr.log.Info(\"Creating postgres instance\")\n\tpostgresName := fmt.Sprintf(\"%s%s\", 
constants.ThreeScalePostgresPrefix, r.installation.Name)\n\tpostgres, err := croUtil.ReconcilePostgres(ctx, serverClient, defaultInstallationNamespace, r.installation.Spec.Type, croUtil.TierProduction, postgresName, ns, postgresName, ns, constants.PostgresApplyImmediately, snapshotFrequency, snapshotRetention, func(cr metav1.Object) error {\n\t\towner.AddIntegreatlyOwnerAnnotations(cr, r.installation)\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn integreatlyv1alpha1.PhaseFailed, fmt.Errorf(\"failed to reconcile postgres request: %w\", err)\n\t}\n\tif postgres.Status.Phase != types.PhaseComplete {\n\t\treturn integreatlyv1alpha1.PhaseAwaitingCloudResources, nil\n\t}\n\tphase, err := resources.ReconcileRedisAlerts(ctx, serverClient, r.installation, backendRedis, r.log)\n\tif err != nil {\n\t\treturn integreatlyv1alpha1.PhaseFailed, fmt.Errorf(\"failed to reconcile redis alerts: %w\", err)\n\t}\n\tif phase != integreatlyv1alpha1.PhaseCompleted {\n\t\treturn phase, nil\n\t}\n\n\t// create Redis Cpu Usage High alert\n\terr = resources.CreateRedisCpuUsageAlerts(ctx, serverClient, r.installation, backendRedis, r.log)\n\tif err != nil {\n\t\treturn integreatlyv1alpha1.PhaseFailed, fmt.Errorf(\"failed to create backend redis prometheus Cpu usage high alerts for threescale: %s\", err)\n\t}\n\t// wait for the backend redis cr to reconcile\n\tif backendRedis.Status.Phase != types.PhaseComplete {\n\t\treturn integreatlyv1alpha1.PhaseAwaitingComponents, nil\n\t}\n\n\t// get the secret created by the cloud resources operator\n\t// containing backend redis connection details\n\tcredSec := &corev1.Secret{}\n\terr = serverClient.Get(ctx, k8sclient.ObjectKey{Name: backendRedis.Status.SecretRef.Name, Namespace: backendRedis.Status.SecretRef.Namespace}, credSec)\n\tif err != nil {\n\t\treturn integreatlyv1alpha1.PhaseFailed, fmt.Errorf(\"failed to get backend redis credential secret: %w\", err)\n\t}\n\n\t// create backend redis external connection secret needed for the 3scale 
apimanager\n\tbackendRedisSecret := &corev1.Secret{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: externalBackendRedisSecretName,\n\t\t\tNamespace: r.Config.GetNamespace(),\n\t\t},\n\t\tData: map[string][]byte{},\n\t}\n\t_, err = controllerutil.CreateOrUpdate(ctx, serverClient, backendRedisSecret, func() error {\n\t\turi := credSec.Data[\"uri\"]\n\t\tport := credSec.Data[\"port\"]\n\t\tbackendRedisSecret.Data[\"REDIS_STORAGE_URL\"] = []byte(fmt.Sprintf(\"redis://%s:%s/0\", uri, port))\n\t\tbackendRedisSecret.Data[\"REDIS_QUEUES_URL\"] = []byte(fmt.Sprintf(\"redis://%s:%s/1\", uri, port))\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn integreatlyv1alpha1.PhaseFailed, fmt.Errorf(\"failed to create or update 3scale %s connection secret: %w\", externalBackendRedisSecretName, err)\n\t}\n\n\tphase, err = resources.ReconcileRedisAlerts(ctx, serverClient, r.installation, systemRedis, r.log)\n\tif err != nil {\n\t\treturn integreatlyv1alpha1.PhaseFailed, fmt.Errorf(\"failed to reconcile redis alerts: %w\", err)\n\t}\n\tif phase != integreatlyv1alpha1.PhaseCompleted {\n\t\treturn phase, nil\n\t}\n\t// wait for the system redis cr to reconcile\n\tif systemRedis.Status.Phase != types.PhaseComplete {\n\t\treturn integreatlyv1alpha1.PhaseAwaitingComponents, nil\n\t}\n\n\t// get the secret created by the cloud resources operator\n\t// containing system redis connection details\n\tsystemCredSec := &corev1.Secret{}\n\terr = serverClient.Get(ctx, k8sclient.ObjectKey{Name: systemRedis.Status.SecretRef.Name, Namespace: systemRedis.Status.SecretRef.Namespace}, systemCredSec)\n\tif err != nil {\n\t\treturn integreatlyv1alpha1.PhaseFailed, fmt.Errorf(\"failed to get system redis credential secret: %w\", err)\n\t}\n\n\t// create system redis external connection secret needed for the 3scale apimanager\n\tredisSecret := &corev1.Secret{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: externalRedisSecretName,\n\t\t\tNamespace: r.Config.GetNamespace(),\n\t\t},\n\t\tData: 
map[string][]byte{},\n\t}\n\n\tmessageBusKeys := []string{\"MESSAGE_BUS_URL\", \"MESSAGE_BUS_NAMESPACE\", \"MESSAGE_BUS_SENTINEL_HOSTS\", \"MESSAGE_BUS_SENTINEL_ROLE\"}\n\n\t_, err = controllerutil.CreateOrUpdate(ctx, serverClient, redisSecret, func() error {\n\t\turi := systemCredSec.Data[\"uri\"]\n\t\tport := systemCredSec.Data[\"port\"]\n\t\tconn := fmt.Sprintf(\"redis://%s:%s/1\", uri, port)\n\t\tredisSecret.Data[\"URL\"] = []byte(conn)\n\t\tfor _, key := range messageBusKeys {\n\t\t\tif redisSecret.Data[key] != nil {\n\t\t\t\tdelete(redisSecret.Data, key)\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn integreatlyv1alpha1.PhaseFailed, fmt.Errorf(\"failed to create or update 3scale %s connection secret: %w\", externalRedisSecretName, err)\n\t}\n\n\t// reconcile postgres alerts\n\tphase, err = resources.ReconcilePostgresAlerts(ctx, serverClient, r.installation, postgres, r.log)\n\tproductName := postgres.Labels[\"productName\"]\n\tif err != nil {\n\t\treturn integreatlyv1alpha1.PhaseFailed, fmt.Errorf(\"failed to reconcile postgres alerts for %s: %w\", productName, err)\n\t}\n\tif phase != integreatlyv1alpha1.PhaseCompleted {\n\t\treturn phase, nil\n\t}\n\n\t// get the secret containing redis credentials\n\tpostgresCredSec := &corev1.Secret{}\n\terr = serverClient.Get(ctx, k8sclient.ObjectKey{Name: postgres.Status.SecretRef.Name, Namespace: postgres.Status.SecretRef.Namespace}, postgresCredSec)\n\tif err != nil {\n\t\treturn integreatlyv1alpha1.PhaseFailed, fmt.Errorf(\"failed to get postgres credential secret: %w\", err)\n\t}\n\n\t// create postgres external connection secret\n\tpostgresSecret := &corev1.Secret{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: externalPostgresSecretName,\n\t\t\tNamespace: r.Config.GetNamespace(),\n\t\t},\n\t\tData: map[string][]byte{},\n\t}\n\t_, err = controllerutil.CreateOrUpdate(ctx, serverClient, postgresSecret, func() error {\n\t\tusername := postgresCredSec.Data[\"username\"]\n\t\tpassword := 
postgresCredSec.Data[\"password\"]\n\t\turl := fmt.Sprintf(\"postgresql://%s:%s@%s:%s/%s\", username, password, postgresCredSec.Data[\"host\"], postgresCredSec.Data[\"port\"], postgresCredSec.Data[\"database\"])\n\n\t\tpostgresSecret.Data[\"URL\"] = []byte(url)\n\t\tpostgresSecret.Data[\"DB_USER\"] = username\n\t\tpostgresSecret.Data[\"DB_PASSWORD\"] = password\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn integreatlyv1alpha1.PhaseFailed, fmt.Errorf(\"failed to create or update 3scale %s connection secret: %w\", externalPostgresSecretName, err)\n\t}\n\n\treturn integreatlyv1alpha1.PhaseCompleted, nil\n}", "func (ec *EtcdCustodian) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {\n\tec.logger.Info(\"Custodian controller reconciliation started\")\n\tetcd := &druidv1alpha1.Etcd{}\n\tif err := ec.Get(ctx, req.NamespacedName, etcd); err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\t// Object not found, return. Created objects are automatically garbage collected.\n\t\t\t// For additional cleanup logic use finalizers.\n\t\t\treturn ctrl.Result{}, nil\n\t\t}\n\t\t// Error reading the object - requeue the request.\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tlogger := ec.logger.WithValues(\"etcd\", kutil.Key(etcd.Namespace, etcd.Name).String())\n\n\t// TODO: (timuthy) remove this as it could block important health checks\n\tif etcd.Status.LastError != nil && *etcd.Status.LastError != \"\" {\n\t\tlogger.Info(fmt.Sprintf(\"Requeue item because of last error: %v\", *etcd.Status.LastError))\n\t\treturn ctrl.Result{\n\t\t\tRequeueAfter: 30 * time.Second,\n\t\t}, nil\n\t}\n\n\tselector, err := metav1.LabelSelectorAsSelector(etcd.Spec.Selector)\n\tif err != nil {\n\t\tlogger.Error(err, \"Error converting etcd selector to selector\")\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tstatusCheck := status.NewChecker(ec.Client, ec.config)\n\tif err := statusCheck.Check(ctx, logger, etcd); err != nil {\n\t\tlogger.Error(err, \"Error executing status 
checks\")\n\t\treturn ctrl.Result{}, err\n\t}\n\n\trefMgr := NewEtcdDruidRefManager(ec.Client, ec.Scheme, etcd, selector, etcdGVK, nil)\n\n\tstsList, err := refMgr.FetchStatefulSet(ctx, etcd)\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\t// Requeue if we found more than one or no StatefulSet.\n\t// The Etcd controller needs to decide what to do in such situations.\n\tif len(stsList.Items) != 1 {\n\t\tif err := ec.updateEtcdStatus(ctx, logger, etcd, nil); err != nil {\n\t\t\tlogger.Error(err, \"Error while updating ETCD status when no statefulset found\")\n\t\t}\n\t\treturn ctrl.Result{\n\t\t\tRequeueAfter: 5 * time.Second,\n\t\t}, nil\n\t}\n\n\tif err := ec.updateEtcdStatus(ctx, logger, etcd, &stsList.Items[0]); err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\treturn ctrl.Result{RequeueAfter: ec.config.SyncPeriod}, nil\n}", "func measureStorageDevice(blkDevicePath string) error {\n\tlog.Printf(\"Storage Collector: Measuring block device %s\\n\", blkDevicePath)\n\tfile, err := os.Open(blkDevicePath)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"couldn't open disk=%s err=%v\", blkDevicePath, err)\n\t}\n\n\teventDesc := fmt.Sprintf(\"Storage Collector: Measured %s\", blkDevicePath)\n\treturn tpm.ExtendPCRDebug(pcr, file, eventDesc)\n}", "func (p *HostClonePhase) Reconcile(ctx context.Context) (*reconcile.Result, error) {\n\tactualClaim := &corev1.PersistentVolumeClaim{}\n\texists, err := getResource(ctx, p.Client, p.Namespace, p.DesiredClaim.Name, actualClaim)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !exists {\n\t\tactualClaim, err = p.createClaim(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif !p.hostCloneComplete(actualClaim) {\n\t\t// requeue to update status\n\t\treturn &reconcile.Result{RequeueAfter: 3 * time.Second}, nil\n\t}\n\n\treturn nil, nil\n}", "func (s SetDefaultValues) Reconcile(r *FoundationDBClusterReconciler, context ctx.Context, cluster *fdbtypes.FoundationDBCluster) (bool, error) {\n\tchanged := 
false\n\tif cluster.Spec.RedundancyMode == \"\" {\n\t\tcluster.Spec.RedundancyMode = \"double\"\n\t\tchanged = true\n\t}\n\tif cluster.Spec.StorageEngine == \"\" {\n\t\tcluster.Spec.StorageEngine = \"ssd\"\n\t\tchanged = true\n\t}\n\tif cluster.Spec.UsableRegions == 0 {\n\t\tcluster.Spec.UsableRegions = 1\n\t\tchanged = true\n\t}\n\tif cluster.Spec.RunningVersion == \"\" {\n\t\tcluster.Spec.RunningVersion = cluster.Spec.Version\n\t\tchanged = true\n\t}\n\tif changed {\n\t\terr := r.Update(context, cluster)\n\t\treturn false, err\n\t}\n\treturn true, nil\n}", "func setupStorage(c *structs.Config) error {\n\tfmt.Println(\"setup storage...\")\n\t_, err := redis.Setup(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {\n\tlogger := r.Logger.WithValues(\"workspace\", req.NamespacedName)\n\trootCtx := context.Background()\n\tworkspace := &tenantv1alpha1.Workspace{}\n\tif err := r.Get(rootCtx, req.NamespacedName, workspace); err != nil {\n\t\treturn ctrl.Result{}, client.IgnoreNotFound(err)\n\t}\n\n\t// name of your custom finalizer\n\tfinalizer := \"finalizers.tenant.kubesphere.io\"\n\n\tif workspace.ObjectMeta.DeletionTimestamp.IsZero() {\n\t\t// The object is not being deleted, so if it does not have our finalizer,\n\t\t// then lets add the finalizer and update the object.\n\t\tif !sliceutil.HasString(workspace.ObjectMeta.Finalizers, finalizer) {\n\t\t\tworkspace.ObjectMeta.Finalizers = append(workspace.ObjectMeta.Finalizers, finalizer)\n\t\t\tif err := r.Update(rootCtx, workspace); err != nil {\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\t\t\tworkspaceOperation.WithLabelValues(\"create\", workspace.Name).Inc()\n\t\t}\n\t} else {\n\t\t// The object is being deleted\n\t\tif sliceutil.HasString(workspace.ObjectMeta.Finalizers, finalizer) {\n\t\t\t// remove our finalizer from the list and update it.\n\t\t\tworkspace.ObjectMeta.Finalizers = 
sliceutil.RemoveString(workspace.ObjectMeta.Finalizers, func(item string) bool {\n\t\t\t\treturn item == finalizer\n\t\t\t})\n\t\t\tlogger.V(4).Info(\"update workspace\")\n\t\t\tif err := r.Update(rootCtx, workspace); err != nil {\n\t\t\t\tlogger.Error(err, \"update workspace failed\")\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\t\t\tworkspaceOperation.WithLabelValues(\"delete\", workspace.Name).Inc()\n\t\t}\n\t\t// Our finalizer has finished, so the reconciler can do nothing.\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\tvar namespaces corev1.NamespaceList\n\tif err := r.List(rootCtx, &namespaces, client.MatchingLabels{tenantv1alpha1.WorkspaceLabel: req.Name}); err != nil {\n\t\tlogger.Error(err, \"list namespaces failed\")\n\t\treturn ctrl.Result{}, err\n\t} else {\n\t\tfor _, namespace := range namespaces.Items {\n\t\t\t// managed by kubefed-controller-manager\n\t\t\tkubefedManaged := namespace.Labels[constants.KubefedManagedLabel] == \"true\"\n\t\t\tif kubefedManaged {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// managed by workspace\n\t\t\tif err := r.bindWorkspace(rootCtx, logger, &namespace, workspace); err != nil {\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\t\t}\n\t}\n\n\tr.Recorder.Event(workspace, corev1.EventTypeNormal, controllerutils.SuccessSynced, controllerutils.MessageResourceSynced)\n\treturn ctrl.Result{}, nil\n}", "func ensureStorageAccount(client *azureclients.AzureClientWrapper, storageAccountName, resourceGroupName, region string, resourceTags map[string]string) error {\n\tvar existingStorageAccount *armstorage.Account\n\tlistAccounts := client.StorageAccountClient.NewListByResourceGroupPager(resourceGroupName, &armstorage.AccountsClientListByResourceGroupOptions{})\n\tfor listAccounts.More() {\n\t\tpageResponse, err := listAccounts.NextPage(context.Background())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, storageAccount := range pageResponse.AccountListResult.Value {\n\t\t\tif *storageAccount.Name != storageAccountName 
{\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\texistingStorageAccount = storageAccount\n\t\t\tbreak\n\t\t}\n\t\tif existingStorageAccount != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tneedToCreateStorageAccount := existingStorageAccount == nil\n\tmergedResourceTags := map[string]*string{}\n\tif !needToCreateStorageAccount {\n\t\tmergedResourceTags = existingStorageAccount.Tags\n\t}\n\tmergedResourceTags, needToUpdateStorageAccount := mergeResourceTags(resourceTags, mergedResourceTags)\n\n\tif !needToCreateStorageAccount && !needToUpdateStorageAccount {\n\t\tlog.Printf(\"Found existing storage account %s\", *existingStorageAccount.ID)\n\t\treturn nil\n\t}\n\n\t// Create storage account\n\tif needToCreateStorageAccount {\n\t\tpollerResp, err := client.StorageAccountClient.BeginCreate(\n\t\t\tcontext.TODO(),\n\t\t\tresourceGroupName,\n\t\t\tstorageAccountName,\n\t\t\tarmstorage.AccountCreateParameters{\n\t\t\t\tKind: to.Ptr(armstorage.KindStorageV2),\n\t\t\t\tSKU: &armstorage.SKU{\n\t\t\t\t\tName: to.Ptr(armstorage.SKUNameStandardLRS),\n\t\t\t\t},\n\t\t\t\tLocation: to.Ptr(region),\n\t\t\t\tTags: mergedResourceTags,\n\t\t\t},\n\t\t\t&armstorage.AccountsClientBeginCreateOptions{})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tpollerWrapper := azureclients.NewPollerWrapper[armstorage.AccountsClientCreateResponse](\n\t\t\tpollerResp,\n\t\t\t// When client.Mock = true the client.MockStorageClientBeginCreateResp will be returned\n\t\t\t// from PollerWrapper.PollUntilDone(). 
These fields are set in tests.\n\t\t\tclient.Mock,\n\t\t\tclient.MockStorageClientBeginCreateResp,\n\t\t)\n\t\t// PollUntilDone with frequency of every 10 seconds.\n\t\tresp, err := pollerWrapper.PollUntilDone(context.Background(), &runtime.PollUntilDoneOptions{Frequency: 10 * time.Second})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlog.Printf(\"Created storage account %s\", *resp.Account.ID)\n\t\treturn nil\n\t}\n\n\t// Update storage account\n\tupdateResp, err := client.StorageAccountClient.Update(context.TODO(),\n\t\tresourceGroupName,\n\t\tstorageAccountName,\n\t\tarmstorage.AccountUpdateParameters{\n\t\t\tTags: mergedResourceTags,\n\t\t},\n\t\t&armstorage.AccountsClientUpdateOptions{},\n\t)\n\tlog.Printf(\"Updated storage account %s\", *updateResp.Account.ID)\n\treturn err\n}", "func (m *manager) migrateStorageAccounts(ctx context.Context) error {\n\tresourceGroup := stringutils.LastTokenByte(m.doc.OpenShiftCluster.Properties.ClusterProfile.ResourceGroupID, '/')\n\tif len(m.doc.OpenShiftCluster.Properties.WorkerProfiles) == 0 {\n\t\tm.log.Error(\"skipping migrateStorageAccounts due to missing WorkerProfiles.\")\n\t\treturn nil\n\t}\n\tclusterStorageAccountName := \"cluster\" + m.doc.OpenShiftCluster.Properties.StorageSuffix\n\tregistryStorageAccountName := m.doc.OpenShiftCluster.Properties.ImageRegistryStorageAccountName\n\n\tt := &arm.Template{\n\t\tSchema: \"https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#\",\n\t\tContentVersion: \"1.0.0.0\",\n\t\tResources: []*arm.Resource{\n\t\t\tm.storageAccount(clusterStorageAccountName, m.doc.OpenShiftCluster.Location, false),\n\t\t\tm.storageAccount(registryStorageAccountName, m.doc.OpenShiftCluster.Location, false),\n\t\t},\n\t}\n\n\treturn arm.DeployTemplate(ctx, m.log, m.deployments, resourceGroup, \"storage\", t, nil)\n}", "func (o *StorageDeleteOptions) Validate() (err error) {\n\tif o.isDevfile {\n\t\treturn\n\t}\n\n\texists := 
o.LocalConfigInfo.StorageExists(o.storageName)\n\tif !exists {\n\t\treturn fmt.Errorf(\"the storage %v does not exists in the application %v, cause %v\", o.storageName, o.Application, err)\n\t}\n\n\treturn\n}", "func (r *Reconciler) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {\n\tlog := logf.FromContext(ctx)\n\n\tgardenCtx, cancel := controllerutils.GetMainReconciliationContext(ctx, controllerutils.DefaultReconciliationTimeout)\n\tdefer cancel()\n\tseedCtx, cancel := controllerutils.GetChildReconciliationContext(ctx, controllerutils.DefaultReconciliationTimeout)\n\tdefer cancel()\n\n\tcontrollerInstallation := &gardencorev1beta1.ControllerInstallation{}\n\tif err := r.GardenClient.Get(gardenCtx, request.NamespacedName, controllerInstallation); err != nil {\n\t\tif apierrors.IsNotFound(err) {\n\t\t\tlog.V(1).Info(\"Object is gone, stop reconciling\")\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\treturn reconcile.Result{}, fmt.Errorf(\"error retrieving object from store: %w\", err)\n\t}\n\n\tif controllerInstallation.DeletionTimestamp != nil {\n\t\treturn r.delete(gardenCtx, seedCtx, log, controllerInstallation)\n\t}\n\treturn r.reconcile(gardenCtx, seedCtx, log, controllerInstallation)\n}", "func (r *ChartGroupReconciler) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {\n\treclog := acglog.WithValues(\"namespace\", request.Namespace, \"acg\", request.Name)\n\treclog.Info(\"Reconciling\")\n\n\tinstance := &av1.ArmadaChartGroup{}\n\tinstance.SetNamespace(request.Namespace)\n\tinstance.SetName(request.Name)\n\n\terr := r.client.Get(context.TODO(), request.NamespacedName, instance)\n\n\tif apierrors.IsNotFound(err) {\n\t\t// We are working asynchronously. 
By the time we receive the event,\n\t\t// the object is already gone\n\t\treturn reconcile.Result{}, nil\n\t}\n\n\tif err != nil {\n\t\treclog.Error(err, \"Failed to lookup ArmadaChartGroup\")\n\t\treturn reconcile.Result{}, err\n\t}\n\n\tinstance.Init()\n\tmgr := r.managerFactory.NewArmadaChartGroupManager(instance)\n\treclog = reclog.WithValues(\"acg\", mgr.ResourceName())\n\n\tvar shouldRequeue bool\n\tif shouldRequeue, err = r.updateFinalizers(instance); shouldRequeue {\n\t\t// Need to requeue because finalizer update does not change metadata.generation\n\t\treturn reconcile.Result{Requeue: true}, err\n\t}\n\n\tif err := r.ensureSynced(mgr, instance); err != nil {\n\t\tif !instance.IsDeleted() {\n\t\t\t// TODO(jeb): Changed the behavior to stop only if we are not\n\t\t\t// in a delete phase.\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t}\n\n\tif instance.IsDeleted() {\n\t\tif shouldRequeue, err = r.deleteArmadaChartGroup(mgr, instance); shouldRequeue {\n\t\t\t// Need to requeue because finalizer update does not change metadata.generation\n\t\t\treturn reconcile.Result{Requeue: true}, err\n\t\t}\n\t\treturn reconcile.Result{}, err\n\t}\n\n\tif instance.IsTargetStateUninitialized() {\n\t\treclog.Info(\"TargetState uninitialized; skipping\")\n\t\terr = r.updateResource(instance)\n\t\tif err != nil {\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t\terr = r.client.Status().Update(context.TODO(), instance)\n\t\treturn reconcile.Result{}, err\n\t}\n\n\thrc := av1.HelmResourceCondition{\n\t\tType: av1.ConditionInitialized,\n\t\tStatus: av1.ConditionStatusTrue,\n\t}\n\tinstance.Status.SetCondition(hrc, instance.Spec.TargetState)\n\n\tswitch {\n\tcase !mgr.IsInstalled():\n\t\tif shouldRequeue, err = r.installArmadaChartGroup(mgr, instance); shouldRequeue {\n\t\t\t// we updated the ownership of the charts. 
Let's wake up\n\t\t\t// one more time later to enable the first chart.\n\t\t\treturn reconcile.Result{RequeueAfter: r.reconcilePeriod}, err\n\t\t}\n\t\treturn reconcile.Result{}, err\n\tcase mgr.IsUpdateRequired():\n\t\tif shouldRequeue, err = r.updateArmadaChartGroup(mgr, instance); shouldRequeue {\n\t\t\treturn reconcile.Result{RequeueAfter: r.reconcilePeriod}, err\n\t\t}\n\t\treturn reconcile.Result{}, err\n\t}\n\n\tforcedRequeue, err := r.reconcileArmadaChartGroup(mgr, instance)\n\tif err != nil {\n\t\t// Let's don't force a requeue.\n\t\treturn reconcile.Result{}, err\n\t}\n\tif forcedRequeue {\n\t\t// We have been waked up out of order ?\n\t\treturn reconcile.Result{RequeueAfter: r.reconcilePeriod}, nil\n\t}\n\n\treclog.Info(\"Reconciled ChartGroup\")\n\tif err = r.updateResourceStatus(instance); err != nil {\n\t\treturn reconcile.Result{Requeue: true}, err\n\t}\n\treturn reconcile.Result{}, nil\n}", "func (r *FruitsCatalogGReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {\n\t//ctx := context.Background()\n\treqLogger := r.Log.WithValues(\"fruitscatalogg\", req.NamespacedName)\n\n\t// your logic here\n\treqLogger.Info(\"Starting reconcile loop for \" + req.NamespacedName.Name)\n\t// Fetch the FruitsCatalogG instance of this reconcile request.\n\tinstance := &redhatcomv1alpha1.FruitsCatalogG{}\n\terr := r.Get(context.TODO(), req.NamespacedName, instance)\n\tif err != nil {\n\t\tif apierrors.IsNotFound(err) {\n\t\t\t// Request object not found, could have been deleted after reconcile request.\n\t\t\t// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.\n\t\t\t// Return and don't requeue.\n\t\t\treqLogger.Info(\"FruitsCatalogG resource not found. 
Ignoring since object must be deleted.\")\n\t\t\treturn ctrl.Result{}, nil\n\t\t}\n\t\t// Error reading the object - requeue the request.\n\t\treqLogger.Error(err, \"Failed to get FruitsCatalogG.\")\n\t\treturn ctrl.Result{}, err\n\t}\n\n\t// Check if the FruitsCatalogG instance is marked to be deleted, which is\n\t// indicated by the deletion timestamp being set.\n\tisFruitsCatalogGMarkedToBeDeleted := instance.GetDeletionTimestamp() != nil\n\tif isFruitsCatalogGMarkedToBeDeleted {\n\t\tif contains(instance.GetFinalizers(), \"finalizer.fruitscatalogg.redhat.com\") {\n\t\t\t// Run finalization logic for finalizer. If the\n\t\t\t// finalization logic fails, don't remove the finalizer so\n\t\t\t// that we can retry during the next reconciliation.\n\t\t\tif err := r.finalizeFruitsCatalogG(reqLogger, instance); err != nil {\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\n\t\t\t// Remove finalizer. Once all finalizers have been\n\t\t\t// removed, the object will be deleted.\n\t\t\tcontrollerutil.RemoveFinalizer(instance, \"finalizer.fruitscatalogg.redhat.com\")\n\t\t\terr := r.Update(context.TODO(), instance)\n\t\t\tif err != nil {\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\t\t}\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\t// Ensure all required resources are up-to-date.\n\tinstallMongodb := instance.Spec.MongoDB.Install\n\tif installMongodb {\n\t\t// Deal with MongoDB connection credentials.\n\t\tmongodbSecret := deployment.CreateSecretForMongoDB(&instance.Spec, instance.Namespace)\n\t\tcontrollerutil.SetControllerReference(instance, mongodbSecret, r.Scheme)\n\t\tif err := r.Client.Create(context.TODO(), mongodbSecret); err != nil && !errors.IsAlreadyExists(err) {\n\t\t\treqLogger.Error(err, \"Error while creating \"+mongodbSecret.Name+\" Secret\")\n\t\t\treturn ctrl.Result{}, err\n\t\t} else if err == nil {\n\t\t\treqLogger.Info(\"Create \" + mongodbSecret.Name + \" Secret for MongoDB connection details\")\n\t\t\tinstance.Status.Secret = mongodbSecret.Name + \" is 
holding connection details to MongoDB\"\n\t\t}\n\n\t\t// Deal with MongoDB persistentn volume.\n\t\tif instance.Spec.MongoDB.Persistent {\n\t\t\tmongodbPVC := deployment.CreatePersistentVolumeClaimMongoDB(&instance.Spec, instance.Namespace)\n\t\t\tcontrollerutil.SetControllerReference(instance, mongodbPVC, r.Scheme)\n\t\t\tif err := r.Client.Create(context.TODO(), mongodbPVC); err != nil && !errors.IsAlreadyExists(err) {\n\t\t\t\treqLogger.Error(err, \"Error while creating \"+mongodbPVC.Name+\" PVC\")\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t} else if err == nil {\n\t\t\t\treqLogger.Info(\"Apply \" + mongodbPVC.Name + \" PVC for MongoDB\")\n\t\t\t}\n\t\t}\n\n\t\t// Deal with MongoDB deployment and service.\n\t\tmongodbDeployment := deployment.CreateDeploymentForMongoDB(&instance.Spec, instance.Namespace)\n\t\tcontrollerutil.SetControllerReference(instance, mongodbDeployment, r.Scheme)\n\t\tif err := r.Client.Create(context.TODO(), mongodbDeployment); err != nil && !errors.IsAlreadyExists(err) {\n\t\t\treqLogger.Error(err, \"Error while creating \"+mongodbDeployment.Name+\" Deployment\")\n\t\t\treturn ctrl.Result{}, err\n\t\t} else if err == nil {\n\t\t\treqLogger.Info(\"Apply \" + mongodbDeployment.Name + \" Deployment for MongoDB\")\n\t\t\tinstance.Status.MongoDB = mongodbDeployment.Name + \" is the Deployment for MongoDB\"\n\t\t}\n\t\tmongodbService := deployment.CreateServiceForMongoDB(&instance.Spec, instance.Namespace)\n\t\tcontrollerutil.SetControllerReference(instance, mongodbService, r.Scheme)\n\t\tif err := r.Client.Create(context.TODO(), mongodbService); err != nil && !errors.IsAlreadyExists(err) {\n\t\t\treqLogger.Error(err, \"Error while creating \"+mongodbService.Name+\" Service\")\n\t\t\treturn ctrl.Result{}, err\n\t\t} else if err == nil {\n\t\t\treqLogger.Info(\"Apply \" + mongodbService.Name + \" Service for MongoDB\")\n\t\t}\n\t}\n\n\t// Deal with WebApp deployment and service.\n\twebappDeployment := 
deployment.CreateDeploymentForWebapp(&instance.Spec, instance.Namespace)\n\tcontrollerutil.SetControllerReference(instance, webappDeployment, r.Scheme)\n\tif err := r.Client.Create(context.TODO(), webappDeployment); err != nil && !errors.IsAlreadyExists(err) {\n\t\treqLogger.Error(err, \"Error while creating \"+webappDeployment.Name+\" Deployment\")\n\t\treturn ctrl.Result{}, err\n\t} else if err != nil && errors.IsAlreadyExists(err) {\n\t\t// Check if we got the correct number of replicas.\n\t\tif *webappDeployment.Spec.Replicas != instance.Spec.WebApp.ReplicaCount {\n\t\t\twebappDeployment.Spec.Replicas = &instance.Spec.WebApp.ReplicaCount\n\t\t\tif err2 := r.Client.Update(context.TODO(), webappDeployment); err2 != nil {\n\t\t\t\treqLogger.Error(err2, \"Error while updating replicas in \"+webappDeployment.Name+\" Deployment\")\n\t\t\t\treturn ctrl.Result{}, err2\n\t\t\t} else if err2 == nil {\n\t\t\t\treqLogger.Info(\"Update replicas \" + webappDeployment.Name + \" Deployment for WebApp\")\n\t\t\t\tinstance.Status.WebApp = webappDeployment.Name + \" is the Deployment for WebApp\"\n\t\t\t}\n\t\t}\n\t} else if err == nil {\n\t\treqLogger.Info(\"Apply \" + webappDeployment.Name + \" Deployment for WebApp\")\n\t\tinstance.Status.WebApp = webappDeployment.Name + \" is the Deployment for WebApp\"\n\t}\n\twebappService := deployment.CreateServiceForWebapp(&instance.Spec, instance.Namespace)\n\tcontrollerutil.SetControllerReference(instance, webappService, r.Scheme)\n\tif err := r.Client.Create(context.TODO(), webappService); err != nil && !errors.IsAlreadyExists(err) {\n\t\treqLogger.Error(err, \"Error while creating \"+webappService.Name+\" Service\")\n\t\treturn ctrl.Result{}, err\n\t} else if err == nil {\n\t\treqLogger.Info(\"Apply \" + webappService.Name + \" Service for WebApp\")\n\t}\n\n\t// Finally check if we're on OpenShift of Vanilla Kubernetes.\n\tisOpenShift := false\n\n\t// The discovery package is used to discover APIs supported by a Kubernetes API 
server.\n\tconfig, err := ctrl.GetConfig()\n\tif err == nil && config != nil {\n\t\tdclient, err := getDiscoveryClient(config)\n\t\tif err == nil && dclient != nil {\n\t\t\tapiGroupList, err := dclient.ServerGroups()\n\t\t\tif err != nil {\n\t\t\t\treqLogger.Info(\"Error while querying ServerGroups, assuming we're on Vanilla Kubernetes\")\n\t\t\t} else {\n\t\t\t\tfor i := 0; i < len(apiGroupList.Groups); i++ {\n\t\t\t\t\tif strings.HasSuffix(apiGroupList.Groups[i].Name, \".openshift.io\") {\n\t\t\t\t\t\tisOpenShift = true\n\t\t\t\t\t\treqLogger.Info(\"We detected being on OpenShift! Wouhou!\")\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\treqLogger.Info(\"Cannot retrieve a DiscoveryClient, assuming we're on Vanilla Kubernetes\")\n\t\t}\n\t}\n\t// Create a Route if we're on OpenShift ;-)\n\tif isOpenShift {\n\t\tappRoute := deployment.CreateRouteForWebapp(&instance.Spec, instance.Namespace)\n\t\tcontrollerutil.SetControllerReference(instance, appRoute, r.Scheme)\n\t\tif err := r.Client.Create(context.TODO(), appRoute); err != nil && !errors.IsAlreadyExists(err) {\n\t\t\treqLogger.Error(err, \"Error while creating \"+appRoute.Name+\" Route\")\n\t\t\treturn ctrl.Result{}, err\n\t\t} else if err == nil {\n\t\t\treqLogger.Info(\"Apply \" + appRoute.Name + \" Route for WebApp\")\n\t\t}\n\n\t\t// Maybe on next reconciliation loop ?\n\t\treqLogger.Info(\"Looking for route \" + appRoute.ObjectMeta.Name + \" on namespace \" + instance.Namespace)\n\t\terr = r.Client.Get(context.TODO(), types.NamespacedName{Name: appRoute.ObjectMeta.Name, Namespace: instance.Namespace}, appRoute)\n\t\tif err == nil {\n\t\t\tinstance.Status.Route = appRoute.Status.Ingress[0].Host\n\t\t} else {\n\t\t\treqLogger.Error(err, \"Error while reading Route for getting its Status.Ingress[0].Host field\")\n\t\t}\n\t}\n\n\t// Updating the Status that is modeled as a subresource. 
This way we can update\n\t// the status of our resources without increasing the ResourceGeneration metadata field.\n\tr.Status().Update(context.Background(), instance)\n\n\t// end of logic block\n\n\treturn ctrl.Result{}, nil\n}", "func (r *Reconciler) reconcileDelete(ctx context.Context, extensionConfig *runtimev1.ExtensionConfig) (ctrl.Result, error) {\n\tlog := ctrl.LoggerFrom(ctx)\n\tlog.Info(\"Unregistering ExtensionConfig information from registry\")\n\tif err := r.RuntimeClient.Unregister(extensionConfig); err != nil {\n\t\treturn ctrl.Result{}, errors.Wrapf(err, \"failed to unregister %s\", tlog.KObj{Obj: extensionConfig})\n\t}\n\treturn ctrl.Result{}, nil\n}", "func (r *ProvisioningReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {\n\t// provisioning.metal3.io is a singleton\n\t// Note: this check is here to make sure that the early startup configuration\n\t// is correct. For day 2 operatations the webhook will validate this.\n\tif req.Name != metal3iov1alpha1.ProvisioningSingletonName {\n\t\tklog.Info(\"ignoring invalid CR\", \"name\", req.Name)\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\t// Make sure ClusterOperator exists\n\terr := r.ensureClusterOperator(nil)\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tresult := ctrl.Result{}\n\tif !r.WebHookEnabled {\n\t\tif provisioning.WebhookDependenciesReady(r.OSClient) {\n\t\t\tklog.Info(\"restarting to enable the webhook\")\n\t\t\tos.Exit(1)\n\t\t}\n\t\t// Keep checking for our webhook dependencies to be ready, so we can\n\t\t// enable the webhook.\n\t\tresult.RequeueAfter = 5 * time.Minute\n\t}\n\n\tenabled, err := r.isEnabled()\n\tif err != nil {\n\t\treturn ctrl.Result{}, errors.Wrap(err, \"could not determine whether to run\")\n\t}\n\tif !enabled {\n\t\t// set ClusterOperator status to disabled=true, available=true\n\t\t// We're disabled; don't requeue\n\t\treturn ctrl.Result{}, errors.Wrapf(\n\t\t\tr.updateCOStatus(ReasonUnsupported, \"Nothing to do on this 
Platform\", \"\"),\n\t\t\t\"unable to put %q ClusterOperator in Disabled state\", clusterOperatorName)\n\t}\n\n\tbaremetalConfig, err := r.readProvisioningCR(ctx)\n\tif err != nil {\n\t\t// Error reading the object - requeue the request.\n\t\treturn ctrl.Result{}, err\n\t}\n\tif baremetalConfig == nil {\n\t\t// Provisioning configuration not available at this time.\n\t\t// Cannot proceed wtih metal3 deployment.\n\t\tklog.Info(\"Provisioning CR not found\")\n\t\treturn result, nil\n\t}\n\n\t// Make sure ClusterOperator's ownership is updated\n\terr = r.ensureClusterOperator(baremetalConfig)\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\t// Read container images from Config Map\n\tvar containerImages provisioning.Images\n\tif err := provisioning.GetContainerImages(&containerImages, r.ImagesFilename); err != nil {\n\t\t// Images config map is not valid\n\t\t// Provisioning configuration is not valid.\n\t\t// Requeue request.\n\t\tklog.ErrorS(err, \"invalid contents in images Config Map\")\n\t\tco_err := r.updateCOStatus(ReasonInvalidConfiguration, err.Error(), \"invalid contents in images Config Map\")\n\t\tif co_err != nil {\n\t\t\treturn ctrl.Result{}, fmt.Errorf(\"unable to put %q ClusterOperator in Degraded state: %w\", clusterOperatorName, co_err)\n\t\t}\n\t\treturn ctrl.Result{}, err\n\t}\n\n\t// Get cluster-wide proxy information\n\tclusterWideProxy, err := r.OSClient.ConfigV1().Proxies().Get(context.Background(), \"cluster\", metav1.GetOptions{})\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tinfo := r.provisioningInfo(baremetalConfig, &containerImages, clusterWideProxy)\n\n\t// Check if Provisioning Configuartion is being deleted\n\tdeleted, err := r.checkForCRDeletion(ctx, info)\n\tif err != nil {\n\t\tvar coErr error\n\t\tif deleted {\n\t\t\tcoErr = r.updateCOStatus(ReasonDeployTimedOut, err.Error(), \"Unable to delete a metal3 resource on Provisioning CR deletion\")\n\t\t} else {\n\t\t\tcoErr = 
r.updateCOStatus(ReasonInvalidConfiguration, err.Error(), \"Unable to add Finalizer on Provisioning CR\")\n\t\t}\n\t\tif coErr != nil {\n\t\t\treturn ctrl.Result{}, fmt.Errorf(\"unable to put %q ClusterOperator in Degraded state: %w\", clusterOperatorName, coErr)\n\t\t}\n\t\treturn ctrl.Result{}, err\n\t}\n\tif deleted {\n\t\treturn result, errors.Wrapf(\n\t\t\tr.updateCOStatus(ReasonComplete, \"all Metal3 resources deleted\", \"\"),\n\t\t\t\"unable to put %q ClusterOperator in Available state\", clusterOperatorName)\n\t}\n\n\tspecChanged := baremetalConfig.Generation != baremetalConfig.Status.ObservedGeneration\n\tif specChanged {\n\t\terr = r.updateCOStatus(ReasonSyncing, \"\", \"Applying metal3 resources\")\n\t\tif err != nil {\n\t\t\treturn ctrl.Result{}, fmt.Errorf(\"unable to put %q ClusterOperator in Syncing state: %w\", clusterOperatorName, err)\n\t\t}\n\t}\n\n\tif !r.WebHookEnabled {\n\t\t// Check if provisioning configuration is valid\n\t\tif err := baremetalConfig.ValidateBaremetalProvisioningConfig(); err != nil {\n\t\t\t// Provisioning configuration is not valid.\n\t\t\t// Requeue request.\n\t\t\tklog.Error(err, \"invalid config in Provisioning CR\")\n\t\t\terr = r.updateCOStatus(ReasonInvalidConfiguration, err.Error(), \"Unable to apply Provisioning CR: invalid configuration\")\n\t\t\tif err != nil {\n\t\t\t\treturn ctrl.Result{}, fmt.Errorf(\"unable to put %q ClusterOperator in Degraded state: %v\", clusterOperatorName, err)\n\t\t\t}\n\t\t\t// Temporarily not requeuing request\n\t\t\treturn ctrl.Result{}, nil\n\t\t}\n\t}\n\n\t//Create Secrets needed for Metal3 deployment\n\tif err := provisioning.CreateAllSecrets(r.KubeClient.CoreV1(), ComponentNamespace, baremetalConfig, r.Scheme); err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\t// Check Metal3 Deployment already exists and managed by MAO.\n\tmetal3DeploymentSelector, maoOwned, err := provisioning.CheckExistingMetal3Deployment(r.KubeClient.AppsV1(), ComponentNamespace)\n\tinfo.PodLabelSelector 
= metal3DeploymentSelector\n\tif err != nil && !apierrors.IsNotFound(err) {\n\t\treturn ctrl.Result{}, errors.Wrap(err, \"failed to check for existing Metal3 Deployment\")\n\t}\n\n\tif maoOwned {\n\t\tklog.Info(\"Adding annotation for CBO to take ownership of metal3 deployment created by MAO\")\n\t}\n\n\tfor _, ensureResource := range []ensureFunc{\n\t\tprovisioning.EnsureMetal3Deployment,\n\t\tprovisioning.EnsureMetal3StateService,\n\t\tprovisioning.EnsureImageCache,\n\t} {\n\t\tupdated, err := ensureResource(info)\n\t\tif err != nil {\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t\tif updated {\n\t\t\treturn result, r.Client.Status().Update(ctx, baremetalConfig)\n\t\t}\n\t}\n\n\tif specChanged {\n\t\tbaremetalConfig.Status.ObservedGeneration = baremetalConfig.Generation\n\t\terr = r.Client.Status().Update(ctx, baremetalConfig)\n\t\tif err != nil {\n\t\t\treturn ctrl.Result{}, fmt.Errorf(\"unable to update observed generation: %w\", err)\n\t\t}\n\t}\n\n\t// Determine the status of the deployment\n\tdeploymentState, err := provisioning.GetDeploymentState(r.KubeClient.AppsV1(), ComponentNamespace, baremetalConfig)\n\tif err != nil {\n\t\terr = r.updateCOStatus(ReasonNotFound, \"metal3 deployment inaccessible\", \"\")\n\t\tif err != nil {\n\t\t\treturn ctrl.Result{}, fmt.Errorf(\"unable to put %q ClusterOperator in Degraded state: %w\", clusterOperatorName, err)\n\t\t}\n\t\treturn ctrl.Result{}, errors.Wrap(err, \"failed to determine state of metal3 deployment\")\n\t}\n\tif deploymentState == appsv1.DeploymentReplicaFailure {\n\t\terr = r.updateCOStatus(ReasonDeployTimedOut, \"metal3 deployment rollout taking too long\", \"\")\n\t\tif err != nil {\n\t\t\treturn ctrl.Result{}, fmt.Errorf(\"unable to put %q ClusterOperator in Degraded state: %w\", clusterOperatorName, err)\n\t\t}\n\t}\n\n\t// Determine the status of the DaemonSet\n\tdaemonSetState, err := provisioning.GetDaemonSetState(r.KubeClient.AppsV1(), ComponentNamespace, baremetalConfig)\n\tif err != nil {\n\t\terr 
= r.updateCOStatus(ReasonNotFound, \"metal3 image cache daemonset inaccessible\", \"\")\n\t\tif err != nil {\n\t\t\treturn ctrl.Result{}, fmt.Errorf(\"unable to put %q ClusterOperator in Degraded state: %w\", clusterOperatorName, err)\n\t\t}\n\t\treturn ctrl.Result{}, errors.Wrap(err, \"failed to determine state of metal3 image cache daemonset\")\n\t}\n\tif daemonSetState == provisioning.DaemonSetReplicaFailure {\n\t\terr = r.updateCOStatus(ReasonDeployTimedOut, \"metal3 image cache rollout taking too long\", \"\")\n\t\tif err != nil {\n\t\t\treturn ctrl.Result{}, fmt.Errorf(\"unable to put %q ClusterOperator in Degraded state: %w\", clusterOperatorName, err)\n\t\t}\n\t}\n\tif deploymentState == appsv1.DeploymentAvailable && daemonSetState == provisioning.DaemonSetAvailable {\n\t\terr = r.updateCOStatus(ReasonComplete, \"metal3 pod and image cache are running\", \"\")\n\t\tif err != nil {\n\t\t\treturn ctrl.Result{}, fmt.Errorf(\"unable to put %q ClusterOperator in Progressing state: %w\", clusterOperatorName, err)\n\t\t}\n\t}\n\n\treturn result, nil\n}", "func (r *ResticDFCommandExecutor) ExecuteStorageCommands(podRef *corev1.Pod, persistentVolumes []MigAnalyticPersistentVolumeDetails) (DF, DU) {\n\t// TODO: use the appropriate block size based on PVCs\n\tstorageCommand := StorageCommand{\n\t\tBaseLocation: \"/host_pods\",\n\t\tBlockSize: DecimalSIMega,\n\t\tStdOut: \"\",\n\t\tStdErr: \"\",\n\t}\n\tdfCmd := DF{StorageCommand: storageCommand}\n\tduCmd := DU{StorageCommand: storageCommand}\n\tdfCmdString := dfCmd.PrepareCommand(persistentVolumes)\n\tduCmdString := duCmd.PrepareCommand(persistentVolumes)\n\trestCfg := r.Client.RestConfig()\n\tpodDfCommand := pods.PodCommand{\n\t\tPod: podRef,\n\t\tRestCfg: restCfg,\n\t\tArgs: dfCmdString,\n\t}\n\tpodDuCommand := pods.PodCommand{\n\t\tPod: podRef,\n\t\tRestCfg: restCfg,\n\t\tArgs: duCmdString,\n\t}\n\tlog.Info(\"Executing df command inside source cluster Restic Pod to measure actual usage for extended PV 
analysis\",\n\t\t\"pod\", path.Join(podRef.Namespace, podRef.Name),\n\t\t\"command\", dfCmdString)\n\terr := podDfCommand.Run()\n\tif err != nil {\n\t\tlog.Error(err, \"Failed running df command inside Restic Pod\",\n\t\t\t\"pod\", path.Join(podRef.Namespace, podRef.Name),\n\t\t\t\"command\", dfCmdString)\n\t}\n\tdfCmd.StdErr = podDfCommand.Err.String()\n\tdfCmd.StdOut = podDfCommand.Out.String()\n\tlog.Info(\"Executing du command inside source cluster Restic Pod to measure actual usage for extended PV analysis\",\n\t\t\"pod\", path.Join(podRef.Namespace, podRef.Name),\n\t\t\"command\", duCmdString)\n\terr = podDuCommand.Run()\n\tif err != nil {\n\t\tlog.Error(err, \"Failed running du command inside Restic Pod\",\n\t\t\t\"pod\", path.Join(podRef.Namespace, podRef.Name),\n\t\t\t\"command\", duCmdString)\n\t}\n\tduCmd.StdErr = podDuCommand.Err.String()\n\tduCmd.StdOut = podDuCommand.Out.String()\n\treturn dfCmd, duCmd\n}", "func (r *GlotpodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {\n\t_ = log.FromContext(ctx)\n\n\t// your logic here\n\tpod := &goglotdevv1alpha1.Glotpod{}\n\terr := r.Get(ctx, types.NamespacedName{Name: req.Name, Namespace: req.Namespace}, pod)\n\tif err != nil {\n\t\treturn ctrl.Result{}, nil\n\t}\n\terr = r.createJob(ctx, pod)\n\tif err != nil {\n\t\tfmt.Println(\"loldu \", err.Error())\n\t\treturn ctrl.Result{}, err\n\t}\n\treturn ctrl.Result{}, nil\n}", "func (r *VolumeReplicationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {\n\n\t_ = r.Log.WithValues(\"controller_volumereplication\", req.NamespacedName)\n\n\t// Fetch VolumeReplication instance\n\tinstance := &replicationv1alpha1.VolumeReplication{}\n\terr := r.Client.Get(context.TODO(), req.NamespacedName, instance)\n\tif err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\t// Request object not found, could have been deleted after reconcile request.\n\t\t\t// Owned objects are automatically garbage collected. 
For additional cleanup logic use finalizers.\n\t\t\t// Return and don't requeue\n\t\t\tr.Log.Info(\"no VolumeReplication resource found\")\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\t// Error reading the object - requeue the request.\n\t\treturn reconcile.Result{}, err\n\t}\n\n\t// Get VolumeReplicationClass\n\tvrcObj, err := r.getVolumeReplicaCLass(instance.Spec.VolumeReplicationClass)\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tif r.DriverConfig.DriverName != vrcObj.Spec.Provisioner {\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\tvar volumeHandle string\n\tnameSpacedName := types.NamespacedName{Name: instance.Spec.DataSource.Name, Namespace: req.Namespace}\n\tswitch instance.Spec.DataSource.Kind {\n\tcase pvcDataSource:\n\t\t_, pv, err := r.getPVCDataSource(nameSpacedName)\n\t\tif err != nil {\n\t\t\tr.Log.Error(err, \"failed to get dataSource for PVC\", \"dataSourceName\", nameSpacedName.Name)\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t\tvolumeHandle = pv.Spec.CSI.VolumeHandle\n\tdefault:\n\t\treturn ctrl.Result{}, fmt.Errorf(\"unsupported datasource kind %q\", instance.Spec.DataSource.Kind)\n\t}\n\n\tr.Log.Info(\"volume handle\", volumeHandle)\n\n\tif instance.Spec.ImageState == replicationv1alpha1.Secondary {\n\t\tfailedTask, err := markVolumeAsSecondary()\n\t\tif err != nil {\n\t\t\tr.Log.Error(err, \"task failed\", \"taskName\", failedTask)\n\t\t}\n\t}\n\n\treturn ctrl.Result{}, nil\n}", "func (obj *ocsCephFilesystems) ensureDeleted(r *StorageClusterReconciler, sc *ocsv1.StorageCluster) (reconcile.Result, error) {\n\tfoundCephFilesystem := &cephv1.CephFilesystem{}\n\tcephFilesystems, err := r.newCephFilesystemInstances(sc)\n\tif err != nil {\n\t\treturn reconcile.Result{}, err\n\t}\n\n\tfor _, cephFilesystem := range cephFilesystems {\n\t\terr := r.Client.Get(context.TODO(), types.NamespacedName{Name: cephFilesystem.Name, Namespace: sc.Namespace}, foundCephFilesystem)\n\t\tif err != nil {\n\t\t\tif errors.IsNotFound(err) 
{\n\t\t\t\tr.Log.Info(\"Uninstall: CephFileSystem not found.\", \"CephFileSystem\", klog.KRef(cephFilesystem.Namespace, cephFilesystem.Name))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tr.Log.Error(err, \"Uninstall: Unable to retrieve CephFileSystem.\", \"CephFileSystem\", klog.KRef(cephFilesystem.Namespace, cephFilesystem.Name))\n\t\t\treturn reconcile.Result{}, fmt.Errorf(\"uninstall: Unable to retrieve CephFileSystem %v: %v\", cephFilesystem.Name, err)\n\t\t}\n\n\t\tif cephFilesystem.GetDeletionTimestamp().IsZero() {\n\t\t\tr.Log.Info(\"Uninstall: Deleting cephFilesystem.\", \"CephFileSystem\", klog.KRef(foundCephFilesystem.Namespace, foundCephFilesystem.Name))\n\t\t\terr = r.Client.Delete(context.TODO(), foundCephFilesystem)\n\t\t\tif err != nil {\n\t\t\t\tr.Log.Error(err, \"Uninstall: Failed to delete CephFileSystem.\", \"CephFileSystem\", klog.KRef(foundCephFilesystem.Namespace, foundCephFilesystem.Name))\n\t\t\t\treturn reconcile.Result{}, fmt.Errorf(\"uninstall: Failed to delete CephFileSystem %v: %v\", foundCephFilesystem.Name, err)\n\t\t\t}\n\t\t}\n\n\t\terr = r.Client.Get(context.TODO(), types.NamespacedName{Name: cephFilesystem.Name, Namespace: sc.Namespace}, foundCephFilesystem)\n\t\tif err != nil {\n\t\t\tif errors.IsNotFound(err) {\n\t\t\t\tr.Log.Info(\"Uninstall: CephFilesystem is deleted.\", \"CephFileSystem\", klog.KRef(cephFilesystem.Namespace, cephFilesystem.Name))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tr.Log.Error(err, \"Uninstall: Waiting for CephFileSystem to be deleted.\", \"CephFileSystem\", klog.KRef(cephFilesystem.Namespace, cephFilesystem.Name))\n\t\treturn reconcile.Result{}, fmt.Errorf(\"uninstall: Waiting for CephFileSystem %v to be deleted\", cephFilesystem.Name)\n\n\t}\n\treturn reconcile.Result{}, nil\n}", "func (r *DaemonReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.Result, error) {\n\terr := r.cleanupOldObjects(ctx, request.Namespace)\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tlvSets, lvs, 
tolerations, ownerRefs, nodeSelector, err := r.aggregateDeamonInfo(ctx, request)\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\tif len(lvSets.Items) < 1 && len(lvs.Items) < 1 {\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\tconfigMap, opResult, err := r.reconcileProvisionerConfigMap(ctx, request, lvSets.Items, lvs.Items, ownerRefs)\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t} else if opResult == controllerutil.OperationResultUpdated || opResult == controllerutil.OperationResultCreated {\n\t\tklog.InfoS(\"provisioner configmap\", \"configMap\", configMap.GetName(), \"result\", opResult)\n\t}\n\n\t// enable service and servicemonitor for diskmaker daemonset\n\tmetricsExportor := localmetrics.NewExporter(ctx, r.Client, common.DiskMakerServiceName, request.Namespace, common.DiskMakerMetricsServingCert,\n\t\townerRefs, DiskMakerName)\n\tif err := metricsExportor.EnableMetricsExporter(); err != nil {\n\t\tklog.ErrorS(err, \"failed to create service and servicemonitors for diskmaker daemonset\")\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tif err := localmetrics.CreateOrUpdateAlertRules(ctx, r.Client, request.Namespace, DiskMakerName, ownerRefs); err != nil {\n\t\tklog.ErrorS(err, \"failed to create alerting rules\")\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tconfigMapDataHash := dataHash(configMap.Data)\n\n\tdiskMakerDSMutateFn := getDiskMakerDSMutateFn(request, tolerations, ownerRefs, nodeSelector, configMapDataHash)\n\tds, opResult, err := CreateOrUpdateDaemonset(ctx, r.Client, diskMakerDSMutateFn)\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t} else if opResult == controllerutil.OperationResultUpdated || opResult == controllerutil.OperationResultCreated {\n\t\tklog.InfoS(\"daemonset changed\", \"dsName\", ds.GetName(), \"opResult\", opResult)\n\t}\n\n\treturn ctrl.Result{}, err\n}", "func (c *VaultController) reconcileVault(vs *api.VaultServer, v Vault) error {\n\tstatus := vs.Status\n\n\terr := c.CreateVaultTLSSecret(vs, v)\n\tif err != nil 
{\n\t\tstatus.Conditions = []api.VaultServerCondition{\n\t\t\t{\n\t\t\t\tType: api.VaultServerConditionFailure,\n\t\t\t\tStatus: core.ConditionTrue,\n\t\t\t\tReason: \"FailedToCreateVaultTLSSecret\",\n\t\t\t\tMessage: err.Error(),\n\t\t\t},\n\t\t}\n\n\t\terr2 := c.updatedVaultServerStatus(&status, vs)\n\t\tif err2 != nil {\n\t\t\treturn errors.Wrap(err2, \"failed to update status\")\n\t\t}\n\t\treturn errors.Wrap(err, \"failed to create vault server tls secret\")\n\t}\n\n\terr = c.CreateVaultConfig(vs, v)\n\tif err != nil {\n\t\tstatus.Conditions = []api.VaultServerCondition{\n\t\t\t{\n\t\t\t\tType: api.VaultServerConditionFailure,\n\t\t\t\tStatus: core.ConditionTrue,\n\t\t\t\tReason: \"FailedToCreateVaultConfig\",\n\t\t\t\tMessage: err.Error(),\n\t\t\t},\n\t\t}\n\n\t\terr2 := c.updatedVaultServerStatus(&status, vs)\n\t\tif err2 != nil {\n\t\t\treturn errors.Wrap(err2, \"failed to update status\")\n\t\t}\n\t\treturn errors.Wrap(err, \"failed to create vault config\")\n\t}\n\n\terr = c.DeployVault(vs, v)\n\tif err != nil {\n\t\tstatus.Conditions = []api.VaultServerCondition{\n\t\t\t{\n\t\t\t\tType: api.VaultServerConditionFailure,\n\t\t\t\tStatus: core.ConditionTrue,\n\t\t\t\tReason: \"FailedToDeployVault\",\n\t\t\t\tMessage: err.Error(),\n\t\t\t},\n\t\t}\n\n\t\terr2 := c.updatedVaultServerStatus(&status, vs)\n\t\tif err2 != nil {\n\t\t\treturn errors.Wrap(err2, \"failed to update status\")\n\t\t}\n\t\treturn errors.Wrap(err, \"failed to deploy vault\")\n\t}\n\n\tstatus.Conditions = []api.VaultServerCondition{}\n\tstatus.ObservedGeneration = types.NewIntHash(vs.Generation, meta_util.GenerationHash(vs))\n\terr = c.updatedVaultServerStatus(&status, vs)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to update status\")\n\t}\n\n\t// Add vault monitor to watch vault seal or unseal status\n\tkey := vs.GetKey()\n\tif _, ok := c.ctxCancels[key]; !ok {\n\t\tctx, cancel := context.WithCancel(context.Background())\n\t\tc.ctxCancels[key] = cancel\n\t\tgo 
c.monitorAndUpdateStatus(ctx, vs)\n\t}\n\treturn nil\n}", "func (r *yandexContainerRegistryReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {\n\tlog := r.log.WithValues(\"name\", req.NamespacedName)\n\tlog.V(1).Info(\"started reconciliation\")\n\n\t// Try to retrieve object from k8s\n\tvar object connectorsv1.YandexContainerRegistry\n\tif err := r.Get(ctx, req.NamespacedName, &object); err != nil {\n\t\t// It still can be OK if we have not found it, and we do not need to reconcile it again\n\n\t\t// This outcome signifies that we just cannot find object, that is ok\n\t\tif apierrors.IsNotFound(err) {\n\t\t\tlog.V(1).Info(\"object not found in k8s, reconciliation not possible\")\n\t\t\treturn config.GetNeverResult()\n\t\t}\n\n\t\treturn config.GetErroredResult(fmt.Errorf(\"unable to get object from k8s: %w\", err))\n\t}\n\n\t// If object must be currently finalized, do it and quit\n\tif phase.MustBeFinalized(&object.ObjectMeta, ycrconfig.FinalizerName) {\n\t\tif err := r.finalize(ctx, log.WithName(\"finalize\"), &object); err != nil {\n\t\t\treturn config.GetErroredResult(fmt.Errorf(\"unable to finalize object: %w\", err))\n\t\t}\n\t\treturn config.GetNormalResult()\n\t}\n\n\tif err := phase.RegisterFinalizer(\n\t\tctx, r.Client, log, &object.ObjectMeta, &object, ycrconfig.FinalizerName,\n\t); err != nil {\n\t\treturn config.GetErroredResult(fmt.Errorf(\"unable to register finalizer: %w\", err))\n\t}\n\n\tres, err := r.allocateResource(ctx, log.WithName(\"allocate-resource\"), &object)\n\tif err != nil {\n\t\treturn config.GetErroredResult(fmt.Errorf(\"unable to allocate resource: %w\", err))\n\t}\n\n\tif err := r.matchSpec(ctx, log.WithName(\"match-spec\"), &object, res); err != nil {\n\t\treturn config.GetErroredResult(fmt.Errorf(\"unable to match spec: %w\", err))\n\t}\n\n\tif err := r.updateStatus(ctx, log.WithName(\"update-status\"), &object, res); err != nil {\n\t\treturn config.GetErroredResult(fmt.Errorf(\"unable to update 
status: %w\", err))\n\t}\n\n\tif err := phase.ProvideConfigmap(\n\t\tctx,\n\t\tr.Client,\n\t\tlog.WithName(\"provide-configmap\"),\n\t\tobject.Name, ycrconfig.ShortName, object.Namespace,\n\t\tmap[string]string{\"ID\": object.Status.ID},\n\t); err != nil {\n\t\treturn config.GetErroredResult(fmt.Errorf(\"unable to provide configmap: %w\", err))\n\t}\n\n\tlog.V(1).Info(\"finished reconciliation\")\n\treturn config.GetNormalResult()\n}", "func (s *Service) IntermediateStorage(c context.Context) (storage.Storage, error) {\n\tcfg := s.config.Config()\n\tif cfg.GetStorage() == nil {\n\t\tlog.Errorf(c, \"Missing storage configuration.\")\n\t\treturn nil, ErrInvalidConfig\n\t}\n\n\tbtcfg := cfg.GetStorage().GetBigtable()\n\tif btcfg == nil {\n\t\tlog.Errorf(c, \"Missing BigTable storage configuration\")\n\t\treturn nil, ErrInvalidConfig\n\t}\n\n\t// Initialize Storage authentication.\n\ta, err := s.Authenticator(c, func(o *auth.Options) {\n\t\to.Scopes = bigtable.StorageScopes\n\t\tif s.storageCredentialJSONPath != \"\" {\n\t\t\to.ServiceAccountJSONPath = s.storageCredentialJSONPath\n\t\t}\n\t})\n\tif err != nil {\n\t\tlog.WithError(err).Errorf(c, \"Failed to create BigTable Authenticator.\")\n\t\treturn nil, err\n\t}\n\n\tbt, err := bigtable.New(c, bigtable.Options{\n\t\tProject: btcfg.Project,\n\t\tZone: btcfg.Zone,\n\t\tCluster: btcfg.Cluster,\n\t\tLogTable: btcfg.LogTableName,\n\t\tClientOptions: []cloud.ClientOption{\n\t\t\tcloud.WithTokenSource(a.TokenSource()),\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bt, nil\n}", "func SetSkipStorageSetup(bool) {\n}", "func (r *ReconcileHive) Reconcile(request reconcile.Request) (reconcile.Result, error) {\n\tlogrus.Infof(\"Reconciling Hive %s/%s\\n\", request.Namespace, request.Name)\n\n\t// Fetch the Hive instance\n\tinstance := &hivev1alpha1.Hive{}\n\terr := r.client.Get(context.TODO(), request.NamespacedName, instance)\n\tif err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\t// Request object not 
found, could have been deleted after reconcile request.\n\t\t\t// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.\n\t\t\t// Return and don't requeue\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\t// Error reading the object - requeue the request.\n\t\treturn reconcile.Result{}, err\n\t}\n\n\t// files is an array of strings, that are to be registered to the client\n\t// using resourceapply.ApplyDirectly\n\tfiles := []string{\n\t\t\"deploy/config/rbac-role.yaml\",\n\t\t\"deploy/config/rbac_role_binding.yaml\",\n\t\t\"deploy/config/manager_service.yaml\",\n\t}\n\trecorder := events.NewRecorder(r.kubeClient.CoreV1().Events(request.Namespace), \"hive-operator\", &corev1.ObjectReference{\n\t\tName: request.Name,\n\t\tNamespace: request.Namespace,\n\t})\n\tresourceapply.ApplyDirectly(r.kubeClient, recorder, assets.Asset, files...)\n\t// managerDeployment is the byte array for manager_deployment.yaml, also we have to call\n\t// resourceapply.ApplyDeployment to register a deployment to the client\n\tmanagerDeployment := resourceread.ReadDeploymentV1OrDie(assets.MustAsset(\"deploy/config/manager_deployment.yaml\"))\n\t// containers is the array of containers that manager-deployment creates.\n\t// It is one container for now but the code handles changing the image for multiple containers\n\tcontainers := managerDeployment.Spec.Template.Spec.Containers\n\tfor containerIndex := 0; containerIndex < len(containers); containerIndex++ {\n\t\tcontainers[containerIndex].Image = instance.Spec.Image\n\t}\n\tresourceapply.ApplyDeployment(r.deploymentClient,\n\t\trecorder,\n\t\tmanagerDeployment,\n\t\t0,\n\t\ttrue)\n\n\treturn reconcile.Result{}, nil\n}", "func storageProvisioner(mirror string) string {\n\tcv := version.GetStorageProvisionerVersion()\n\tin := \"k8s-minikube/storage-provisioner:\" + cv\n\tif mirror == \"\" {\n\t\tmirror = \"gcr.io\"\n\t} else if mirror == constants.AliyunMirror {\n\t\tin = \"storage-provisioner:\" + 
cv\n\t}\n\treturn path.Join(mirror, in)\n}", "func (r *MachineTester) Reconcile(req ctrl.Request) (ctrl.Result, error) {\n\treturn ctrl.Result{}, nil\n}", "func (o *StorageDeleteOptions) Run() (err error) {\n\tvar deleteMsg string\n\n\tvar devFile devfileParser.DevfileObj\n\tmPath := \"\"\n\tif o.isDevfile {\n\t\tdevFile, err = devfile.ParseAndValidate(o.devfilePath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = validate.ValidateDevfileData(devFile.Data)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmPath, err = devFile.Data.GetVolumeMountPath(o.storageName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tmPath = o.LocalConfigInfo.GetMountPath(o.storageName)\n\t}\n\n\tdeleteMsg = fmt.Sprintf(\"Are you sure you want to delete the storage %v mounted to %v in %v component\", o.storageName, mPath, o.componentName)\n\n\tif log.IsJSON() || o.storageForceDeleteFlag || ui.Proceed(deleteMsg) {\n\t\tif o.isDevfile {\n\t\t\terr = devFile.Data.DeleteVolume(o.storageName)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\terr = devFile.WriteYamlDevfile()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\terr = o.LocalConfigInfo.StorageDelete(o.storageName)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to delete storage, cause %v\", err)\n\t\t\t}\n\t\t}\n\n\t\tsuccessMessage := fmt.Sprintf(\"Deleted storage %v from %v\", o.storageName, o.componentName)\n\n\t\tif log.IsJSON() {\n\t\t\tstorage.MachineReadableSuccessOutput(o.storageName, successMessage)\n\t\t} else {\n\t\t\tlog.Infof(successMessage)\n\t\t\tlog.Italic(\"\\nPlease use `odo push` command to delete the storage from the cluster\")\n\t\t}\n\t} else {\n\t\treturn fmt.Errorf(\"aborting deletion of storage: %v\", o.storageName)\n\t}\n\n\treturn\n}", "func (r *otherNamespaceReconciler) Reconcile(ctx context.Context, in *v2.CatalogSourceConfig) (out *v2.CatalogSourceConfig, nextPhase *shared.Phase, err error) {\n\t// Do nothing as this object has 
already been placed in the failed phase.\n\tif in.Status.CurrentPhase.Name == phase.Failed {\n\t\treturn\n\t}\n\n\terr = fmt.Errorf(\"Will only reconcile resources in the operator's namespace\")\n\tr.log.Error(err)\n\tout = in\n\tnextPhase = phase.GetNextWithMessage(phase.Failed, err.Error())\n\treturn\n}", "func getStorageSet(ctx context.Context, log logging.Logger, hostList []string, client UnaryInvoker) (*HostStorageSet, error) {\n\tscanReq := &StorageScanReq{NvmeBasic: true}\n\tscanReq.SetHostList(hostList)\n\n\tscanResp, err := StorageScan(ctx, client, scanReq)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(scanResp.GetHostErrors()) > 0 {\n\t\treturn nil, &ConfigGenerateError{HostErrorsResp: scanResp.HostErrorsResp}\n\t}\n\n\t// verify homogeneous storage\n\tswitch len(scanResp.HostStorage) {\n\tcase 0:\n\t\treturn nil, errors.New(\"no host responses\")\n\tcase 1: // success\n\tdefault: // more than one means non-homogeneous hardware\n\t\tlog.Info(\"Heterogeneous storage hardware configurations detected, \" +\n\t\t\t\"cannot proceed. 
The following sets of hosts have different \" +\n\t\t\t\"storage hardware:\")\n\t\tfor _, hss := range scanResp.HostStorage {\n\t\t\tlog.Info(hss.HostSet.String())\n\t\t}\n\n\t\treturn nil, errors.New(\"storage hardware not consistent across hosts\")\n\t}\n\n\tstorageSet := scanResp.HostStorage[scanResp.HostStorage.Keys()[0]]\n\n\tlog.Debugf(\"Storage hardware is consistent for hosts %s:\\n\\t%s\\n\\t%s\",\n\t\tstorageSet.HostSet.String(), storageSet.HostStorage.ScmNamespaces.Summary(),\n\t\tstorageSet.HostStorage.NvmeDevices.Summary())\n\n\treturn storageSet, nil\n}", "func (r *DatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {\n\t_ = r.Log.WithValues(\"database\", req.NamespacedName)\n\n\treconcilePeriod := r.Interval * time.Second\n\treconcileResult := reconcile.Result{RequeueAfter: reconcilePeriod}\n\t// Fetch the Database custom resource\n\tdbcr := &kciv1beta1.Database{}\n\terr := r.Get(ctx, req.NamespacedName, dbcr)\n\tif err != nil {\n\t\tif k8serrors.IsNotFound(err) {\n\t\t\t// Requested object not found, could have been deleted after reconcile request.\n\t\t\t// Owned objects are automatically garbage collected. 
For additional cleanup logic use finalizers.\n\t\t\t// Return and don't requeue\n\t\t\treturn reconcileResult, nil\n\t\t}\n\t\t// Error reading the object - requeue the request.\n\t\treturn reconcileResult, err\n\t}\n\n\t// Update object status always when function exit abnormally or through a panic.\n\tdefer func() {\n\t\tif err := r.Status().Update(ctx, dbcr); err != nil {\n\t\t\tlogrus.Errorf(\"failed to update status - %s\", err)\n\t\t}\n\t}()\n\n\tpromDBsStatus.WithLabelValues(dbcr.Namespace, dbcr.Spec.Instance, dbcr.Name).Set(boolToFloat64(dbcr.Status.Status))\n\tpromDBsPhase.WithLabelValues(dbcr.Namespace, dbcr.Spec.Instance, dbcr.Name).Set(dbPhaseToFloat64(dbcr.Status.Phase))\n\n\t// Check if the Database is marked to be deleted, which is\n\t// indicated by the deletion timestamp being set.\n\tisDatabaseMarkedToBeDeleted := dbcr.GetDeletionTimestamp() != nil\n\tif isDatabaseMarkedToBeDeleted {\n\t\tdbcr.Status.Phase = dbPhaseDelete\n\t\t// Run finalization logic for database. If the\n\t\t// finalization logic fails, don't remove the finalizer so\n\t\t// that we can retry during the next reconciliation.\n\t\tif containsString(dbcr.ObjectMeta.Finalizers, \"db.\"+dbcr.Name) {\n\t\t\terr := r.deleteDatabase(ctx, dbcr)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Errorf(\"DB: namespace=%s, name=%s failed deleting database - %s\", dbcr.Namespace, dbcr.Name, err)\n\t\t\t\t// when database deletion failed, don't requeue request. 
to prevent exceeding api limit (ex: against google api)\n\t\t\t\treturn r.manageError(ctx, dbcr, err, false)\n\t\t\t}\n\t\t\tkci.RemoveFinalizer(&dbcr.ObjectMeta, \"db.\"+dbcr.Name)\n\t\t\terr = r.Update(ctx, dbcr)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Errorf(\"error resource updating - %s\", err)\n\t\t\t\treturn r.manageError(ctx, dbcr, err, true)\n\t\t\t}\n\t\t}\n\t\t// legacy finalizer just remove\n\t\t// we set owner reference for monitoring related resource instead of handling finalizer\n\t\tif containsString(dbcr.ObjectMeta.Finalizers, \"monitoring.\"+dbcr.Name) {\n\t\t\tkci.RemoveFinalizer(&dbcr.ObjectMeta, \"monitoring.\"+dbcr.Name)\n\t\t\terr = r.Update(ctx, dbcr)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Errorf(\"error resource updating - %s\", err)\n\t\t\t\treturn r.manageError(ctx, dbcr, err, true)\n\t\t\t}\n\t\t}\n\t\treturn reconcileResult, nil\n\t}\n\n\tdatabaseSecret, err := r.getDatabaseSecret(ctx, dbcr)\n\tif err != nil && !k8serrors.IsNotFound(err) {\n\t\tlogrus.Errorf(\"could not get database secret - %s\", err)\n\t\treturn r.manageError(ctx, dbcr, err, true)\n\t}\n\n\tif isDBChanged(dbcr, databaseSecret) {\n\t\tlogrus.Infof(\"DB: namespace=%s, name=%s spec changed\", dbcr.Namespace, dbcr.Name)\n\t\terr := r.initialize(ctx, dbcr)\n\t\tif err != nil {\n\t\t\treturn r.manageError(ctx, dbcr, err, true)\n\t\t}\n\t\terr = r.Status().Update(ctx, dbcr)\n\t\tif err != nil {\n\t\t\tlogrus.Errorf(\"error status subresource updating - %s\", err)\n\t\t\treturn r.manageError(ctx, dbcr, err, true)\n\t\t}\n\n\t\taddDBChecksum(dbcr, databaseSecret)\n\t\terr = r.Update(ctx, dbcr)\n\t\tif err != nil {\n\t\t\tlogrus.Errorf(\"error resource updating - %s\", err)\n\t\t\treturn r.manageError(ctx, dbcr, err, true)\n\t\t}\n\t\tlogrus.Infof(\"DB: namespace=%s, name=%s initialized\", dbcr.Namespace, dbcr.Name)\n\t}\n\n\t// database status not true, process phase\n\tif !dbcr.Status.Status {\n\t\townership := []metav1.OwnerReference{}\n\t\tif dbcr.Spec.Cleanup 
{\n\t\t\townership = append(ownership, metav1.OwnerReference{\n\t\t\t\tAPIVersion: dbcr.APIVersion,\n\t\t\t\tKind: dbcr.Kind,\n\t\t\t\tName: dbcr.Name,\n\t\t\t\tUID: dbcr.GetUID(),\n\t\t\t},\n\t\t\t)\n\t\t}\n\n\t\tphase := dbcr.Status.Phase\n\t\tlogrus.Infof(\"DB: namespace=%s, name=%s start %s\", dbcr.Namespace, dbcr.Name, phase)\n\n\t\tdefer promDBsPhaseTime.WithLabelValues(phase).Observe(kci.TimeTrack(time.Now()))\n\t\terr := r.createDatabase(ctx, dbcr, ownership)\n\t\tif err != nil {\n\t\t\t// when database creation failed, don't requeue request. to prevent exceeding api limit (ex: against google api)\n\t\t\treturn r.manageError(ctx, dbcr, err, false)\n\t\t}\n\n\t\tdbcr.Status.Phase = dbPhaseInstanceAccessSecret\n\n\t\tif err = r.createInstanceAccessSecret(ctx, dbcr, ownership); err != nil {\n\t\t\treturn r.manageError(ctx, dbcr, err, true)\n\t\t}\n\t\tdbcr.Status.Phase = dbPhaseProxy\n\t\terr = r.createProxy(ctx, dbcr, ownership)\n\t\tif err != nil {\n\t\t\treturn r.manageError(ctx, dbcr, err, true)\n\t\t}\n\t\tdbcr.Status.Phase = dbPhaseSecretsTemplating\n\t\tif err = r.createTemplatedSecrets(ctx, dbcr, ownership); err != nil {\n\t\t\treturn r.manageError(ctx, dbcr, err, true)\n\t\t}\n\t\tdbcr.Status.Phase = dbPhaseConfigMap\n\t\tif err = r.createInfoConfigMap(ctx, dbcr, ownership); err != nil {\n\t\t\treturn r.manageError(ctx, dbcr, err, true)\n\t\t}\n\t\tdbcr.Status.Phase = dbPhaseBackupJob\n\t\terr = r.createBackupJob(ctx, dbcr, ownership)\n\t\tif err != nil {\n\t\t\treturn r.manageError(ctx, dbcr, err, true)\n\t\t}\n\t\tdbcr.Status.Phase = dbPhaseFinish\n\t\tdbcr.Status.Status = true\n\t\tdbcr.Status.Phase = dbPhaseReady\n\n\t\terr = r.Status().Update(ctx, dbcr)\n\t\tif err != nil {\n\t\t\tlogrus.Errorf(\"error status subresource updating - %s\", err)\n\t\t\treturn r.manageError(ctx, dbcr, err, true)\n\t\t}\n\t\tlogrus.Infof(\"DB: namespace=%s, name=%s finish %s\", dbcr.Namespace, dbcr.Name, phase)\n\t}\n\n\t// status true do nothing and don't 
requeue\n\treturn reconcileResult, nil\n}", "func (c *Container) mountStorage() (err error) {\n\t// Container already mounted, nothing to do\n\tif c.state.Mounted {\n\t\treturn nil\n\t}\n\n\t// TODO: generalize this mount code so it will mount every mount in ctr.config.Mounts\n\n\tmounted, err := mount.Mounted(c.config.ShmDir)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"unable to determine if %q is mounted\", c.config.ShmDir)\n\t}\n\n\tif !mounted {\n\t\tshmOptions := \"mode=1777,size=\" + strconv.Itoa(DefaultShmSize)\n\t\tif err := unix.Mount(\"shm\", c.config.ShmDir, \"tmpfs\", unix.MS_NOEXEC|unix.MS_NOSUID|unix.MS_NODEV,\n\t\t\tlabel.FormatMountLabel(shmOptions, c.config.MountLabel)); err != nil {\n\t\t\treturn errors.Wrapf(err, \"failed to mount shm tmpfs %q\", c.config.ShmDir)\n\t\t}\n\t}\n\n\tmountPoint, err := c.runtime.storageService.MountContainerImage(c.ID())\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"error mounting storage for container %s\", c.ID())\n\t}\n\tc.state.Mounted = true\n\tc.state.Mountpoint = mountPoint\n\n\tlogrus.Debugf(\"Created root filesystem for container %s at %s\", c.ID(), c.state.Mountpoint)\n\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tif err2 := c.cleanupStorage(); err2 != nil {\n\t\t\t\tlogrus.Errorf(\"Error unmounting storage for container %s: %v\", c.ID(), err)\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn c.save()\n}", "func StorageInit() {\n\tm := new(v2.MetaV2)\n\t// get the most recent record as the init resource version\n\t_, err := dbm.DBAccess.QueryTable(v2.NewMetaTableName).OrderBy(\"-\" + v2.RV).Limit(1).All(m)\n\tutilruntime.Must(err)\n\tDefaultV2Client.SetRevision(m.ResourceVersion)\n}", "func (c *Container) teardownStorage() error {\n\tif !c.valid {\n\t\treturn errors.Wrapf(ErrCtrRemoved, \"container %s is not valid\", c.ID())\n\t}\n\n\tif c.state.State == ContainerStateRunning || c.state.State == ContainerStatePaused {\n\t\treturn errors.Wrapf(ErrCtrStateInvalid, \"cannot remove storage for container %s as it 
is running or paused\", c.ID())\n\t}\n\n\tartifacts := filepath.Join(c.config.StaticDir, artifactsDir)\n\tif err := os.RemoveAll(artifacts); err != nil {\n\t\treturn errors.Wrapf(err, \"error removing artifacts %q\", artifacts)\n\t}\n\n\tif err := c.cleanupStorage(); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to cleanup container %s storage\", c.ID())\n\t}\n\n\tif err := c.runtime.storageService.DeleteContainer(c.ID()); err != nil {\n\t\treturn errors.Wrapf(err, \"error removing container %s root filesystem\", c.ID())\n\t}\n\n\treturn nil\n}", "func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (res ctrl.Result, err error) {\n\tlog := r.log.WithValues(strings.ToLower(r.gvk.Kind), req.NamespacedName)\n\n\tobj := &unstructured.Unstructured{}\n\tobj.SetGroupVersionKind(*r.gvk)\n\terr = r.client.Get(ctx, req.NamespacedName, obj)\n\tif apierrors.IsNotFound(err) {\n\t\treturn ctrl.Result{}, nil\n\t}\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tu := updater.New(r.client)\n\tdefer func() {\n\t\tapplyErr := u.Apply(ctx, obj)\n\t\tif err == nil && !apierrors.IsNotFound(applyErr) {\n\t\t\terr = applyErr\n\t\t}\n\t}()\n\n\tactionClient, err := r.actionClientGetter.ActionClientFor(obj)\n\tif err != nil {\n\t\tu.UpdateStatus(\n\t\t\tupdater.EnsureCondition(conditions.Irreconcilable(corev1.ConditionTrue, conditions.ReasonErrorGettingClient, err)),\n\t\t\tupdater.EnsureConditionUnknown(conditions.TypeDeployed),\n\t\t\tupdater.EnsureConditionUnknown(conditions.TypeInitialized),\n\t\t\tupdater.EnsureConditionUnknown(conditions.TypeReleaseFailed),\n\t\t\tupdater.EnsureDeployedRelease(nil),\n\t\t)\n\t\t// NOTE: If obj has the uninstall finalizer, that means a release WAS deployed at some point\n\t\t// in the past, but we don't know if it still is because we don't have an actionClient to check.\n\t\t// So the question is, what do we do with the finalizer? We could:\n\t\t// - Leave it in place. 
This would make the CR impossible to delete without either resolving this error, or\n\t\t// manually uninstalling the release, deleting the finalizer, and deleting the CR.\n\t\t// - Remove the finalizer. This would make it possible to delete the CR, but it would leave around any\n\t\t// release resources that are not owned by the CR (those in the cluster scope or in other namespaces).\n\t\t//\n\t\t// The decision made for now is to leave the finalizer in place, so that the user can intervene and try to\n\t\t// resolve the issue, instead of the operator silently leaving some dangling resources hanging around after the\n\t\t// CR is deleted.\n\t\treturn ctrl.Result{}, err\n\t}\n\n\t// As soon as we get the actionClient, lookup the release and\n\t// update the status with this info. We need to do this as\n\t// early as possible in case other irreconcilable errors occur.\n\t//\n\t// We also make sure not to return any errors we encounter so\n\t// we can still attempt an uninstall if the CR is being deleted.\n\trel, err := actionClient.Get(obj.GetName())\n\tif errors.Is(err, driver.ErrReleaseNotFound) {\n\t\tu.UpdateStatus(updater.EnsureCondition(conditions.Deployed(corev1.ConditionFalse, \"\", \"\")))\n\t} else if err == nil {\n\t\tr.ensureDeployedRelease(&u, rel)\n\t}\n\tu.UpdateStatus(updater.EnsureCondition(conditions.Initialized(corev1.ConditionTrue, \"\", \"\")))\n\n\tfor _, ext := range r.preExtensions {\n\t\tif err := ext(ctx, obj, u.UpdateStatusCustom, r.log); err != nil {\n\t\t\tu.UpdateStatus(\n\t\t\t\tupdater.EnsureCondition(conditions.Irreconcilable(corev1.ConditionTrue, conditions.ReasonReconcileError, err)),\n\t\t\t\tupdater.EnsureConditionUnknown(conditions.TypeReleaseFailed),\n\t\t\t)\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t}\n\n\tif obj.GetDeletionTimestamp() != nil {\n\t\terr := r.handleDeletion(ctx, actionClient, obj, log)\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tvals, err := r.getValues(ctx, obj)\n\tif err != nil 
{\n\t\tu.UpdateStatus(\n\t\t\tupdater.EnsureCondition(conditions.Irreconcilable(corev1.ConditionTrue, conditions.ReasonErrorGettingValues, err)),\n\t\t\tupdater.EnsureConditionUnknown(conditions.TypeReleaseFailed),\n\t\t)\n\t\treturn ctrl.Result{}, err\n\t}\n\n\trel, state, err := r.getReleaseState(actionClient, obj, vals.AsMap())\n\tif err != nil {\n\t\tu.UpdateStatus(\n\t\t\tupdater.EnsureCondition(conditions.Irreconcilable(corev1.ConditionTrue, conditions.ReasonErrorGettingReleaseState, err)),\n\t\t\tupdater.EnsureConditionUnknown(conditions.TypeReleaseFailed),\n\t\t\tupdater.EnsureConditionUnknown(conditions.TypeDeployed),\n\t\t\tupdater.EnsureDeployedRelease(nil),\n\t\t)\n\t\treturn ctrl.Result{}, err\n\t}\n\tif state == statePending {\n\t\treturn r.handlePending(actionClient, rel, &u, log)\n\t}\n\n\tu.UpdateStatus(updater.EnsureCondition(conditions.Irreconcilable(corev1.ConditionFalse, \"\", \"\")))\n\n\tfor _, h := range r.preHooks {\n\t\tif err := h.Exec(obj, vals, log); err != nil {\n\t\t\tlog.Error(err, \"pre-release hook failed\")\n\t\t}\n\t}\n\n\tswitch state {\n\tcase stateNeedsInstall:\n\t\trel, err = r.doInstall(actionClient, &u, obj, vals.AsMap(), log)\n\t\tif err != nil {\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\n\tcase stateNeedsUpgrade:\n\t\trel, err = r.doUpgrade(actionClient, &u, obj, vals.AsMap(), log)\n\t\tif err != nil {\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\n\tcase stateUnchanged:\n\t\tif err := r.doReconcile(actionClient, &u, rel, log); err != nil {\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\tdefault:\n\t\treturn ctrl.Result{}, fmt.Errorf(\"unexpected release state: %s\", state)\n\t}\n\n\tfor _, h := range r.postHooks {\n\t\tif err := h.Exec(obj, *rel, log); err != nil {\n\t\t\tlog.Error(err, \"post-release hook failed\", \"name\", rel.Name, \"version\", rel.Version)\n\t\t}\n\t}\n\n\tfor _, ext := range r.postExtensions {\n\t\tif err := ext(ctx, obj, u.UpdateStatusCustom, r.log); err != nil 
{\n\t\t\tu.UpdateStatus(\n\t\t\t\tupdater.EnsureCondition(conditions.Irreconcilable(corev1.ConditionTrue, conditions.ReasonReconcileError, err)),\n\t\t\t\tupdater.EnsureConditionUnknown(conditions.TypeReleaseFailed),\n\t\t\t)\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t}\n\n\tr.ensureDeployedRelease(&u, rel)\n\tu.UpdateStatus(\n\t\tupdater.EnsureCondition(conditions.ReleaseFailed(corev1.ConditionFalse, \"\", \"\")),\n\t\tupdater.EnsureCondition(conditions.Irreconcilable(corev1.ConditionFalse, \"\", \"\")),\n\t)\n\n\treturn ctrl.Result{RequeueAfter: r.reconcilePeriod}, nil\n}", "func (r *ReconcileEgressGateway) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {\n\treqLogger := log.WithValues(\"Request.Namespace\", request.Namespace, \"Request.Name\", request.Name)\n\treqLogger.Info(\"Reconciling EgressGateway\")\n\n\t// Get all the Egress Gateway resources available.\n\tegws, err := getEgressGateways(ctx, r.client)\n\tif err != nil {\n\t\treqLogger.Error(err, \"Error querying for Egress Gateway\")\n\t\tr.status.SetDegraded(operatorv1.ResourceReadError, \"Error querying for Egress Gateway\", err, reqLogger)\n\t\treturn reconcile.Result{}, err\n\t}\n\n\t// If there are no Egress Gateway resources, return.\n\tch := utils.NewComponentHandler(log, r.client, r.scheme, nil)\n\tif len(egws) == 0 {\n\t\tvar objects []client.Object\n\t\tif r.provider == operatorv1.ProviderOpenShift {\n\t\t\tobjects = append(objects, egressgateway.SecurityContextConstraints())\n\t\t}\n\t\tif r.usePSP {\n\t\t\tobjects = append(objects, egressgateway.PodSecurityPolicy())\n\t\t}\n\t\terr := ch.CreateOrUpdateOrDelete(ctx, render.NewDeletionPassthrough(objects...), r.status)\n\t\tif err != nil {\n\t\t\treqLogger.Error(err, \"error deleting cluster scoped resources\")\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\tr.status.OnCRNotFound()\n\t\treturn reconcile.Result{}, nil\n\t}\n\n\t/* Reconcile is done as follows.\n\t1. 
At the start, assume all the EGW resources need to be reconciled.\n\t2. If the request is to a particular EGW resource, find the requested EGW from the list of EGWs.\n\t3. If the requested EGW resource is not present, it could have been deleted.\n\t 'egws' is now the list of all EGW resources present. Get the cumulative status and update\n\t TigeraStatus object.\n\t4. If the requested EGW resource is present, then it is the only resource to reconcile. Remove this\n\t EGW resource from the list and get the status of all the other EGW resources present and update\n\t TigeraStatus accordingly.\n\t5. If the request is not to a particular EGW resource, reconcile all the resources.\n\t*/\n\n\t// egwsToReconcile is the list of Egress Gateway resources that needs to be reconciled.\n\t// To start with all EGW resources must be reconciled.\n\tegwsToReconcile := egws\n\tnamespaceAndNames := getEGWNamespaceAndNames(egws)\n\tif request.Namespace != \"\" {\n\t\trequestedEGW, idx := getRequestedEgressGateway(egws, request)\n\t\tif requestedEGW == nil {\n\t\t\t// Request object not found, could have been deleted after reconcile request.\n\t\t\t// Owned objects are automatically garbage collected. 
For additional cleanup logic use finalizers.\n\t\t\treqLogger.Info(\"EgressGateway object not found\")\n\t\t\t// Since the EGW resource is not found, remove the deployment.\n\t\t\tr.status.RemoveDeployments(types.NamespacedName{Name: request.Name, Namespace: request.Namespace})\n\t\t\t// In the case of OpenShift, we are using a single SCC.\n\t\t\t// Whenever a EGW resource is deleted, remove the corresponding user from the SCC\n\t\t\t// and update the resource.\n\t\t\tif r.provider == operatorv1.ProviderOpenShift {\n\t\t\t\tscc, err := getOpenShiftSCC(ctx, r.client)\n\t\t\t\tif err != nil {\n\t\t\t\t\treqLogger.Error(err, \"Error querying SecurityContextConstraints\")\n\t\t\t\t\treturn reconcile.Result{}, err\n\t\t\t\t}\n\t\t\t\tuserString := fmt.Sprintf(\"system:serviceaccount:%s:%s\", request.Namespace, request.Name)\n\t\t\t\tfor index, user := range scc.Users {\n\t\t\t\t\tif user == userString {\n\t\t\t\t\t\tscc.Users = append(scc.Users[:index], scc.Users[index+1:]...)\n\t\t\t\t\t\terr := ch.CreateOrUpdateOrDelete(ctx, render.NewPassthrough(scc), r.status)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treqLogger.Error(err, \"error updating security context constraints\")\n\t\t\t\t\t\t}\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t}\n\t\t\t// Get the unready EGW. Let's say we have 2 EGW resources red and blue.\n\t\t\t// Red has already degraded. When the user deletes Red, TigeraStatus should go back to available\n\t\t\t// as Blue is healthy. If all the EGWs are ready, clear the degraded TigeraStatus.\n\t\t\t// If at least one of the EGWs is unhealthy, get the degraded msg from the conditions and\n\t\t\t// update the TigeraStatus.\n\t\t\tunreadyEGW := getUnreadyEgressGateway(egws)\n\t\t\tif unreadyEGW != nil {\n\t\t\t\tr.status.SetDegraded(operatorv1.ResourceNotReady,\n\t\t\t\t\tfmt.Sprintf(\"Error reconciling Egress Gateway resource. 
Name=%s Namespace=%s\", unreadyEGW.Name, unreadyEGW.Namespace),\n\t\t\t\t\tfmt.Errorf(\"%s\", getDegradedMsg(unreadyEGW)), reqLogger)\n\t\t\t\treturn reconcile.Result{}, nil\n\t\t\t}\n\t\t\tr.status.ClearDegraded()\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\t// If the EGW resource is present, reconcile only that resource.\n\t\t// Remove this from the list of EGWs before computing status.\n\t\tegwsToReconcile = []operatorv1.EgressGateway{*requestedEGW}\n\t\tegws = append(egws[:idx], egws[idx+1:]...)\n\t}\n\tr.status.OnCRFound()\n\n\t// Get the unready EGW.\n\tunreadyEGW := getUnreadyEgressGateway(egws)\n\n\tif !r.licenseAPIReady.IsReady() {\n\t\tr.status.SetDegraded(operatorv1.ResourceNotReady, \"Waiting for LicenseKeyAPI to be ready\", nil, reqLogger)\n\t\treturn reconcile.Result{RequeueAfter: 10 * time.Second}, nil\n\t}\n\n\tvariant, installation, err := utils.GetInstallation(ctx, r.client)\n\tif err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\treqLogger.Error(err, \"Installation not found\")\n\t\t\tr.status.SetDegraded(operatorv1.ResourceNotFound, \"Installation not found\", err, reqLogger)\n\t\t\t// Set the EGW resource's condition to Degraded.\n\t\t\tfor _, egw := range egwsToReconcile {\n\t\t\t\tsetDegraded(r.client, ctx, &egw, reconcileErr, fmt.Sprintf(\"Installation not found err = %s\", err.Error()))\n\t\t\t}\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\treqLogger.Error(err, \"Error querying installation\")\n\t\tr.status.SetDegraded(operatorv1.ResourceReadError, \"Error querying installation\", err, reqLogger)\n\t\tfor _, egw := range egwsToReconcile {\n\t\t\tsetDegraded(r.client, ctx, &egw, reconcileErr, fmt.Sprintf(\"Error querying installation err = %s\", err.Error()))\n\t\t}\n\t\treturn reconcile.Result{}, err\n\t}\n\n\tif variant != operatorv1.TigeraSecureEnterprise {\n\t\tdegradedMsg := fmt.Sprintf(\"Waiting for network to be %s\", operatorv1.TigeraSecureEnterprise)\n\t\treqLogger.Error(err, 
degradedMsg)\n\t\tr.status.SetDegraded(operatorv1.ResourceNotReady, degradedMsg, nil, reqLogger)\n\t\tfor _, egw := range egwsToReconcile {\n\t\t\tsetDegraded(r.client, ctx, &egw, reconcileErr, degradedMsg)\n\t\t}\n\t\treturn reconcile.Result{}, nil\n\t}\n\n\tinstallStatus, err := utils.GetInstallationStatus(ctx, r.client)\n\tif err != nil {\n\t\treqLogger.Error(err, \"Error querying installation status\")\n\t\tr.status.SetDegraded(operatorv1.ResourceReadError, \"Error querying installation status\", err, reqLogger)\n\t\tfor _, egw := range egwsToReconcile {\n\t\t\tsetDegraded(r.client, ctx, &egw, reconcileErr, fmt.Sprintf(\"Error querying installation status err = %s\", err.Error()))\n\t\t}\n\t\treturn reconcile.Result{}, err\n\t}\n\n\tif installStatus.CalicoVersion != components.EnterpriseRelease {\n\t\treqLogger.WithValues(\"version\", components.EnterpriseRelease).Info(\"Waiting for expected version of Calico to be installed\")\n\t\treturn reconcile.Result{RequeueAfter: 30 * time.Second}, nil\n\t}\n\n\tpullSecrets, err := utils.GetNetworkingPullSecrets(installation, r.client)\n\tif err != nil {\n\t\treqLogger.Error(err, \"Error retrieving pull secrets\")\n\t\tr.status.SetDegraded(operatorv1.ResourceReadError, \"Error retrieving pull secrets\", err, reqLogger)\n\t\tfor _, egw := range egwsToReconcile {\n\t\t\tsetDegraded(r.client, ctx, &egw, reconcileErr, fmt.Sprintf(\"Error retrieving pull secrets err = %s\", err.Error()))\n\t\t}\n\t\treturn reconcile.Result{}, err\n\t}\n\n\t// patch and get the felix configuration\n\tfc, err := utils.PatchFelixConfiguration(ctx, r.client, func(fc *crdv1.FelixConfiguration) bool {\n\t\tif fc.Spec.PolicySyncPathPrefix != \"\" {\n\t\t\treturn false // don't proceed with the patch\n\t\t}\n\t\tfc.Spec.PolicySyncPathPrefix = \"/var/run/nodeagent\"\n\t\treturn true // proceed with this patch\n\t})\n\tif err != nil {\n\t\treqLogger.Error(err, \"Error patching felix 
configuration\")\n\t\tr.status.SetDegraded(operatorv1.ResourcePatchError, \"Error patching felix configuration\", err, reqLogger)\n\t\tfor _, egw := range egwsToReconcile {\n\t\t\tsetDegraded(r.client, ctx, &egw, reconcileErr, fmt.Sprintf(\"Error patching felix configuration err = %s\", err.Error()))\n\t\t}\n\t\treturn reconcile.Result{}, err\n\t}\n\n\t// Reconcile all the EGWs\n\tvar errMsgs []string\n\tfor _, egw := range egwsToReconcile {\n\t\terr = r.reconcileEgressGateway(ctx, &egw, reqLogger, variant, fc, pullSecrets, installation, namespaceAndNames)\n\t\tif err != nil {\n\t\t\treqLogger.Error(err, \"Error reconciling egress gateway\")\n\t\t\terrMsgs = append(errMsgs, err.Error())\n\t\t}\n\t}\n\tif len(errMsgs) != 0 {\n\t\treturn reconcile.Result{}, fmt.Errorf(strings.Join(errMsgs, \";\"))\n\t}\n\n\tif unreadyEGW != nil {\n\t\tr.status.SetDegraded(operatorv1.ResourceCreateError,\n\t\t\tfmt.Sprintf(\"Error reconciling Egress Gateway resource. Name=%s Namespace=%s\", unreadyEGW.Name, unreadyEGW.Namespace),\n\t\t\tfmt.Errorf(\"%s\", getDegradedMsg(unreadyEGW)), reqLogger)\n\t\treturn reconcile.Result{}, nil\n\t}\n\n\tr.status.ClearDegraded()\n\tif !r.status.IsAvailable() {\n\t\t// Schedule a kick to check again in the near future, hopefully by then things will be available.\n\t\treturn reconcile.Result{RequeueAfter: 30 * time.Second}, nil\n\t}\n\treturn reconcile.Result{}, nil\n}", "func (r *ReconcileZdyfapi) Reconcile(request reconcile.Request) (reconcile.Result, error) {\n\treqLogger := log.WithValues(\"Request.Namespace\", request.Namespace, \"Request.Name\", request.Name)\n\treqLogger.Info(\"Reconciling Zdyfapi\")\n\n\n\n\t// Fetch the Zdyfapi instance for hdfs\n\t//next := 0\n\tinstance := &zdyfv1alpha1.Zdyfapi{}\n\terr := r.client.Get(context.TODO(), request.NamespacedName, instance)\n\tif err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\t// Request object not found, could have been deleted after reconcile request.\n\t\t\t// Owned objects are 
automatically garbage collected. For additional cleanup logic use finalizers.\n\t\t\t// Return and don't requeue\n\t\t\treqLogger.Info(\"instance resource not found.Ignoring since object nust be deleted.\")\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\t// Error reading the object - requeue the request.\n\t\treqLogger.Error(err, \"Failed to get instance.\")\n\t\treturn reconcile.Result{}, err\n\t}\n\t//如果不为空,就会阻止删除\n\tif instance.DeletionTimestamp != nil {\n\t\treturn reconcile.Result{}, nil\n\t}\n\n\treqLogger.Info(\"instance is\", *instance, *instance.Spec.NameReplicas)\n\n\t//HDFS sc\n\tfoundsc := v1.StorageClass{}\n\terr = r.client.Get(context.TODO(), types.NamespacedName{Namespace: instance.Namespace, Name: instance.Name}, &foundsc)\n\tif err != nil && errors.IsNotFound(err) {\n\t\tnamesc := r.NewSc(instance)\n\t\treqLogger.Info(\"Creating a new sc \", \"StorageClass.Namespace\", namesc.Namespace, \"StorageClass.Name\", namesc.Name)\n\t\terr = r.client.Create(context.TODO(), namesc)\n\t\tif err != nil {\n\t\t\treqLogger.Error(err, \"Failed to create new sc\", \"StorageClass.Namespace\", namesc.Namespace, \"StorageClass.Name\", namesc.Name)\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t\treqLogger.Info(\"Creating a new sc successfully\")\n\n\t\t// 关联 Annotations - spec\n\t\tdata, _ := json.Marshal(instance.Spec)\n\t\tif instance.Annotations != nil {\n\t\t\tinstance.Annotations[\"spec\"] = string(data)\n\t\t} else {\n\t\t\tinstance.Annotations = map[string]string{\"spec\": string(data)}\n\t\t}\n\n\t\tif err := r.client.Update(context.TODO(), instance); err != nil {\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\t//return reconcile.Result{Requeue: true}, nil\n\t} else if err != nil {\n\t\treqLogger.Error(err, \"Failed to get pv .\")\n\t\treturn reconcile.Result{}, err\n\t}\n\n\treqLogger.Info(\"next is name pv!!!!\")\n\n\t//namenode pv\n\t//create pv pvc before the deployment and service\n\tfoundpv := &corev1.PersistentVolume{}\n\terr = 
r.client.Get(context.TODO(), types.NamespacedName{Name: instance.Name, Namespace: instance.Namespace}, foundpv)\n\tif err != nil && errors.IsNotFound(err) {\n\t\tnamepv := r.NameNewPv(instance)\n\t\treqLogger.Info(\"Creating a new Pv.\", \"PersistentVolume.Namespace\", namepv.Namespace, \"PersistentVolume.Name\", namepv.Name)\n\t\terr = r.client.Create(context.TODO(), namepv) //saves the object obj in the Kubernetes cluster\n\t\tif err != nil {\n\t\t\treqLogger.Error(err, \"Failed to create new pv .\", \"PersistentVolume.Namespace\", namepv.Namespace, \"PersistentVolume.Name\", namepv.Name)\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t\t// 关联 Annotations - spec\n\t\tdata, _ := json.Marshal(instance.Spec)\n\t\tif instance.Annotations != nil {\n\t\t\tinstance.Annotations[\"spec\"] = string(data)\n\t\t} else {\n\t\t\tinstance.Annotations = map[string]string{\"spec\": string(data)}\n\t\t}\n\n\t\tif err := r.client.Update(context.TODO(), instance); err != nil {\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\t// return reconcile.Result{Requeue: true}, nil\n\t} else if err != nil {\n\t\treqLogger.Error(err, \"Failed to get name pv .\")\n\t\treturn reconcile.Result{}, err\n\t}\n\n\t//namenode pvc\n\tfoundpvc := &corev1.PersistentVolumeClaim{}\n\terr = r.client.Get(context.TODO(), types.NamespacedName{Name: instance.Name, Namespace: instance.Namespace}, foundpvc)\n\tif err != nil && errors.IsNotFound(err) {\n\t\tnamepvc := r.NameNewPvc(instance)\n\t\treqLogger.Info(\"Creating a new pvc.\", \"PersistentVolumeClaim.Namespace\", namepvc.Namespace, \"PersistentVolumeClaim.Name\", namepvc.Name)\n\t\terr = r.client.Create(context.TODO(), namepvc) //saves the object obj in the Kubernetes cluster\n\t\tif err != nil {\n\t\t\treqLogger.Info(\"the err is \", err)\n\t\t\treqLogger.Error(err, \"Failed to create new pvc.\", \"PersistentVolumeClaim.Namespace\", namepvc.Namespace, \"PersistentVolumeClaim.Name\", namepvc.Name)\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\n\t\t// 关联 
Annotations - spec\n\t\tdata, _ := json.Marshal(instance.Spec)\n\t\tif instance.Annotations != nil {\n\t\t\tinstance.Annotations[\"spec\"] = string(data)\n\t\t} else {\n\t\t\tinstance.Annotations = map[string]string{\"spec\": string(data)}\n\t\t}\n\n\t\tif err := r.client.Update(context.TODO(), instance); err != nil {\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\n\t} else if err != nil {\n\t\treqLogger.Error(err, \"Failed to get pvc .\")\n\t\treturn reconcile.Result{}, err\n\t}\n\t///\n\t//datanode pv\n\tdfoundpv := &corev1.PersistentVolume{}\n\terr = r.client.Get(context.TODO(), types.NamespacedName{Name: instance.Name, Namespace: instance.Namespace}, dfoundpv)\n\tif err != nil && errors.IsNotFound(err) {\n\t\tdatapv := r.DataNewPv(instance)\n\t\treqLogger.Info(\"Creating a new data Pv.\", \"PersistentVolume.Namespace\", datapv.Namespace, \"PersistentVolume.Name\", datapv.Name)\n\t\terr = r.client.Create(context.TODO(), datapv) //saves the object obj in the Kubernetes cluster\n\t\tif err != nil {\n\t\t\treqLogger.Error(err, \"Failed to create new data pv .\", \"PersistentVolume.Namespace\", datapv.Namespace, \"PersistentVolume.Name\", datapv.Name)\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t\t// 关联 Annotations - spec\n\t\tdata, _ := json.Marshal(instance.Spec)\n\t\tif instance.Annotations != nil {\n\t\t\tinstance.Annotations[\"spec\"] = string(data)\n\t\t} else {\n\t\t\tinstance.Annotations = map[string]string{\"spec\": string(data)}\n\t\t}\n\n\t\tif err := r.client.Update(context.TODO(), instance); err != nil {\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\t// return reconcile.Result{Requeue: true}, nil\n\t} else if err != nil {\n\t\treqLogger.Error(err, \"Failed to get data pv .\")\n\t\treturn reconcile.Result{}, err\n\t}\n\t///////\n\t//datanode the second pv\n\tdfoundpvse := &corev1.PersistentVolume{}\n\terr = r.client.Get(context.TODO(), types.NamespacedName{Name: instance.Name, Namespace: instance.Namespace}, dfoundpvse)\n\tif err != nil && 
errors.IsNotFound(err) {\n\t\tdatapvs := r.DataNewPvse(instance)\n\t\treqLogger.Info(\"Creating next new data Pv two.\", \"PersistentVolume.Namespace\", datapvs.Namespace, \"PersistentVolume.Name\", datapvs.Name)\n\t\terr = r.client.Create(context.TODO(), datapvs) //saves the object obj in the Kubernetes cluster\n\t\tif err != nil {\n\t\t\treqLogger.Error(err, \"Failed to create new data pv .\", \"PersistentVolume.Namespace\", datapvs.Namespace, \"PersistentVolume.Name\", datapvs.Name)\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t\t// 关联 Annotations - spec\n\t\tdata, _ := json.Marshal(instance.Spec)\n\t\tif instance.Annotations != nil {\n\t\t\tinstance.Annotations[\"spec\"] = string(data)\n\t\t} else {\n\t\t\tinstance.Annotations = map[string]string{\"spec\": string(data)}\n\t\t}\n\n\t\tif err := r.client.Update(context.TODO(), instance); err != nil {\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\t// return reconcile.Result{Requeue: true}, nil\n\t} else if err != nil {\n\t\treqLogger.Error(err, \"Failed to get data pv .\")\n\t\treturn reconcile.Result{}, err\n\t}\n\t///////\n\t//datanode the 3 pv\n\tdfoundpvth := &corev1.PersistentVolume{}\n\terr = r.client.Get(context.TODO(), types.NamespacedName{Name: instance.Name, Namespace: instance.Namespace}, dfoundpvth)\n\tif err != nil && errors.IsNotFound(err) {\n\t\tdatapvs := r.DataNewPvth(instance)\n\t\treqLogger.Info(\"Creating next new data Pv two.\", \"PersistentVolume.Namespace\", datapvs.Namespace, \"PersistentVolume.Name\", datapvs.Name)\n\t\terr = r.client.Create(context.TODO(), datapvs) //saves the object obj in the Kubernetes cluster\n\t\tif err != nil {\n\t\t\treqLogger.Error(err, \"Failed to create new data pv .\", \"PersistentVolume.Namespace\", datapvs.Namespace, \"PersistentVolume.Name\", datapvs.Name)\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t\t// 关联 Annotations - spec\n\t\tdata, _ := json.Marshal(instance.Spec)\n\t\tif instance.Annotations != nil {\n\t\t\tinstance.Annotations[\"spec\"] = 
string(data)\n\t\t} else {\n\t\t\tinstance.Annotations = map[string]string{\"spec\": string(data)}\n\t\t}\n\n\t\tif err := r.client.Update(context.TODO(), instance); err != nil {\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\t// return reconcile.Result{Requeue: true}, nil\n\t} else if err != nil {\n\t\treqLogger.Error(err, \"Failed to get data pv .\")\n\t\treturn reconcile.Result{}, err\n\t}\n\t//////\n\t//datanode the 4 pv\n\tdfoundpvfo := &corev1.PersistentVolume{}\n\terr = r.client.Get(context.TODO(), types.NamespacedName{Name: instance.Name, Namespace: instance.Namespace}, dfoundpvfo)\n\tif err != nil && errors.IsNotFound(err) {\n\t\tdatapvs := r.DataNewPvfo(instance)\n\t\treqLogger.Info(\"Creating next new data Pv two.\", \"PersistentVolume.Namespace\", datapvs.Namespace, \"PersistentVolume.Name\", datapvs.Name)\n\t\terr = r.client.Create(context.TODO(), datapvs) //saves the object obj in the Kubernetes cluster\n\t\tif err != nil {\n\t\t\treqLogger.Error(err, \"Failed to create new data pv .\", \"PersistentVolume.Namespace\", datapvs.Namespace, \"PersistentVolume.Name\", datapvs.Name)\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t\t// 关联 Annotations - spec\n\t\tdata, _ := json.Marshal(instance.Spec)\n\t\tif instance.Annotations != nil {\n\t\t\tinstance.Annotations[\"spec\"] = string(data)\n\t\t} else {\n\t\t\tinstance.Annotations = map[string]string{\"spec\": string(data)}\n\t\t}\n\n\t\tif err := r.client.Update(context.TODO(), instance); err != nil {\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\t// return reconcile.Result{Requeue: true}, nil\n\t} else if err != nil {\n\t\treqLogger.Error(err, \"Failed to get data pv .\")\n\t\treturn reconcile.Result{}, err\n\t}\n\t///////\n\t//datanode the 5 pv\n\tdfoundpvfi := &corev1.PersistentVolume{}\n\terr = r.client.Get(context.TODO(), types.NamespacedName{Name: instance.Name, Namespace: instance.Namespace}, dfoundpvfi)\n\tif err != nil && errors.IsNotFound(err) {\n\t\tdatapvs := 
r.DataNewPvfi(instance)\n\t\treqLogger.Info(\"Creating next new data Pv five.\", \"PersistentVolume.Namespace\", datapvs.Namespace, \"PersistentVolume.Name\", datapvs.Name)\n\t\terr = r.client.Create(context.TODO(), datapvs) //saves the object obj in the Kubernetes cluster\n\t\tif err != nil {\n\t\t\treqLogger.Error(err, \"Failed to create new data pv .\", \"PersistentVolume.Namespace\", datapvs.Namespace, \"PersistentVolume.Name\", datapvs.Name)\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t\t// 关联 Annotations - spec\n\t\tdata, _ := json.Marshal(instance.Spec)\n\t\tif instance.Annotations != nil {\n\t\t\tinstance.Annotations[\"spec\"] = string(data)\n\t\t} else {\n\t\t\tinstance.Annotations = map[string]string{\"spec\": string(data)}\n\t\t}\n\n\t\tif err := r.client.Update(context.TODO(), instance); err != nil {\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\t// return reconcile.Result{Requeue: true}, nil\n\t} else if err != nil {\n\t\treqLogger.Error(err, \"Failed to get data pv .\")\n\t\treturn reconcile.Result{}, err\n\t}\n\t////\n\n\t//Check if the deployment and service already exists, if not create a new one\n\tfounddep := &appsv1.Deployment{}\n\terr = r.client.Get(context.TODO(), types.NamespacedName{Name: instance.Name, Namespace: instance.Namespace}, founddep) //request.NamespacedName,\n\tif err != nil && errors.IsNotFound(err) {\n\t\t//define a new deployment namenodedep (instance)\n\t\tnamenodedep := r.NewDeploy(instance)\n\t\treqLogger.Info(\"Creating a new Deployment namenodedep.\", \"Deployment.Namespace\", namenodedep.Namespace, \"Deployment.Name\", namenodedep.Name)\n\t\terr = r.client.Create(context.TODO(), namenodedep) //saves the object obj in the Kubernetes cluster\n\t\tif err != nil {\n\t\t\treqLogger.Error(err, \"Failed to create new Deployment namenodedep.\", \"Deployment.Namespace\", namenodedep.Namespace, \"Deployment.Name\", namenodedep.Name)\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t\t//deployment namenodedep created 
successfully - return and requeue\n\t\treqLogger.Info(\"After create namenodedep successfully, create the next nginx version\")\n\n\t\t//define a new service namenodeser\n\t\tnamenodeser := r.NameNewService(instance)\n\t\treqLogger.Info(\"Creating a new service namenodeser.\", \"Service.Namespace\", namenodedep.Namespace, \"Service.Name\", namenodedep.Name)\n\t\terr = r.client.Create(context.TODO(), namenodeser) //saves the object obj in the Kubernetes cluster\n\t\tif err != nil {\n\t\t\treqLogger.Error(err, \"Failed to create new Service namenodeser.\", \"Service.Namespace\", namenodedep.Namespace, \"Service.Name\", namenodedep.Name)\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t\t//service namenodeser created successfully - return and requeue\n\t\treqLogger.Info(\"After create namenodeser successfully\", \"Test\", instance.Spec.NameEnvsName2)\n\n\t\t// 关联 Annotations - spec\n\t\tdata, _ := json.Marshal(instance.Spec)\n\t\tif instance.Annotations != nil {\n\t\t\tinstance.Annotations[\"spec\"] = string(data)\n\t\t} else {\n\t\t\tinstance.Annotations = map[string]string{\"spec\": string(data)}\n\t\t}\n\n\t\tif err := r.client.Update(context.TODO(), instance); err != nil {\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\t//return reconcile.Result{Requeue: true}, nil\n\n\t} else if err != nil {\n\t\treqLogger.Error(err, \"Failed to get namenode Deployment and service .\")\n\t\treturn reconcile.Result{}, err\n\t}\n\treqLogger.Info(\"namenode is ok!!!!!!\")\n\t/////////\n\n\t//datanode 先service 后statefulset\n\tfoundser := corev1.Service{}\n\n\terr = r.client.Get(context.TODO(), types.NamespacedName{Namespace: instance.Namespace, Name: instance.Name}, &foundser)\n\tif err != nil && errors.IsNotFound(err) {\n\t\t//define a new service datanodeser\n\t\tdatanodeser := r.DataNewService(instance)\n\t\treqLogger.Info(\"Creating a new service datanodeser.\", \"Service.Namespace\", datanodeser.Namespace, \"Service.Name\", datanodeser.Name)\n\t\terr = 
r.client.Create(context.TODO(), datanodeser) //saves the object obj in the Kubernetes cluster\n\t\tif err != nil {\n\t\t\treqLogger.Error(err, \"Failed to create new Service datanodeser.\", \"Service.Namespace\", datanodeser.Namespace, \"Service.Name\", datanodeser.Name)\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t\t//service namenodeser created successfully - return and requeue\n\t\treqLogger.Info(\"After create datanodeser successfully\")\n\n\t\t// 关联 Annotations - spec\n\t\tdata, _ := json.Marshal(instance.Spec)\n\t\tif instance.Annotations != nil {\n\t\t\tinstance.Annotations[\"spec\"] = string(data)\n\t\t} else {\n\t\t\tinstance.Annotations = map[string]string{\"spec\": string(data)}\n\t\t}\n\n\t\tif err := r.client.Update(context.TODO(), instance); err != nil {\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\t//return reconcile.Result{Requeue: true}, nil\n\t} else if err != nil {\n\t\treqLogger.Error(err, \"Failed to get Statefulset datanode.\")\n\t\treturn reconcile.Result{}, err\n\t}\n\n\t//datanode statefulset\n\tfoundset := &appsv1.StatefulSet{}\n\terr = r.client.Get(context.TODO(), types.NamespacedName{Name: instance.Name, Namespace: instance.Namespace}, foundset)\n\tif err != nil && errors.IsNotFound(err) {\n\n\t\tdatanodeset := r.NewState(instance)\n\t\treqLogger.Info(\"Creating a new StatefulSet datanodeset.\", \"StatefulSet.Namespace\", datanodeset.Namespace, \"StatefulSet.Name\", datanodeset.Name)\n\t\terr = r.client.Create(context.TODO(), datanodeset) //saves the object obj in the Kubernetes cluster\n\t\tif err != nil {\n\t\t\treqLogger.Error(err, \"Failed to create new StatefulSet datanodeset.\", \"StatefulSet.Namespace\", datanodeset.Namespace, \"StatefulSet.Name\", datanodeset.Name)\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t\treqLogger.Info(\"After create datanodeset successfull\")\n\t\t// 关联 Annotations - spec\n\t\tdata, _ := json.Marshal(instance.Spec)\n\t\tif instance.Annotations != nil {\n\t\t\tinstance.Annotations[\"spec\"] = 
string(data)\n\t\t} else {\n\t\t\tinstance.Annotations = map[string]string{\"spec\": string(data)}\n\t\t}\n\n\t\tif err := r.client.Update(context.TODO(), instance); err != nil {\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\t//return reconcile.Result{Requeue: true}, nil\n\t} else if err != nil {\n\t\treqLogger.Error(err, \"Failed to get Statefulset datanode.\")\n\t\treturn reconcile.Result{}, err\n\t}\n\n\t//////////////////////////datanode////////////////////////////////\n\n\toldSpec := &zdyfv1alpha1.Zdyfapi{}\n\tif err := json.Unmarshal([]byte(instance.Annotations[\"spec\"]), oldSpec); err != nil {\n\t\treturn reconcile.Result{}, err\n\t}\n\t// update deployment and service 's status\n\tif !reflect.DeepEqual(instance.Spec, oldSpec) {\n\t\t// 更新关联资源\n\t\t//namenode\n\t\tnamenewDeploy := r.NewDeploy(instance)\n\t\tnameoldDeploy := &appsv1.Deployment{}\n\t\tnameoldDeploy.Spec = namenewDeploy.Spec\n\t\tif err := r.client.Get(context.TODO(), request.NamespacedName, nameoldDeploy); err != nil {\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t\tif err := r.client.Update(context.TODO(), nameoldDeploy); err != nil {\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\n\t\t//update the service for namenode\n\t\tnamenewSer := r.NameNewService(instance)\n\t\tnameoldSer := &corev1.Service{}\n\t\tnameoldSer.Spec = namenewSer.Spec\n\t\tif err := r.client.Get(context.TODO(), request.NamespacedName, nameoldSer); err != nil {\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t\tif err := r.client.Update(context.TODO(), nameoldSer); err != nil {\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t\t// update the pv for namenode\n\t\tnamenewpv := r.NameNewPv(instance)\n\t\tnameoldpv := &corev1.PersistentVolume{}\n\t\tnameoldpv.Spec = namenewpv.Spec\n\t\tif err := r.client.Get(context.TODO(), request.NamespacedName, nameoldpv); err != nil {\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t\tif err := r.client.Update(context.TODO(), nameoldpv); err != nil {\n\t\t\treturn reconcile.Result{}, 
err\n\t\t}\n\n\t\tnamenewpvc := r.NameNewPvc(instance)\n\t\tnameoldpvc := &corev1.PersistentVolumeClaim{}\n\t\tnameoldpvc.Spec = namenewpvc.Spec\n\t\tif err := r.client.Get(context.TODO(), request.NamespacedName, nameoldpvc); err != nil {\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t\tif err := r.client.Update(context.TODO(), nameoldpvc); err != nil {\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t\t//datanode\n\t\tnewState := r.NewState(instance)\n\t\toldState := &appsv1.StatefulSet{}\n\t\toldState.Spec = newState.Spec\n\t\tif err := r.client.Get(context.TODO(), request.NamespacedName, oldState); err != nil {\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t\tif err := r.client.Update(context.TODO(), oldState); err != nil {\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\n\t\tdatanewSer := r.DataNewService(instance)\n\t\tdataoldSer := &corev1.Service{}\n\t\tdataoldSer.Spec = datanewSer.Spec\n\t\tif err := r.client.Get(context.TODO(), request.NamespacedName, dataoldSer); err != nil {\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t\tif err := r.client.Update(context.TODO(), dataoldSer); err != nil {\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\n\t\tdatanewPv := r.DataNewPv(instance)\n\t\tdataoldPv := &corev1.PersistentVolume{}\n\t\tdataoldPv.Spec = datanewPv.Spec\n\t\tif err := r.client.Get(context.TODO(), request.NamespacedName, dataoldPv); err != nil {\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t\tif err := r.client.Update(context.TODO(), dataoldPv); err != nil {\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\n\t\tdatanewPvs := r.DataNewPv(instance)\n\t\tdataoldPvs := &corev1.PersistentVolume{}\n\t\tdataoldPvs.Spec = datanewPvs.Spec\n\t\tif err := r.client.Get(context.TODO(), request.NamespacedName, dataoldPvs); err != nil {\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t\tif err := r.client.Update(context.TODO(), dataoldPvs); err != nil {\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\n\t\treturn reconcile.Result{}, nil\n\t}\n\n\treturn 
reconcile.Result{}, nil\n\n}", "func (r *ReconcileHostPathProvisioner) reconcileDaemonSet(reqLogger logr.Logger, instance *hostpathprovisionerv1alpha1.HostPathProvisioner, namespace string) (reconcile.Result, error) {\n\t// Define a new DaemonSet object\n\tprovisionerImage := os.Getenv(provisionerImageEnvVarName)\n\tif provisionerImage == \"\" {\n\t\treqLogger.Info(\"PROVISIONER_IMAGE not set, defaulting to hostpath-provisioner\")\n\t\tprovisionerImage = ProvisionerImageDefault\n\t}\n\n\tdesired := createDaemonSetObject(instance, provisionerImage, namespace)\n\tdesiredMetaObj := &desired.ObjectMeta\n\tsetLastAppliedConfiguration(desiredMetaObj)\n\n\t// Set HostPathProvisioner instance as the owner and controller\n\tif err := controllerutil.SetControllerReference(instance, desired, r.scheme); err != nil {\n\t\treturn reconcile.Result{}, err\n\t}\n\n\t// Check if this DaemonSet already exists\n\tfound := &appsv1.DaemonSet{}\n\terr := r.client.Get(context.TODO(), types.NamespacedName{Name: desired.Name, Namespace: desired.Namespace}, found)\n\tif err != nil && errors.IsNotFound(err) {\n\t\treqLogger.Info(\"Creating a new DaemonSet\", \"DaemonSet.Namespace\", desired.Namespace, \"Daemonset.Name\", desired.Name)\n\t\terr = r.client.Create(context.TODO(), desired)\n\t\tif err != nil {\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\n\t\t// DaemonSet created successfully - don't requeue\n\t\treturn reconcile.Result{}, nil\n\t} else if err != nil {\n\t\treturn reconcile.Result{}, err\n\t}\n\n\t// Keep a copy of the original for comparison later.\n\tcurrentRuntimeObjCopy := found.DeepCopyObject()\n\t// Copy found status fields, so the compare won't fail on desired/scheduled/ready pods being different. 
Updating will ignore them anyway.\n\tdesired = copyStatusFields(desired, found)\n\n\t// allow users to add new annotations (but not change ours)\n\tmergeLabelsAndAnnotations(desiredMetaObj, &found.ObjectMeta)\n\n\t// create merged DaemonSet from found and desired.\n\tmerged, err := mergeObject(desired, found)\n\tif err != nil {\n\t\treturn reconcile.Result{}, err\n\t}\n\n\tif !reflect.DeepEqual(currentRuntimeObjCopy, merged) {\n\t\tlogJSONDiff(reqLogger, currentRuntimeObjCopy, merged)\n\t\t// Current is different from desired, update.\n\t\treqLogger.Info(\"Updating DaemonSet\", \"DaemonSet.Name\", desired.Name)\n\t\terr = r.client.Update(context.TODO(), merged)\n\t\tif err != nil {\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t\treturn reconcile.Result{}, nil\n\t}\n\n\t// DaemonSet already exists and matches the desired state - don't requeue\n\treqLogger.Info(\"Skip reconcile: DaemonSet already exists\", \"DaemonSet.Namespace\", found.Namespace, \"Daemonset.Name\", found.Name)\n\treturn reconcile.Result{}, nil\n}", "func (o OceanLaunchSpecOutput) Storage() OceanLaunchSpecStorageOutput {\n\treturn o.ApplyT(func(v *OceanLaunchSpec) OceanLaunchSpecStorageOutput { return v.Storage }).(OceanLaunchSpecStorageOutput)\n}", "func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (res ctrl.Result, err error) {\n\tlog := r.log.WithValues(strings.ToLower(r.gvk.Kind), req.NamespacedName)\n\tlog.V(1).Info(\"Reconciliation triggered\")\n\n\tobj := &unstructured.Unstructured{}\n\tobj.SetGroupVersionKind(*r.gvk)\n\terr = r.client.Get(ctx, req.NamespacedName, obj)\n\tif apierrors.IsNotFound(err) {\n\t\tlog.V(1).Info(\"Resource %s/%s not found, nothing to do\", req.NamespacedName.Namespace, req.NamespacedName.Name)\n\t\treturn ctrl.Result{}, nil\n\t}\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tu := updater.New(r.client)\n\tdefer func() {\n\t\tapplyErr := u.Apply(ctx, obj)\n\t\tif err == nil && !apierrors.IsNotFound(applyErr) {\n\t\t\terr = 
applyErr\n\t\t}\n\t}()\n\n\tactionClient, err := r.actionClientGetter.ActionClientFor(obj)\n\tif err != nil {\n\t\tu.UpdateStatus(\n\t\t\tupdater.EnsureCondition(conditions.Irreconcilable(corev1.ConditionTrue, conditions.ReasonErrorGettingClient, err)),\n\t\t\tupdater.EnsureConditionUnknown(conditions.TypeDeployed),\n\t\t\tupdater.EnsureConditionUnknown(conditions.TypeInitialized),\n\t\t\tupdater.EnsureConditionUnknown(conditions.TypeReleaseFailed),\n\t\t\tupdater.EnsureDeployedRelease(nil),\n\t\t)\n\t\t// NOTE: If obj has the uninstall finalizer, that means a release WAS deployed at some point\n\t\t// in the past, but we don't know if it still is because we don't have an actionClient to check.\n\t\t// So the question is, what do we do with the finalizer? We could:\n\t\t// - Leave it in place. This would make the CR impossible to delete without either resolving this error, or\n\t\t// manually uninstalling the release, deleting the finalizer, and deleting the CR.\n\t\t// - Remove the finalizer. This would make it possible to delete the CR, but it would leave around any\n\t\t// release resources that are not owned by the CR (those in the cluster scope or in other namespaces).\n\t\t//\n\t\t// The decision made for now is to leave the finalizer in place, so that the user can intervene and try to\n\t\t// resolve the issue, instead of the operator silently leaving some dangling resources hanging around after the\n\t\t// CR is deleted.\n\t\treturn ctrl.Result{}, err\n\t}\n\n\t// As soon as we get the actionClient, lookup the release and\n\t// update the status with this info. 
We need to do this as\n\t// early as possible in case other irreconcilable errors occur.\n\t//\n\t// We also make sure not to return any errors we encounter so\n\t// we can still attempt an uninstall if the CR is being deleted.\n\trel, err := actionClient.Get(obj.GetName())\n\tif errors.Is(err, driver.ErrReleaseNotFound) {\n\t\tu.UpdateStatus(updater.EnsureCondition(conditions.Deployed(corev1.ConditionFalse, \"\", \"\")))\n\t} else if err == nil {\n\t\tensureDeployedRelease(&u, rel)\n\t}\n\tu.UpdateStatus(updater.EnsureCondition(conditions.Initialized(corev1.ConditionTrue, \"\", \"\")))\n\n\tif obj.GetDeletionTimestamp() != nil {\n\t\terr := r.handleDeletion(ctx, actionClient, obj, log)\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tvals, err := r.getValues(ctx, obj)\n\tif err != nil {\n\t\tu.UpdateStatus(\n\t\t\tupdater.EnsureCondition(conditions.Irreconcilable(corev1.ConditionTrue, conditions.ReasonErrorGettingValues, err)),\n\t\t\tupdater.EnsureConditionUnknown(conditions.TypeReleaseFailed),\n\t\t)\n\t\treturn ctrl.Result{}, err\n\t}\n\n\trel, state, err := r.getReleaseState(actionClient, obj, vals.AsMap())\n\tif err != nil {\n\t\tu.UpdateStatus(\n\t\t\tupdater.EnsureCondition(conditions.Irreconcilable(corev1.ConditionTrue, conditions.ReasonErrorGettingReleaseState, err)),\n\t\t\tupdater.EnsureConditionUnknown(conditions.TypeReleaseFailed),\n\t\t\tupdater.EnsureConditionUnknown(conditions.TypeDeployed),\n\t\t\tupdater.EnsureDeployedRelease(nil),\n\t\t)\n\t\treturn ctrl.Result{}, err\n\t}\n\tu.UpdateStatus(updater.EnsureCondition(conditions.Irreconcilable(corev1.ConditionFalse, \"\", \"\")))\n\n\tfor _, h := range r.preHooks {\n\t\tif err := h.Exec(obj, vals, log); err != nil {\n\t\t\tlog.Error(err, \"pre-release hook failed\")\n\t\t}\n\t}\n\n\tswitch state {\n\tcase stateNeedsInstall:\n\t\trel, err = r.doInstall(actionClient, &u, obj, vals.AsMap(), log)\n\t\tif err != nil {\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\n\tcase stateNeedsUpgrade:\n\t\trel, err = 
r.doUpgrade(actionClient, &u, obj, vals.AsMap(), log)\n\t\tif err != nil {\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\n\tcase stateUnchanged:\n\t\tif err := r.doReconcile(actionClient, &u, rel, log); err != nil {\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\tdefault:\n\t\treturn ctrl.Result{}, fmt.Errorf(\"unexpected release state: %s\", state)\n\t}\n\n\tfor _, h := range r.postHooks {\n\t\tif err := h.Exec(obj, *rel, log); err != nil {\n\t\t\tlog.Error(err, \"post-release hook failed\", \"name\", rel.Name, \"version\", rel.Version)\n\t\t}\n\t}\n\n\tensureDeployedRelease(&u, rel)\n\tu.UpdateStatus(\n\t\tupdater.EnsureCondition(conditions.ReleaseFailed(corev1.ConditionFalse, \"\", \"\")),\n\t\tupdater.EnsureCondition(conditions.Irreconcilable(corev1.ConditionFalse, \"\", \"\")),\n\t)\n\n\treturn ctrl.Result{RequeueAfter: r.reconcilePeriod}, nil\n}", "func resourceArmStorageAccountUpdate(d *schema.ResourceData, meta interface{}) error {\n\tclient := meta.(*ArmClient).storageServiceClient\n\tid, err := parseAzureResourceID(d.Id())\n\tif err != nil {\n\t\treturn err\n\t}\n\tstorageAccountName := id.Path[\"storageAccounts\"]\n\tresourceGroupName := id.ResourceGroup\n\n\td.Partial(true)\n\n\tif d.HasChange(\"account_type\") {\n\t\taccountType := d.Get(\"account_type\").(string)\n\n\t\tsku := storage.Sku{\n\t\t\tName: storage.SkuName(accountType),\n\t\t}\n\n\t\topts := storage.AccountUpdateParameters{\n\t\t\tSku: &sku,\n\t\t}\n\t\t_, err := client.Update(resourceGroupName, storageAccountName, opts)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error updating Azure Storage Account type %q: %s\", storageAccountName, err)\n\t\t}\n\n\t\td.SetPartial(\"account_type\")\n\t}\n\n\tif d.HasChange(\"access_tier\") {\n\t\taccessTier := d.Get(\"access_tier\").(string)\n\n\t\topts := storage.AccountUpdateParameters{\n\t\t\tAccountPropertiesUpdateParameters: &storage.AccountPropertiesUpdateParameters{\n\t\t\t\tAccessTier: storage.AccessTier(accessTier),\n\t\t\t},\n\t\t}\n\t\t_, err := 
client.Update(resourceGroupName, storageAccountName, opts)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error updating Azure Storage Account access_tier %q: %s\", storageAccountName, err)\n\t\t}\n\n\t\td.SetPartial(\"access_tier\")\n\t}\n\n\tif d.HasChange(\"tags\") {\n\t\ttags := d.Get(\"tags\").(map[string]interface{})\n\n\t\topts := storage.AccountUpdateParameters{\n\t\t\tTags: expandTags(tags),\n\t\t}\n\t\t_, err := client.Update(resourceGroupName, storageAccountName, opts)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error updating Azure Storage Account tags %q: %s\", storageAccountName, err)\n\t\t}\n\n\t\td.SetPartial(\"tags\")\n\t}\n\n\tif d.HasChange(\"enable_blob_encryption\") {\n\t\tenableBlobEncryption := d.Get(\"enable_blob_encryption\").(bool)\n\n\t\topts := storage.AccountUpdateParameters{\n\t\t\tAccountPropertiesUpdateParameters: &storage.AccountPropertiesUpdateParameters{\n\t\t\t\tEncryption: &storage.Encryption{\n\t\t\t\t\tServices: &storage.EncryptionServices{\n\t\t\t\t\t\tBlob: &storage.EncryptionService{\n\t\t\t\t\t\t\tEnabled: &enableBlobEncryption,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tKeySource: &storageAccountEncryptionSource,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\t_, err := client.Update(resourceGroupName, storageAccountName, opts)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error updating Azure Storage Account enable_blob_encryption %q: %s\", storageAccountName, err)\n\t\t}\n\n\t\td.SetPartial(\"enable_blob_encryption\")\n\t}\n\n\td.Partial(false)\n\treturn nil\n}", "func (cp *Compactor) Compact(ctx context.Context, opts *brtypes.CompactOptions) (*brtypes.Snapshot, error) {\n\tcp.logger.Info(\"Start compacting\")\n\n\t// Deepcopy restoration options ro to avoid any mutation of the passing object\n\tcompactorRestoreOptions := opts.RestoreOptions.DeepCopy()\n\n\t// If no base snapshot is found, abort compaction as there would be nothing to compact\n\tif compactorRestoreOptions.BaseSnapshot == nil {\n\t\tcp.logger.Error(\"No base 
snapshot found. Nothing is available for compaction\")\n\t\treturn nil, fmt.Errorf(\"no base snapshot found. Nothing is available for compaction\")\n\t}\n\n\tcp.logger.Infof(\"Creating temporary etcd directory %s for restoration.\", compactorRestoreOptions.Config.DataDir)\n\terr := os.MkdirAll(compactorRestoreOptions.Config.DataDir, 0700)\n\tif err != nil {\n\t\tcp.logger.Errorf(\"Unable to create temporary etcd directory for compaction: %s\", err.Error())\n\t\treturn nil, err\n\t}\n\n\tdefer func() {\n\t\tif err := os.RemoveAll(compactorRestoreOptions.Config.DataDir); err != nil {\n\t\t\tcp.logger.Errorf(\"Failed to remove temporary etcd directory %s: %v\", compactorRestoreOptions.Config.DataDir, err)\n\t\t}\n\t}()\n\n\t// Then restore from the snapshots\n\tr := restorer.NewRestorer(cp.store, cp.logger)\n\tembeddedEtcd, err := r.Restore(*compactorRestoreOptions, nil)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to restore snapshots during compaction: %v\", err)\n\t}\n\n\tcp.logger.Info(\"Restoration for compaction is done.\")\n\t// There is a possibility that restore operation may not start an embedded ETCD.\n\tif embeddedEtcd == nil {\n\t\tembeddedEtcd, err = miscellaneous.StartEmbeddedEtcd(cp.logger, compactorRestoreOptions)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tdefer func() {\n\t\tembeddedEtcd.Server.Stop()\n\t\tembeddedEtcd.Close()\n\t}()\n\n\tep := []string{embeddedEtcd.Clients[0].Addr().String()}\n\n\t// Then compact ETCD\n\n\t// Build Client\n\tclientFactory := etcdutil.NewClientFactory(compactorRestoreOptions.NewClientFactory, brtypes.EtcdConnectionConfig{\n\t\tMaxCallSendMsgSize: compactorRestoreOptions.Config.MaxCallSendMsgSize,\n\t\tEndpoints: ep,\n\t\tInsecureTransport: true,\n\t})\n\tclientKV, err := clientFactory.NewKV()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to build etcd KV client\")\n\t}\n\tdefer clientKV.Close()\n\n\tclientMaintenance, err := clientFactory.NewMaintenance()\n\tif err != nil 
{\n\t\treturn nil, fmt.Errorf(\"failed to build etcd maintenance client\")\n\t}\n\tdefer clientMaintenance.Close()\n\n\trevCheckCtx, cancel := context.WithTimeout(ctx, etcdDialTimeout)\n\tgetResponse, err := clientKV.Get(revCheckCtx, \"foo\")\n\tcancel()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to connect to etcd KV client: %v\", err)\n\t}\n\tetcdRevision := getResponse.Header.GetRevision()\n\n\t// Compact\n\t// Please refer below issue for why physical compaction was necessary\n\t// https://github.com/gardener/etcd-backup-restore/issues/451\n\tif _, err := clientKV.Compact(ctx, etcdRevision, clientv3.WithCompactPhysical()); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to compact: %v\", err)\n\t}\n\n\t// Then defrag ETCD\n\tif opts.NeedDefragmentation {\n\t\tclient, err := clientFactory.NewCluster()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to build etcd cluster client\")\n\t\t}\n\t\tdefer client.Close()\n\n\t\terr = etcdutil.DefragmentData(ctx, clientMaintenance, client, ep, opts.DefragTimeout.Duration, cp.logger)\n\t\tif err != nil {\n\t\t\tcp.logger.Errorf(\"failed to defragment: %v\", err)\n\t\t}\n\t}\n\n\t// Then take snapshot of ETCD\n\tsnapshotReqCtx, cancel := context.WithTimeout(ctx, opts.SnapshotTimeout.Duration)\n\tdefer cancel()\n\n\t// Determine suffix of compacted snapshot that will be result of this compaction\n\tsuffix := compactorRestoreOptions.BaseSnapshot.CompressionSuffix\n\tif len(compactorRestoreOptions.DeltaSnapList) > 0 {\n\t\tsuffix = compactorRestoreOptions.DeltaSnapList[compactorRestoreOptions.DeltaSnapList.Len()-1].CompressionSuffix\n\t}\n\n\tisCompressed, compressionPolicy, err := compressor.IsSnapshotCompressed(suffix)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to determine if snapshot is compressed: %v\", compactorRestoreOptions.BaseSnapshot.CompressionSuffix)\n\t}\n\n\tisFinal := compactorRestoreOptions.BaseSnapshot.IsFinal\n\n\tcc := &compressor.CompressionConfig{Enabled: 
isCompressed, CompressionPolicy: compressionPolicy}\n\tsnapshot, err := etcdutil.TakeAndSaveFullSnapshot(snapshotReqCtx, clientMaintenance, cp.store, etcdRevision, cc, suffix, isFinal, cp.logger)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Update snapshot lease only if lease update flag is enabled\n\tif opts.EnabledLeaseRenewal {\n\t\t// Update revisions in holder identity of full snapshot lease.\n\t\tctx, cancel := context.WithTimeout(ctx, brtypes.LeaseUpdateTimeoutDuration)\n\t\tif err := heartbeat.FullSnapshotCaseLeaseUpdate(ctx, cp.logger, snapshot, cp.k8sClientset, opts.FullSnapshotLeaseName, opts.DeltaSnapshotLeaseName); err != nil {\n\t\t\tcp.logger.Warnf(\"Snapshot lease update failed : %v\", err)\n\t\t}\n\t\tcancel()\n\t}\n\n\treturn snapshot, nil\n}", "func (s *Service) arcCompare(old *ugcmdl.ArchDatabus, new *ugcmdl.ArchDatabus) (err error) {\n\tvar (\n\t\tdiff *ugcmdl.VideoDiff\n\t\thitPGC bool\n\t)\n\tif hitPGC, err = s.delPGC(new.TypeID, new.Aid); err != nil {\n\t\treturn\n\t}\n\tif hitPGC { // if the archive hits PGC types, delete it\n\t\tlog.Warn(\"arcCompare Del Aid %d, Because of its typeID %d\", new.Aid, new.TypeID)\n\t\treturn\n\t}\n\tif s.diffArc(old, new) { // archive level info update if different\n\t\tif err = s.dao.UpdateArc(ctx, new); err != nil {\n\t\t\tappDao.PromError(\"DsUpdArc:Err\")\n\t\t\treturn\n\t\t}\n\t\ts.modArcCh <- []int64{new.Aid} // add one archive to submit\n\t\tappDao.PromInfo(\"DsUpdArc:Succ\")\n\t}\n\t// video level info update if different\n\tif diff, err = s.diffVideos(new.Aid); err != nil {\n\t\tappDao.PromError(\"DsUpdVideo:Err\")\n\t\treturn\n\t}\n\tlog.Info(\"Diff Result For Aid %d, Equal %v, Updated %v, Removed %v, New %v\", new.Aid, diff.Equal, diff.Updated, diff.Removed, diff.New)\n\tif err = s.treatDiffV(diff); err != nil {\n\t\tappDao.PromError(\"DsUpdVideo:Err\")\n\t\treturn\n\t}\n\tappDao.PromInfo(\"DsUpdVideo:Succ\")\n\treturn\n}", "func (obj *ocsCephFilesystems) ensureCreated(r 
*StorageClusterReconciler, instance *ocsv1.StorageCluster) (reconcile.Result, error) {\n\treconcileStrategy := ReconcileStrategy(instance.Spec.ManagedResources.CephFilesystems.ReconcileStrategy)\n\tif reconcileStrategy == ReconcileStrategyIgnore {\n\t\treturn reconcile.Result{}, nil\n\t}\n\n\tcephFilesystems, err := r.newCephFilesystemInstances(instance)\n\tif err != nil {\n\t\treturn reconcile.Result{}, err\n\t}\n\tfor _, cephFilesystem := range cephFilesystems {\n\t\texisting := cephv1.CephFilesystem{}\n\t\terr = r.Client.Get(context.TODO(), types.NamespacedName{Name: cephFilesystem.Name, Namespace: cephFilesystem.Namespace}, &existing)\n\t\tswitch {\n\t\tcase err == nil:\n\t\t\tif reconcileStrategy == ReconcileStrategyInit {\n\t\t\t\treturn reconcile.Result{}, nil\n\t\t\t}\n\t\t\tif existing.DeletionTimestamp != nil {\n\t\t\t\tr.Log.Info(\"Unable to restore CephFileSystem because it is marked for deletion.\", \"CephFileSystem\", klog.KRef(existing.Namespace, existing.Name))\n\t\t\t\treturn reconcile.Result{}, fmt.Errorf(\"failed to restore initialization object %s because it is marked for deletion\", existing.Name)\n\t\t\t}\n\n\t\t\tr.Log.Info(\"Restoring original CephFilesystem.\", \"CephFileSystem\", klog.KRef(cephFilesystem.Namespace, cephFilesystem.Name))\n\t\t\texisting.ObjectMeta.OwnerReferences = cephFilesystem.ObjectMeta.OwnerReferences\n\t\t\texisting.Spec = cephFilesystem.Spec\n\t\t\terr = r.Client.Update(context.TODO(), &existing)\n\t\t\tif err != nil {\n\t\t\t\tr.Log.Error(err, \"Unable to update CephFileSystem.\", \"CephFileSystem\", klog.KRef(cephFilesystem.Namespace, cephFilesystem.Name))\n\t\t\t\treturn reconcile.Result{}, err\n\t\t\t}\n\t\tcase errors.IsNotFound(err):\n\t\t\tr.Log.Info(\"Creating CephFileSystem.\", \"CephFileSystem\", klog.KRef(cephFilesystem.Namespace, cephFilesystem.Name))\n\t\t\terr = r.Client.Create(context.TODO(), cephFilesystem)\n\t\t\tif err != nil {\n\t\t\t\tr.Log.Error(err, \"Unable to create CephFileSystem.\", 
\"CephFileSystem\", klog.KRef(cephFilesystem.Namespace, cephFilesystem.Name))\n\t\t\t\treturn reconcile.Result{}, err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn reconcile.Result{}, nil\n}", "func (o *StorageListOptions) Run() (err error) {\n\tif o.outputFlag == \"json\" {\n\t\tvar storeList []storage.Storage\n\t\tif o.storageListAllFlag {\n\t\t\tcomponentList, err := component.List(o.Client, o.Application)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tfor _, component := range componentList {\n\t\t\t\tmountedStorages, err := storage.ListMounted(o.Client, component.ComponentName, o.Application)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tfor _, storage := range mountedStorages {\n\t\t\t\t\tmounted := getMachineReadableFormat(true, storage)\n\t\t\t\t\tstoreList = append(storeList, mounted)\n\t\t\t\t}\n\t\t\t}\n\n\t\t} else {\n\t\t\tcomponentName := o.Component()\n\t\t\tmountedStorages, err := storage.ListMounted(o.Client, componentName, o.Application)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tfor _, storage := range mountedStorages {\n\t\t\t\tmounted := getMachineReadableFormat(true, storage)\n\t\t\t\tstoreList = append(storeList, mounted)\n\n\t\t\t}\n\t\t}\n\t\tunmountedStorages, err := storage.ListUnmounted(o.Client, o.Application)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, storage := range unmountedStorages {\n\t\t\tunmounted := getMachineReadableFormat(false, storage)\n\t\t\tstoreList = append(storeList, unmounted)\n\t\t}\n\t\tstorageList := storage.StorageList{\n\t\t\tTypeMeta: metav1.TypeMeta{\n\t\t\t\tKind: \"List\",\n\t\t\t\tAPIVersion: \"odo.openshift.io/v1aplha1\",\n\t\t\t},\n\t\t\tListMeta: metav1.ListMeta{},\n\t\t\tItems: storeList,\n\t\t}\n\t\tout, err := json.Marshal(storageList)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Println(string(out))\n\t} else {\n\n\t\tif o.storageListAllFlag {\n\t\t\tprintMountedStorageInAllComponent(o.Client, o.Application)\n\t\t} else {\n\t\t\t// 
storageComponent is the input component name\n\t\t\tcomponentName := o.Component()\n\t\t\tprintMountedStorageInComponent(o.Client, componentName, o.Application)\n\t\t}\n\t\tprintUnmountedStorage(o.Client, o.Application)\n\t}\n\treturn\n}", "func (r *TestResourceReconciler) Reconcile(req ctrl.Request) (result ctrl.Result, err error) {\n\tlog := r.Log.WithValues(\"testresource\", req.NamespacedName)\n\tresource := new(naglfarv1.TestResource)\n\n\tif err = r.Get(r.Ctx, req.NamespacedName, resource); err != nil {\n\t\tlog.Error(err, \"unable to fetch TestResource\")\n\n\t\t// maybe resource deleted\n\t\terr = client.IgnoreNotFound(err)\n\t\treturn\n\t}\n\tif resource.ObjectMeta.DeletionTimestamp.IsZero() && !util.StringsContains(resource.ObjectMeta.Finalizers, resourceFinalizer) {\n\t\tresource.ObjectMeta.Finalizers = append(resource.ObjectMeta.Finalizers, resourceFinalizer)\n\t\terr = r.Update(r.Ctx, resource)\n\t\treturn\n\t}\n\n\tif !resource.ObjectMeta.DeletionTimestamp.IsZero() && util.StringsContains(resource.ObjectMeta.Finalizers, resourceFinalizer) {\n\t\tvar relation *naglfarv1.Relationship\n\n\t\tif relation, err = r.getRelationship(); err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tresourceRef := ref.CreateRef(&resource.ObjectMeta)\n\t\tresourceKey := resourceRef.Key()\n\n\t\tif machineRef, ok := relation.Status.ResourceToMachine[resourceKey]; ok {\n\t\t\tvar machine naglfarv1.Machine\n\t\t\tvar requeue bool\n\t\t\tif err = r.Get(r.Ctx, machineRef.Namespaced(), &machine); err != nil {\n\t\t\t\t// TODO: deal with not found\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trequeue, err = r.finalize(resource, &machine)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif requeue {\n\t\t\t\treturn ctrl.Result{RequeueAfter: time.Second}, nil\n\t\t\t}\n\n\t\t\tmachineKey := ref.CreateRef(&machine.ObjectMeta).Key()\n\t\t\trelation.Status.MachineToResources[machineKey] = refsRemove(relation.Status.MachineToResources[machineKey], 
resourceRef)\n\t\t\tdelete(relation.Status.ResourceToMachine, resourceKey)\n\t\t\tif err = r.Status().Update(r.Ctx, relation); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tresource.ObjectMeta.Finalizers = util.StringsRemove(resource.ObjectMeta.Finalizers, resourceFinalizer)\n\t\terr = r.Update(r.Ctx, resource)\n\t\treturn\n\t}\n\n\tswitch resource.Status.State {\n\tcase \"\":\n\t\tresource.Status.State = naglfarv1.ResourcePending\n\t\terr = r.Status().Update(r.Ctx, resource)\n\t\treturn\n\tcase naglfarv1.ResourcePending:\n\t\treturn r.reconcileStatePending(log, resource)\n\tcase naglfarv1.ResourceUninitialized:\n\t\treturn r.reconcileStateUninitialized(log, resource)\n\tcase naglfarv1.ResourceReady:\n\t\treturn r.reconcileStateReady(log, resource)\n\tcase naglfarv1.ResourceFinish:\n\t\treturn r.reconcileStateFinish(log, resource)\n\tcase naglfarv1.ResourceDestroy:\n\t\treturn r.reconcileStateDestroy(log, resource)\n\tdefault:\n\t\treturn\n\t}\n}", "func (r *Reconciler) reconcileRepos(ctx context.Context,\n\tpostgresCluster *v1beta1.PostgresCluster, extConfigHashes map[string]string,\n\trepoResources *RepoResources) (v1beta1.PGBackRestRepo, error) {\n\n\tlog := logging.FromContext(ctx).WithValues(\"reconcileResource\", \"repoVolume\")\n\n\terrors := []error{}\n\terrMsg := \"reconciling repository volume\"\n\trepoVols := []*corev1.PersistentVolumeClaim{}\n\tvar replicaCreateRepo v1beta1.PGBackRestRepo\n\tfor i, repo := range postgresCluster.Spec.Backups.PGBackRest.Repos {\n\t\t// the repo at index 0 is the replica creation repo\n\t\tif i == 0 {\n\t\t\treplicaCreateRepo = postgresCluster.Spec.Backups.PGBackRest.Repos[i]\n\t\t}\n\t\t// we only care about reconciling repo volumes, so ignore everything else\n\t\tif repo.Volume == nil {\n\t\t\tcontinue\n\t\t}\n\t\trepo, err := r.applyRepoVolumeIntent(ctx, postgresCluster, repo.Volume.VolumeClaimSpec,\n\t\t\trepo.Name, repoResources)\n\t\tif err != nil {\n\t\t\tlog.Error(err, errMsg)\n\t\t\terrors = append(errors, 
err)\n\t\t\tcontinue\n\t\t}\n\t\tif repo != nil {\n\t\t\trepoVols = append(repoVols, repo)\n\t\t}\n\t}\n\n\tpostgresCluster.Status.PGBackRest.Repos =\n\t\tgetRepoVolumeStatus(postgresCluster.Status.PGBackRest.Repos, repoVols, extConfigHashes,\n\t\t\treplicaCreateRepo.Name)\n\n\treturn replicaCreateRepo, utilerrors.NewAggregate(errors)\n}", "func (as *AppServant) GetStorage() []*agentd.Storage {\n\treturn nil\n}", "func (r *ReconcileBareMetalAsset) Reconcile(request reconcile.Request) (reconcile.Result, error) {\n\treqLogger := log.WithValues(\"Request.Namespace\", request.Namespace, \"Request.Name\", request.Name)\n\treqLogger.Info(\"Reconciling BareMetalAsset\")\n\n\t// Fetch the BareMetalAsset instance\n\tinstance := &midasv1alpha1.BareMetalAsset{}\n\terr := r.client.Get(context.TODO(), request.NamespacedName, instance)\n\tif err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\t// Request object not found, could have been deleted after reconcile request.\n\t\t\t// Owned objects are automatically garbage collected. 
For additional cleanup logic use finalizers.\n\t\t\t// Return and don't requeue\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\t// Error reading the object - requeue the request.\n\t\treturn reconcile.Result{}, err\n\t}\n\n\t// Check if the secret exists\n\tsecretName := instance.Spec.BMC.CredentialsName\n\tsecret := &corev1.Secret{}\n\terr = r.client.Get(context.TODO(), types.NamespacedName{Name: secretName, Namespace: request.Namespace}, secret)\n\tif err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\treqLogger.Error(err, \"Secret not found\", \"Namespace\", request.Namespace, \"Secret.Name\", secretName)\n\t\t\tconditionsv1.SetStatusCondition(&instance.Status.Conditions, conditionsv1.Condition{\n\t\t\t\tType: midasv1alpha1.ConditionCredentialsFound,\n\t\t\t\tStatus: corev1.ConditionFalse,\n\t\t\t\tReason: \"SecretNotFound\",\n\t\t\t\tMessage: fmt.Sprintf(\"A secret with the name %v in namespace %v could not be found\", secretName, request.Namespace),\n\t\t\t})\n\t\t\treturn reconcile.Result{}, r.client.Status().Update(context.TODO(), instance)\n\t\t}\n\t\treturn reconcile.Result{}, err\n\t}\n\n\t// Turn the secret into a reference we can use in status\n\tsecretRef, err := reference.GetReference(r.scheme, secret)\n\tif err != nil {\n\t\treqLogger.Error(err, \"Failed to get reference from secret\")\n\t\treturn reconcile.Result{}, err\n\t}\n\n\t// Add the condition and relatedObject, but only update the status once\n\tconditionsv1.SetStatusCondition(&instance.Status.Conditions, conditionsv1.Condition{\n\t\tType: midasv1alpha1.ConditionCredentialsFound,\n\t\tStatus: corev1.ConditionTrue,\n\t\tReason: \"SecretFound\",\n\t\tMessage: fmt.Sprintf(\"A secret with the name %v in namespace %v was found\", secretName, request.Namespace),\n\t})\n\tobjectreferencesv1.SetObjectReference(&instance.Status.RelatedObjects, *secretRef)\n\terr = r.client.Status().Update(context.TODO(), instance)\n\tif err != nil {\n\t\treqLogger.Error(err, \"Failed to add secret to related 
objects\")\n\t\treturn reconcile.Result{}, err\n\t}\n\n\t// Set BaremetalAsset instance as the owner and controller\n\tif secret.OwnerReferences == nil || len(secret.OwnerReferences) == 0 {\n\t\tif err := controllerutil.SetControllerReference(instance, secret, r.scheme); err != nil {\n\t\t\treqLogger.Error(err, \"Failed to set ControllerReference\")\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t\tif err := r.client.Update(context.TODO(), secret); err != nil {\n\t\t\treqLogger.Error(err, \"Failed to update secret with OwnerReferences\")\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t}\n\n\t// Update the cluster label from instance spec.clusterName\n\t// If not specified, removes the label.\n\tupdateLabels := false\n\tclusterName := instance.Spec.ClusterDeployment.Name\n\tclusterNamespace := instance.Spec.ClusterDeployment.Namespace\n\tif instance.Labels[ClusterDeploymentNameLabel] != clusterName {\n\t\tsetLabel(instance, ClusterDeploymentNameLabel, clusterName)\n\t\tsetLabel(instance, ClusterDeploymentNamespaceLabel, clusterNamespace)\n\t\tupdateLabels = true\n\t}\n\t// Update the role label from instance spec.role\n\t// If not specified, removes the label\n\tif instance.Labels[RoleLabel] != fmt.Sprintf(\"%v\", instance.Spec.Role) {\n\t\tsetLabel(instance, RoleLabel, fmt.Sprintf(\"%v\", instance.Spec.Role))\n\t\tupdateLabels = true\n\t}\n\tif updateLabels {\n\t\tif err := r.client.Update(context.TODO(), instance); err != nil {\n\t\t\treqLogger.Error(err, \"Failed to update instance with cluster and role label\")\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t}\n\n\tif clusterName != \"\" {\n\t\t// If clusterName is specified in the spec, we need to find the clusterdeployment for that clusterName and create\n\t\t// hive syncset in the same namespace as the clusterdeployment.\n\t\tfoundCd := &hivev1.ClusterDeployment{}\n\t\terr = r.client.Get(context.TODO(), types.NamespacedName{Name: clusterName, Namespace: clusterNamespace}, foundCd)\n\t\tif err != nil 
{\n\t\t\tif errors.IsNotFound(err) {\n\t\t\t\treqLogger.Error(err, \"ClusterDeployment not found\", \"Namespace\", clusterNamespace, \"ClusterDeployment\", clusterName)\n\t\t\t\tconditionsv1.SetStatusCondition(&instance.Status.Conditions, conditionsv1.Condition{\n\t\t\t\t\tType: midasv1alpha1.ConditionClusterDeploymentFound,\n\t\t\t\t\tStatus: corev1.ConditionFalse,\n\t\t\t\t\tReason: \"ClusterDeploymentNotFound\",\n\t\t\t\t\tMessage: fmt.Sprintf(\"A cluster deployment with the name %v in namespace %v could not be found\", clusterName, clusterNamespace),\n\t\t\t\t})\n\t\t\t\treturn reconcile.Result{}, r.client.Status().Update(context.TODO(), instance)\n\t\t\t}\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t\tconditionsv1.SetStatusCondition(&instance.Status.Conditions, conditionsv1.Condition{\n\t\t\tType: midasv1alpha1.ConditionClusterDeploymentFound,\n\t\t\tStatus: corev1.ConditionTrue,\n\t\t\tReason: \"ClusterDeploymentFound\",\n\t\t\tMessage: fmt.Sprintf(\"A clusterdeployment with the name %v in namespace %v was found\", clusterName, clusterNamespace),\n\t\t})\n\t\tstatusErr := r.client.Status().Update(context.TODO(), instance)\n\t\tif statusErr != nil {\n\t\t\treqLogger.Error(statusErr, \"Failed to update instance status\")\n\t\t\treturn reconcile.Result{}, statusErr\n\t\t}\n\n\t\t// If clusterDeployment is found, ensure syncset is created\n\t\terr = r.ensureHiveSyncSet(instance, foundCd, reqLogger)\n\t\tif err != nil {\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t} else {\n\t\t// Without a clusterName, we do not know what namespace the syncset is in.\n\t\t// Get the syncset from relatedobjects if it exists\n\t\thscRef := corev1.ObjectReference{}\n\t\tfor _, ro := range instance.Status.RelatedObjects {\n\t\t\tif ro.Name == instance.Name && ro.Kind == \"SyncSet\" && ro.APIVersion == hivev1.SchemeGroupVersion.String() {\n\t\t\t\thscRef = ro\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif hscRef == (corev1.ObjectReference{}) {\n\t\t\t// No syncset found in 
relatedObjects. Nothing to do.\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\t// If clusterName is not specified, delete the syncset if it exists\n\t\treqLogger.Info(\"Cleaning up Hive SyncSet\", \"Name\", hscRef.Name, \"Namespace\", hscRef.Namespace)\n\t\terr = r.client.Delete(context.TODO(), &hivev1.SyncSet{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tNamespace: hscRef.Namespace,\n\t\t\t\tName: hscRef.Name,\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\tif !errors.IsNotFound(err) {\n\t\t\t\treqLogger.Error(err, \"Failed to delete Hive SyncSet\")\n\t\t\t\treturn reconcile.Result{}, err\n\t\t\t}\n\t\t}\n\t\t// Remove SyncSet from related objects\n\t\tobjectreferencesv1.RemoveObjectReference(&instance.Status.RelatedObjects, hscRef)\n\t\treturn reconcile.Result{}, r.client.Status().Update(context.TODO(), instance)\n\t}\n\n\treqLogger.Info(\"Reconciled\")\n\n\treturn reconcile.Result{}, nil\n}", "func resourceArmStorageContainerRead(d *schema.ResourceData, meta interface{}) error {\n\tarmClient := meta.(*ArmClient)\n\tctx := armClient.StopContext\n\n\tresourceGroupName := d.Get(\"resource_group_name\").(string)\n\tstorageAccountName := d.Get(\"storage_account_name\").(string)\n\n\tblobClient, accountExists, err := armClient.getBlobStorageClientForStorageAccount(ctx, resourceGroupName, storageAccountName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !accountExists {\n\t\tlog.Printf(\"[DEBUG] Storage account %q not found, removing container %q from state\", storageAccountName, d.Id())\n\t\td.SetId(\"\")\n\t\treturn nil\n\t}\n\n\tname := d.Get(\"name\").(string)\n\tcontainers, err := blobClient.ListContainers(storage.ListContainersParameters{\n\t\tPrefix: name,\n\t\tTimeout: 90,\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to retrieve storage containers in account %q: %s\", name, err)\n\t}\n\n\tvar found bool\n\tfor _, cont := range containers.Containers {\n\t\tif cont.Name == name {\n\t\t\tfound = true\n\n\t\t\tprops := 
make(map[string]interface{})\n\t\t\tprops[\"last_modified\"] = cont.Properties.LastModified\n\t\t\tprops[\"lease_status\"] = cont.Properties.LeaseStatus\n\t\t\tprops[\"lease_state\"] = cont.Properties.LeaseState\n\t\t\tprops[\"lease_duration\"] = cont.Properties.LeaseDuration\n\n\t\t\td.Set(\"properties\", props)\n\t\t}\n\t}\n\n\tif !found {\n\t\tlog.Printf(\"[INFO] Storage container %q does not exist in account %q, removing from state...\", name, storageAccountName)\n\t\td.SetId(\"\")\n\t}\n\n\treturn nil\n}", "func (r *reconciler) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {\n\tlog := r.log.With(\"request\", request)\n\tlog.Debug(\"Reconciling\")\n\n\tresourceQuota := &kubermaticv1.ResourceQuota{}\n\tif err := r.masterClient.Get(ctx, request.NamespacedName, resourceQuota); err != nil {\n\t\treturn reconcile.Result{}, fmt.Errorf(\"failed to get resource quota %q: %w\", resourceQuota.Name, err)\n\t}\n\n\terr := r.reconcile(ctx, resourceQuota, log)\n\tif err != nil {\n\t\tlog.Errorw(\"ReconcilingError\", zap.Error(err))\n\t\tr.recorder.Event(resourceQuota, corev1.EventTypeWarning, \"ReconcilingError\", err.Error())\n\t}\n\n\treturn reconcile.Result{}, err\n}", "func (r *ConfigAuditReportReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {\n\t// your logic here\n\t// Fetch the ConfigAuditReport instance\n\tinstance := &aquasecurityv1alpha1.ConfigAuditReport{}\n\terr := r.Client.Get(context.TODO(), req.NamespacedName, instance)\n\tif err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\t// Request object not found, could have been deleted after reconcile request.\n\t\t\t// Owned objects are automatically garbage collected. 
For additional cleanup logic use finalizers.\n\t\t\t// Return and don't requeue\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\t// Error reading the object - requeue the request.\n\t\treturn reconcile.Result{}, err\n\t}\n\n\tworkloadInfo, err := r.getWorkloadInfo(instance)\n\tif err != nil {\n\t\treturn reconcile.Result{}, err\n\t}\n\n\t// if !instance.ObjectMeta.DeletionTimestamp.IsZero() {\n\t// \t// The object is not being deleted, so do nothing\n\t// \tlogger.Info(\"Remove the original report\")\n\t// \tlogger.Info(\"workloadInfo\")\n\t// \terr := r.removeReport(workloadInfo)\n\t// \tif err != nil {\n\t// \t\treturn reconcile.Result{}, err\n\t// \t}\n\t// \treturn reconcile.Result{}, nil\n\t// }\n\n\t// examine DeletionTimestamp to determine if object is under deletion\n\tif instance.ObjectMeta.DeletionTimestamp.IsZero() {\n\t\t// The object is not being deleted, so if it does not have our finalizer,\n\t\t// then lets add the finalizer and update the object. This is equivalent\n\t\t// registering our finalizer.\n\t\tif !containsString(instance.GetFinalizers(), finalizerName) {\n\t\t\taddFinalizer(instance, finalizerName)\n\t\t\tif err := r.Client.Update(context.TODO(), instance); err != nil {\n\t\t\t\treturn reconcile.Result{}, err\n\t\t\t}\n\t\t}\n\t} else {\n\t\t// The object is being deleted\n\t\tif containsString(instance.GetFinalizers(), finalizerName) {\n\t\t\t// our finalizer is present, so lets handle any external dependency\n\t\t\tif err := r.removeReport(workloadInfo); err != nil {\n\t\t\t\t// if fail to delete the external dependency here, return with error\n\t\t\t\t// so that it can be retried\n\t\t\t\treturn reconcile.Result{}, err\n\t\t\t}\n\n\t\t\t// remove our finalizer from the list and update it.\n\t\t\tremoveFinalizer(instance, finalizerName)\n\t\t\tif err := r.Client.Update(context.TODO(), instance); err != nil {\n\t\t\t\treturn reconcile.Result{}, err\n\t\t\t}\n\t\t}\n\n\t\t// Stop reconciliation as the item is being deleted\n\t\treturn 
reconcile.Result{}, nil\n\t}\n\n\terr = r.generateReport(ctx, workloadInfo)\n\tif err != nil {\n\t\treturn reconcile.Result{}, err\n\t}\n\n\treturn ctrl.Result{}, nil\n}", "func TestShrinkStorageFolder(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\tt.Parallel()\n\tcmt, err := newContractManagerTester(\"TestShrinkStorageFolder\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer cmt.panicClose()\n\n\t// Add a storage folder.\n\tstorageFolderOne := filepath.Join(cmt.persistDir, \"storageFolderOne\")\n\t// Create the storage folder dir.\n\terr = os.MkdirAll(storageFolderOne, 0700)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = cmt.cm.AddStorageFolder(storageFolderOne, modules.SectorSize*storageFolderGranularity*8)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Get the index of the storage folder.\n\tsfs := cmt.cm.StorageFolders()\n\tif len(sfs) != 1 {\n\t\tt.Fatal(\"there should only be one storage folder\")\n\t}\n\tsfIndex := sfs[0].Index\n\t// Verify that the storage folder has the correct capacity.\n\tif sfs[0].Capacity != modules.SectorSize*storageFolderGranularity*8 {\n\t\tt.Error(\"new storage folder is reporting the wrong capacity\")\n\t}\n\t// Verify that the on-disk files are the right size.\n\tmfn := filepath.Join(storageFolderOne, metadataFile)\n\tsfn := filepath.Join(storageFolderOne, sectorFile)\n\tmfi, err := os.Stat(mfn)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tsfi, err := os.Stat(sfn)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif uint64(mfi.Size()) != sectorMetadataDiskSize*storageFolderGranularity*8 {\n\t\tt.Error(\"metadata file is the wrong size\")\n\t}\n\tif uint64(sfi.Size()) != modules.SectorSize*storageFolderGranularity*8 {\n\t\tt.Error(\"sector file is the wrong size\")\n\t}\n\n\t// Decrease the size of the storage folder.\n\terr = cmt.cm.ResizeStorageFolder(sfIndex, modules.SectorSize*storageFolderGranularity*2, false)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Verify that the capacity and file sizes are 
correct.\n\tsfs = cmt.cm.StorageFolders()\n\tif sfs[0].Capacity != modules.SectorSize*storageFolderGranularity*2 {\n\t\tt.Error(\"new storage folder is reporting the wrong capacity\")\n\t}\n\tmfi, err = os.Stat(mfn)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tsfi, err = os.Stat(sfn)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif uint64(mfi.Size()) != sectorMetadataDiskSize*storageFolderGranularity*2 {\n\t\tt.Error(\"metadata file is the wrong size\")\n\t}\n\tif uint64(sfi.Size()) != modules.SectorSize*storageFolderGranularity*2 {\n\t\tt.Error(\"sector file is the wrong size\")\n\t}\n\n\t// Restart the contract manager to see that the change is persistent.\n\terr = cmt.cm.Close()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tcmt.cm, err = New(filepath.Join(cmt.persistDir, modules.ContractManagerDir))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Verify that the capacity and file sizes are correct.\n\tsfs = cmt.cm.StorageFolders()\n\tif sfs[0].Capacity != modules.SectorSize*storageFolderGranularity*2 {\n\t\tt.Error(\"new storage folder is reporting the wrong capacity\")\n\t}\n\tmfi, err = os.Stat(mfn)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tsfi, err = os.Stat(sfn)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif uint64(mfi.Size()) != sectorMetadataDiskSize*storageFolderGranularity*2 {\n\t\tt.Error(\"metadata file is the wrong size\")\n\t}\n\tif uint64(sfi.Size()) != modules.SectorSize*storageFolderGranularity*2 {\n\t\tt.Error(\"sector file is the wrong size\")\n\t}\n}", "func (r *shootReferenceReconciler) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {\n\tshoot := &gardencorev1beta1.Shoot{}\n\tif err := r.gardenClient.Client().Get(ctx, request.NamespacedName, shoot); err != nil {\n\t\tif apierrors.IsNotFound(err) {\n\t\t\tr.logger.Infof(\"Object %q is gone, stop reconciling: %v\", request.Name, err)\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\tr.logger.Infof(\"Unable to retrieve object %q from store: %v\", 
request.Name, err)\n\t\treturn reconcile.Result{}, err\n\t}\n\n\tr.logger.Infof(\"[SHOOT REFERENCE CONTROL] %s\", request)\n\n\treturn reconcile.Result{}, r.reconcileShootReferences(ctx, shoot)\n}", "func (postgres *PostgreSQLReconciler) Reconcile() (*lcm.CRStatus, error) {\n\n\tpostgres.Client.WithContext(postgres.Ctx)\n\tpostgres.DClient.WithContext(postgres.Ctx)\n\n\tcrdClient := postgres.DClient.WithResource(databaseFailoversGVR).WithNamespace(postgres.HarborCluster.Namespace)\n\tif postgres.HarborCluster.Spec.Database.Kind == goharborv1.InClusterComponent {\n\n\t\tname := fmt.Sprintf(\"%s-%s\", postgres.HarborCluster.Namespace, postgres.HarborCluster.Name)\n\t\tactualCR, err := crdClient.Get(name, metav1.GetOptions{})\n\t\tif errors.IsNotFound(err) {\n\t\t\treturn postgres.Provision()\n\t\t} else if err != nil {\n\t\t\treturn databaseNotReadyStatus(GetDatabaseCrError, err.Error()), err\n\t\t}\n\t\texpectCR, err := postgres.generatePostgresCR()\n\t\tif err != nil {\n\t\t\treturn databaseNotReadyStatus(GenerateDatabaseCrError, err.Error()), err\n\t\t}\n\n\t\tif err := controllerutil.SetControllerReference(postgres.HarborCluster, expectCR, postgres.Scheme); err != nil {\n\t\t\treturn databaseNotReadyStatus(SetOwnerReferenceError, err.Error()), err\n\t\t}\n\n\t\tpostgres.ActualCR = actualCR\n\t\tpostgres.ExpectCR = expectCR\n\n\t\tcrStatus, err := postgres.Update()\n\t\tif err != nil {\n\t\t\treturn crStatus, err\n\t\t}\n\t}\n\n\tcrStatus, err := postgres.Readiness()\n\tif err != nil {\n\t\treturn databaseNotReadyStatus(CheckDatabaseHealthError, err.Error()), err\n\t}\n\n\treturn crStatus, nil\n}", "func TestCalculateStorageUsage(t *testing.T) {\n\tt.Skip()\n\tdefer leaktest.AfterTest(t)()\n\tif testing.Short() {\n\t\tt.Skip(\"skipping in short mode.\")\n\t\treturn\n\t}\n\tctx := context.Background()\n\n\t// initialize cluster\n\topt := service.DefaultOptions()\n\tc, err := service.NewCluster(ctx, t, opt.WithLogLevel(zap.ErrorLevel))\n\trequire.NoError(t, 
err)\n\t// close the cluster\n\tdefer func(c service.Cluster) {\n\t\trequire.NoError(t, c.Close())\n\t}(c)\n\t// start the cluster\n\trequire.NoError(t, c.Start())\n\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\n\tc.WaitCNStoreTaskServiceCreatedIndexed(ctx, 0)\n\tc.WaitTNStoreTaskServiceCreatedIndexed(ctx, 0)\n\tc.WaitLogStoreTaskServiceCreatedIndexed(ctx, 0)\n\n\tctrl := gomock.NewController(t)\n\ttxnOperator := mock_frontend.NewMockTxnOperator(ctrl)\n\ttxnOperator.EXPECT().Txn().Return(txn.TxnMeta{}).AnyTimes()\n\ttxnOperator.EXPECT().Commit(gomock.Any()).Return(nil).AnyTimes()\n\ttxnOperator.EXPECT().Rollback(gomock.Any()).Return(nil).AnyTimes()\n\ttxnClient := mock_frontend.NewMockTxnClient(ctrl)\n\ttxnClient.EXPECT().New(gomock.Any(), gomock.Any()).Return(txnOperator, nil).AnyTimes()\n\ttable := mock_frontend.NewMockRelation(ctrl)\n\ttable.EXPECT().Ranges(gomock.Any(), gomock.Any()).Return(nil, nil).AnyTimes()\n\ttable.EXPECT().TableDefs(gomock.Any()).Return(nil, nil).AnyTimes()\n\ttable.EXPECT().GetPrimaryKeys(gomock.Any()).Return(nil, nil).AnyTimes()\n\ttable.EXPECT().GetHideKeys(gomock.Any()).Return(nil, nil).AnyTimes()\n\ttable.EXPECT().TableColumns(gomock.Any()).Return(nil, nil).AnyTimes()\n\ttable.EXPECT().GetTableID(gomock.Any()).Return(uint64(10)).AnyTimes()\n\tdb := mock_frontend.NewMockDatabase(ctrl)\n\tdb.EXPECT().Relations(gomock.Any()).Return(nil, nil).AnyTimes()\n\tdb.EXPECT().Relation(gomock.Any(), gomock.Any(), gomock.Any()).Return(table, nil).AnyTimes()\n\teng := mock_frontend.NewMockEngine(ctrl)\n\teng.EXPECT().New(gomock.Any(), gomock.Any()).Return(nil).AnyTimes()\n\teng.EXPECT().Database(gomock.Any(), gomock.Any(), txnOperator).Return(db, nil).AnyTimes()\n\teng.EXPECT().Hints().Return(engine.Hints{CommitOrRollbackTimeout: time.Second}).AnyTimes()\n\tpu := config.NewParameterUnit(&config.FrontendParameters{}, eng, txnClient, nil)\n\tpu.SV.SetDefaultValues()\n\n\t// Mock 
autoIncrCache\n\taicm := &defines.AutoIncrCacheManager{}\n\n\tieFactory := func() ie.InternalExecutor {\n\t\treturn frontend.NewInternalExecutor(pu, aicm)\n\t}\n\n\terr = mometric.CalculateStorageUsage(ctx, ieFactory)\n\trequire.Nil(t, err)\n\n\ts := metric.StorageUsage(\"sys\")\n\tdm := &dto.Metric{}\n\ts.Write(dm)\n\tlogutil.Infof(\"size: %f\", dm.GetGauge().GetValue())\n\tt.Logf(\"size: %f\", dm.GetGauge().GetValue())\n}", "func (r *TestReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.Result, error) {\n\tlogger := log.FromContext(ctx)\n\n\tlogger.Info(\"Reconcile started\")\n\n\ttest := &testv1alpha1.Test{}\n\terr := r.Get(ctx, request.NamespacedName, test)\n\tif err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\tlogger.Info(\"Resource not found\")\n\t\t\treturn ctrl.Result{}, nil\n\t\t}\n\t\tlogger.Info(\"Error retrieving resource\")\n\t\treturn ctrl.Result{}, err\n\t}\n\n\t// Check if we are finalizing\n\tisMarkedToBeDeleted := test.GetDeletionTimestamp() != nil\n\tif isMarkedToBeDeleted {\n\t\tlogger.Info(\"Marked to be deleted\")\n\t\tif controllerutil.ContainsFinalizer(test, FinalizerName) {\n\t\t\tlogger.Info(\"Contains finalizer\")\n\n\t\t\t// Remove finalizer. 
Once all finalizers have been\n\t\t\t// removed, the object will be deleted.\n\t\t\tcontrollerutil.RemoveFinalizer(test, FinalizerName)\n\t\t\terr := r.Update(ctx, test)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Info(\"Removing finalizer error\")\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\t\t\tlogger.Info(\"Removed finalizer\")\n\t\t}\n\t\tlogger.Info(\"Returning successfully within mark check\")\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\t// Add finalizer for this CR\n\tif !controllerutil.ContainsFinalizer(test, FinalizerName) {\n\t\tlogger.Info(\"Adding finalizer\")\n\t\tcontrollerutil.AddFinalizer(test, FinalizerName)\n\t\terr = r.Update(ctx, test)\n\t\tif err != nil {\n\t\t\tlogger.Info(\"Failed to add finalizer\")\n\t\t\treturn ctrl.Result{}, err\n\t\t} else {\n\t\t\tlogger.Info(\"Added finalizer\")\n\t\t\treturn ctrl.Result{}, nil\n\t\t}\n\t}\n\n\tlogger.Info(\"Started normal processing\")\n\n\tlogger.Info(\"Final successful return\")\n\treturn ctrl.Result{}, nil\n}", "func (r *ReconcileMessagingEndpoint) reconcileFinalizer(ctx context.Context, logger logr.Logger, endpoint *v1.MessagingEndpoint) (processorResult, error) {\n\t// Handle finalizing an deletion state first\n\tif endpoint.DeletionTimestamp != nil && endpoint.Status.Phase != v1.MessagingEndpointTerminating {\n\t\tendpoint.Status.Phase = v1.MessagingEndpointTerminating\n\t\terr := r.client.Status().Update(ctx, endpoint)\n\t\treturn processorResult{Requeue: true}, err\n\t}\n\n\toriginal := endpoint.DeepCopy()\n\tresult, err := finalizer.ProcessFinalizers(ctx, r.client, r.reader, r.recorder, endpoint, []finalizer.Finalizer{\n\t\t{\n\t\t\tName: FINALIZER_NAME,\n\t\t\tDeconstruct: func(c finalizer.DeconstructorContext) (reconcile.Result, error) {\n\t\t\t\t_, ok := c.Object.(*v1.MessagingEndpoint)\n\t\t\t\tif !ok {\n\t\t\t\t\treturn reconcile.Result{}, fmt.Errorf(\"provided wrong object type to finalizer, only supports MessagingEndpoint\")\n\t\t\t\t}\n\n\t\t\t\t_, infra, err := 
messaginginfra.LookupInfra(ctx, r.client, endpoint.Namespace)\n\t\t\t\tif err != nil {\n\t\t\t\t\t// Not bound - allow dropping finalizer\n\t\t\t\t\tif utilerrors.IsNotBound(err) || utilerrors.IsNotFound(err) {\n\t\t\t\t\t\tlogger.Info(\"[Finalizer] Messaging project not found or bound, ignoring!\")\n\t\t\t\t\t\treturn reconcile.Result{}, nil\n\t\t\t\t\t}\n\t\t\t\t\tlogger.Info(\"[Finalizer] Error looking up infra\")\n\t\t\t\t\treturn reconcile.Result{}, err\n\t\t\t\t}\n\t\t\t\tclient := r.clientManager.GetClient(infra)\n\t\t\t\terr = client.DeleteEndpoint(endpoint)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn reconcile.Result{}, err\n\t\t\t\t}\n\n\t\t\t\terr = r.certController.DeleteEndpointCert(ctx, logger, infra, endpoint)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn reconcile.Result{}, err\n\t\t\t\t}\n\n\t\t\t\tserviceName := getServiceName(endpoint)\n\t\t\t\tmeta := metav1.ObjectMeta{Namespace: infra.Namespace, Name: serviceName}\n\t\t\t\tif endpoint.Spec.Ingress != nil {\n\t\t\t\t\tingress := &netv1beta1.Ingress{\n\t\t\t\t\t\tObjectMeta: meta,\n\t\t\t\t\t}\n\t\t\t\t\terr = r.client.Delete(ctx, ingress)\n\t\t\t\t\tif err != nil && !k8errors.IsNotFound(err) {\n\t\t\t\t\t\treturn reconcile.Result{}, err\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif endpoint.Spec.Route != nil {\n\t\t\t\t\troute := &routev1.Route{\n\t\t\t\t\t\tObjectMeta: meta,\n\t\t\t\t\t}\n\t\t\t\t\terr = r.client.Delete(ctx, route)\n\t\t\t\t\tif err != nil && !k8errors.IsNotFound(err) {\n\t\t\t\t\t\treturn reconcile.Result{}, err\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tservice := &corev1.Service{\n\t\t\t\t\tObjectMeta: meta,\n\t\t\t\t}\n\t\t\t\terr = r.client.Delete(ctx, service)\n\t\t\t\tif err != nil && !k8errors.IsNotFound(err) {\n\t\t\t\t\treturn reconcile.Result{}, err\n\t\t\t\t}\n\n\t\t\t\tlogger.Info(\"[Finalizer] Deleted endpoint\", \"err\", err)\n\t\t\t\treturn reconcile.Result{}, nil\n\t\t\t},\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn processorResult{}, err\n\t}\n\n\tif result.Requeue {\n\t\t// 
Update and requeue if changed\n\t\tif !reflect.DeepEqual(original, endpoint) {\n\t\t\terr := r.client.Update(ctx, endpoint)\n\t\t\treturn processorResult{Return: true}, err\n\t\t}\n\t}\n\treturn processorResult{Requeue: result.Requeue}, nil\n}", "func (r *KonfigurationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {\n\treqLogger := log.FromContext(ctx)\n\treconcileStart := time.Now()\n\n\treqLogger.Info(\"Reconciling konfiguration\")\n\n\t// Look up the konfiguration that triggered this request\n\tkonfig := &konfigurationv1.Konfiguration{}\n\tif err := r.Client.Get(ctx, req.NamespacedName, konfig); err != nil {\n\t\t// Check if object was deleted\n\t\tif client.IgnoreNotFound(err) == nil {\n\t\t\treturn ctrl.Result{}, nil\n\t\t}\n\t\treturn ctrl.Result{}, err\n\t}\n\n\t// Record suspended status metric\n\tdefer r.recordSuspension(ctx, konfig)\n\n\t// Add our finalizer if it does not exist\n\tif !controllerutil.ContainsFinalizer(konfig, konfigurationv1.KonfigurationFinalizer) {\n\t\treqLogger.Info(\"Registering finalizer to Konfiguration\")\n\t\tcontrollerutil.AddFinalizer(konfig, konfigurationv1.KonfigurationFinalizer)\n\t\tif err := r.Update(ctx, konfig); err != nil {\n\t\t\treqLogger.Error(err, \"failed to register finalizer\")\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t}\n\n\t// Examine if the object is under deletion\n\tif !konfig.ObjectMeta.DeletionTimestamp.IsZero() {\n\t\treturn r.reconcileDelete(ctx, konfig)\n\t}\n\n\t// Check if the konfiguration is suspended\n\tif konfig.IsSuspended() {\n\t\treturn ctrl.Result{\n\t\t\tRequeueAfter: konfig.GetInterval(),\n\t\t}, nil\n\t}\n\n\t// Get the revision and the path we are going to operate on\n\trevision, path, clean, err := r.prepareSource(ctx, konfig)\n\tif err != nil {\n\t\tr.recordReadiness(ctx, konfig)\n\t\treturn ctrl.Result{\n\t\t\tRequeueAfter: konfig.GetRetryInterval(),\n\t\t}, nil\n\t}\n\tdefer clean()\n\n\t// Check if there are any dependencies and that they are all 
ready\n\tif err := r.checkDependencies(ctx, konfig); err != nil {\n\t\tif statusErr := konfig.SetNotReady(ctx, r.Client, konfigurationv1.NewStatusMeta(\n\t\t\trevision, meta.DependencyNotReadyReason, err.Error(),\n\t\t)); statusErr != nil {\n\t\t\treqLogger.Error(err, \"failed to update status for dependency not ready\")\n\t\t}\n\t\tmsg := fmt.Sprintf(\"Dependencies do not meet ready condition, retrying in %s\", r.dependencyRequeueDuration.String())\n\t\treqLogger.Info(msg)\n\t\tr.event(ctx, konfig, &EventData{\n\t\t\tRevision: revision,\n\t\t\tSeverity: events.EventSeverityInfo,\n\t\t\tMessage: msg,\n\t\t})\n\t\tr.recordReadiness(ctx, konfig)\n\t\treturn ctrl.Result{RequeueAfter: r.dependencyRequeueDuration}, nil\n\t}\n\n\t// record reconciliation duration\n\tif r.MetricsRecorder != nil {\n\t\tobjRef, err := reference.GetReference(r.Scheme, konfig)\n\t\tif err != nil {\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t\tdefer r.MetricsRecorder.RecordDuration(*objRef, reconcileStart)\n\t}\n\n\t// set the status to progressing\n\tif err := konfig.SetProgressing(ctx, r.Client); err != nil {\n\t\treqLogger.Error(err, \"unable to update status to progressing\")\n\t\treturn ctrl.Result{Requeue: true}, err\n\t}\n\tr.recordReadiness(ctx, konfig)\n\n\t// Do reconciliation\n\tsnapshot, err := r.reconcile(ctx, konfig, revision, path)\n\tif err != nil {\n\t\treqLogger.Error(err, \"Error during reconciliation\")\n\t\tr.event(ctx, konfig, &EventData{\n\t\t\tRevision: revision,\n\t\t\tSeverity: events.EventSeverityError,\n\t\t\tMessage: err.Error(),\n\t\t})\n\t\treturn ctrl.Result{\n\t\t\tRequeueAfter: konfig.GetRetryInterval(),\n\t\t}, nil\n\t}\n\n\tupdated := konfig.Status.Snapshot == nil || snapshot.Checksum != konfig.Status.Snapshot.Checksum\n\n\t// Set the konfiguration as ready\n\tmsg := fmt.Sprintf(\"Applied revision: %s\", revision)\n\tif err := konfig.SetReady(ctx, r.Client, snapshot, konfigurationv1.NewStatusMeta(\n\t\trevision, meta.ReconciliationSucceededReason, msg),\n\t); 
err != nil {\n\t\treturn ctrl.Result{Requeue: true}, err\n\t}\n\n\treqLogger.Info(fmt.Sprintf(\"Reconcile finished, next run in %s\", konfig.GetInterval().String()), \"Revision\", revision)\n\n\tif updated {\n\t\tr.event(ctx, konfig, &EventData{\n\t\t\tRevision: revision,\n\t\t\tSeverity: events.EventSeverityInfo,\n\t\t\tMessage: \"Update Complete\",\n\t\t\tMetadata: map[string]string{\n\t\t\t\t\"commit_status\": \"update\",\n\t\t\t},\n\t\t})\n\t}\n\treturn ctrl.Result{\n\t\tRequeueAfter: konfig.GetInterval(),\n\t}, nil\n}", "func (c *IDProvider) UpdateStorage() error {\n\tc.mutex.RLock()\n\tdefer c.mutex.RUnlock()\n\n\tf, err := os.Create(c.localFilePath)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"opening ID provider file for writing\")\n\t}\n\tdefer func() {\n\t\tif fCloseErr := f.Close(); fCloseErr != nil {\n\t\t\terr = fmt.Errorf(\"%w; and error closing file - %s\", err, fCloseErr.Error())\n\t\t}\n\t}()\n\n\tencoder := yaml.NewEncoder(f)\n\tif err = encoder.Encode(c.peerIDsByAlias); err != nil {\n\t\treturn errors.Wrap(err, \"encoding data as yaml\")\n\t}\n\terr = errors.Wrap(encoder.Close(), \"closing encoder\")\n\t// receive the error in \"err\" before returning to ensure file close error is captured.\n\treturn err\n}", "func (r *HybrisBaseReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {\n\tctx := context.Background()\n\tlog := r.Log.WithValues(\"hybrisbase\", req.NamespacedName)\n\n\t// Fetch the HybrisBase instance\n\thybrisBase := &hybrisv1alpha1.HybrisBase{}\n\terr := r.Get(ctx, req.NamespacedName, hybrisBase)\n\tif err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\t// Request object not found, could have been deleted after reconcile request.\n\t\t\t// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.\n\t\t\t// Return and don't requeue\n\t\t\tlog.Info(\"HybrisBase resource not found. 
Ignoring since object must be deleted\")\n\t\t\treturn ctrl.Result{}, nil\n\t\t}\n\t\t// Error reading the object - requeue the request.\n\t\tlog.Error(err, \"Failed to get HybrisBase\")\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tupdated, err := r.ensureImageStream(hybrisBase, ctx, log)\n\tif updated {\n\t\treturn ctrl.Result{}, nil\n\t} else if err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\tupdated, err = r.ensureSecret(hybrisBase, ctx, log)\n\tif updated {\n\t\treturn ctrl.Result{}, nil\n\t} else if err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\tupdated, err = r.ensureBuildConfig(hybrisBase, ctx, log)\n\tif updated {\n\t\treturn ctrl.Result{}, nil\n\t} else if err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tif hybrisBase.Status.BuildConditions == nil {\n\t\thybrisBase.Status.BuildConditions = []hybrisv1alpha1.BuildStatusCondition{}\n\t}\n\n\tbuilding, updated, err := r.updateBuildStatus(hybrisBase, ctx, log)\n\n\tif updated {\n\t\terr = r.Status().Update(ctx, hybrisBase)\n\t\tif err != nil {\n\t\t\tlog.Error(err, \"Failed to update HybrisBase status\")\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t\tlog.Info(\"HybrisBase status updated\")\n\t\treturn ctrl.Result{Requeue: true}, nil\n\t} else if building {\n\t\tlog.Info(\"HybrisBase building in process\")\n\t\treturn ctrl.Result{RequeueAfter: time.Minute}, nil\n\t}\n\n\treturn ctrl.Result{}, nil\n}", "func (r *Reconciler) Reconcile(request reconcile.Request) (reconcile.Result, error) {\n\tlog.Printf(\"reconciling %s: %v\", bucketv1alpha1.BucketKindApiVersion, request)\n\n\t// fetch the CRD instance\n\tbucket := &bucketv1alpha1.Bucket{}\n\n\terr := r.Get(ctx, request.NamespacedName, bucket)\n\tif err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\t// Object not found, return. 
Created objects are automatically garbage collected.\n\t\t\t// For additional cleanup logic use finalizers.\n\t\t\treturn result, nil\n\t\t}\n\t\treturn result, err\n\t}\n\n\t// Check for deletion\n\tif bucket.DeletionTimestamp != nil && bucket.Status.Condition(corev1alpha1.Deleting) == nil {\n\t\treturn r.delete(bucket)\n\t}\n\n\t// Add finalizer\n\tutil.AddFinalizer(&bucket.ObjectMeta, finalizer)\n\tif err := r.Update(ctx, bucket); err != nil {\n\t\treturn resultRequeue, err\n\t}\n\n\t// check if instance reference is set, if not - create new instance\n\tif bucket.Spec.ResourceRef == nil {\n\t\treturn r.provision(bucket)\n\t}\n\n\t// bind to the resource\n\treturn r.bind(bucket)\n}", "func (o *StorageCreateOptions) Validate() (err error) {\n\tif o.isDevfile {\n\t\treturn\n\t}\n\t// validate storage path\n\treturn o.LocalConfigInfo.ValidateStorage(o.storageName, o.storagePath)\n}", "func (r *azureManagedControlPlaneReconciler) Reconcile(ctx context.Context, scope *scope.ManagedControlPlaneScope) error {\n\tdecodedSSHPublicKey, err := base64.StdEncoding.DecodeString(scope.ControlPlane.Spec.SSHPublicKey)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to decode SSHPublicKey\")\n\t}\n\n\tmanagedClusterSpec := &managedclusters.Spec{\n\t\tName: scope.ControlPlane.Name,\n\t\tResourceGroup: scope.ControlPlane.Spec.ResourceGroup,\n\t\tLocation: scope.ControlPlane.Spec.Location,\n\t\tTags: scope.ControlPlane.Spec.AdditionalTags,\n\t\tVersion: strings.TrimPrefix(scope.ControlPlane.Spec.Version, \"v\"),\n\t\tSSHPublicKey: string(decodedSSHPublicKey),\n\t\tDNSServiceIP: scope.ControlPlane.Spec.DNSServiceIP,\n\t}\n\n\tif scope.ControlPlane.Spec.NetworkPlugin != nil {\n\t\tmanagedClusterSpec.NetworkPlugin = *scope.ControlPlane.Spec.NetworkPlugin\n\t}\n\tif scope.ControlPlane.Spec.NetworkPolicy != nil {\n\t\tmanagedClusterSpec.NetworkPolicy = *scope.ControlPlane.Spec.NetworkPolicy\n\t}\n\tif scope.ControlPlane.Spec.LoadBalancerSKU != nil 
{\n\t\tmanagedClusterSpec.LoadBalancerSKU = *scope.ControlPlane.Spec.LoadBalancerSKU\n\t}\n\n\tscope.V(2).Info(\"Reconciling managed cluster resource group\")\n\tif err := r.groupsSvc.Reconcile(ctx); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to reconcile managed cluster resource group\")\n\t}\n\n\tscope.V(2).Info(\"Reconciling managed cluster\")\n\tif err := r.reconcileManagedCluster(ctx, scope, managedClusterSpec); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to reconcile managed cluster\")\n\t}\n\n\tscope.V(2).Info(\"Reconciling endpoint\")\n\tif err := r.reconcileEndpoint(ctx, scope, managedClusterSpec); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to reconcile control plane endpoint\")\n\t}\n\n\tscope.V(2).Info(\"Reconciling kubeconfig\")\n\tif err := r.reconcileKubeconfig(ctx, scope, managedClusterSpec); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to reconcile kubeconfig secret\")\n\t}\n\n\treturn nil\n}", "func CliStorageConfig(config RozoRes, storage string) (cid int, sid int, slot string, err error) {\n\t//TODO volume=1, export=1\n\tvid, cid := 1, 1\n\tfor _, vol := range config.RozoDetail.Volume {\n\t\tif vol.Cid == cid && vol.Vid == vid {\n\t\t\tcid = vol.Cid\n\t\t\tfor _, s := range vol.Sids {\n\t\t\t\tif s.Ip == storage {\n\t\t\t\t\tsid = s.Sid\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tslot = strconv.Itoa(cid) + \"_\" + strconv.Itoa(sid)\n\treturn\n}", "func InstallStorage(providers *ProviderConfig, server *genericapiserver.GenericAPIServer) error {\n\tinfo := BuildStorage(providers)\n\treturn server.InstallAPIGroup(&info)\n}", "func TestLoadMissingStorageFolder(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\tt.Parallel()\n\tcmt, err := newContractManagerTester(t.Name())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer cmt.panicClose()\n\n\t// Add a storage folder to the contract manager tester.\n\tstorageFolderDir := filepath.Join(cmt.persistDir, \"storageFolderOne\")\n\t// Create the storage folder 
dir.\n\terr = os.MkdirAll(storageFolderDir, 0700)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = cmt.cm.AddStorageFolder(storageFolderDir, modules.SectorSize*storageFolderGranularity*2)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Check that the storage folder has been added.\n\tsfs := cmt.cm.StorageFolders()\n\tif len(sfs) != 1 {\n\t\tt.Fatal(\"There should be one storage folder reported\")\n\t}\n\t// Check that the storage folder has the right path and size.\n\tif sfs[0].Path != storageFolderDir {\n\t\tt.Error(\"storage folder reported with wrong path\")\n\t}\n\tif sfs[0].Capacity != modules.SectorSize*storageFolderGranularity*2 {\n\t\tt.Error(\"storage folder reported with wrong sector size\")\n\t}\n\n\t// Add a sector to the storage folder.\n\troot, data := randSector()\n\terr = cmt.cm.AddSector(root, data)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Check that the sector was successfully added.\n\tsfs = cmt.cm.StorageFolders()\n\tif len(sfs) != 1 {\n\t\tt.Fatal(\"There should be one storage folder in the contract manager\", len(sfs))\n\t}\n\tif sfs[0].Capacity != sfs[0].CapacityRemaining+modules.SectorSize {\n\t\tt.Error(\"One sector's worth of capacity should be consumed:\", sfs[0].Capacity, sfs[0].CapacityRemaining)\n\t}\n\tsfOneIndex := sfs[0].Index\n\n\t// Try reloading the contract manager after the storage folder has been\n\t// moved somewhere else.\n\terr = cmt.cm.Close()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Move the storage folder directory to a new location - hiding it from the\n\t// contract manager.\n\terr = os.Rename(storageFolderDir, storageFolderDir+\"-moved\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Re-open the contract manager.\n\td := new(dependencyNoRecheck)\n\tcmt.cm, err = newContractManager(d, filepath.Join(cmt.persistDir, modules.ContractManagerDir))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// The contract manager should still be reporting the storage folder, but\n\t// with errors reported.\n\tsfs = 
cmt.cm.StorageFolders()\n\tif len(sfs) != 1 {\n\t\tt.Fatal(\"wrong number of storage folders being reported\")\n\t}\n\tif sfs[0].FailedReads < 100000000 {\n\t\tt.Error(\"Not enough failures reported for absent storage folder\")\n\t}\n\tif sfs[0].FailedWrites < 100000000 {\n\t\tt.Error(\"Not enough failures reported for absent storage folder\")\n\t}\n\tif sfs[0].Capacity != sfs[0].CapacityRemaining+modules.SectorSize {\n\t\tt.Error(\"One sector's worth of capacity should be consumed:\", sfs[0].Capacity, sfs[0].CapacityRemaining)\n\t}\n\n\t// Reload the contract manager and make sure the storage folder is still\n\t// there.\n\terr = cmt.cm.Close()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Re-open the contract manager.\n\tcmt.cm, err = newContractManager(d, filepath.Join(cmt.persistDir, modules.ContractManagerDir))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// The contract manager should still be reporting the storage folder with\n\t// errors.\n\tsfs = cmt.cm.StorageFolders()\n\tif len(sfs) != 1 {\n\t\tt.Fatal(\"wrong number of storage folders being reported\")\n\t}\n\tif sfs[0].FailedReads < 100000000 {\n\t\tt.Error(\"Not enough failures reported for absent storage folder\")\n\t}\n\tif sfs[0].FailedWrites < 100000000 {\n\t\tt.Error(\"Not enough failures reported for absent storage folder\")\n\t}\n\n\t// Try reading the sector from the missing storage folder.\n\t_, err = cmt.cm.ReadSector(root)\n\tif err == nil {\n\t\tt.Fatal(\"Expecting error when reading missing sector.\")\n\t}\n\n\t// Try adding a sector to the contract manager - no folder can receive it.\n\trootF, dataF := randSector()\n\terr = cmt.cm.AddSector(rootF, dataF)\n\tif err == nil {\n\t\tt.Error(\"should not be able to add sector\")\n\t}\n\n\t// Check that you can add folders, add sectors while the contract manager\n\t// correctly works around the missing storage folder.\n\tstorageFolderTwo := filepath.Join(cmt.persistDir, \"storageFolderTwo\")\n\terr = os.MkdirAll(storageFolderTwo, 0700)\n\tif 
err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = cmt.cm.AddStorageFolder(storageFolderTwo, modules.SectorSize*storageFolderGranularity*2)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Add a sector to the storage folder.\n\troot2, data2 := randSector()\n\terr = cmt.cm.AddSector(root2, data2)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Check that the sector was successfully added.\n\tsfs = cmt.cm.StorageFolders()\n\tif len(sfs) != 2 {\n\t\tt.Fatal(\"There should be one storage folder in the contract manager\", len(sfs))\n\t}\n\tfor i := range sfs {\n\t\tif sfs[i].Capacity != sfs[i].CapacityRemaining+modules.SectorSize {\n\t\t\tt.Error(\"One sector's worth of capacity should be consumed:\", sfs[i].Capacity, sfs[i].CapacityRemaining, sfs[i].Path)\n\t\t}\n\t}\n\tvar sfTwoIndex uint16\n\tif sfs[0].Index == sfOneIndex {\n\t\tsfTwoIndex = sfs[1].Index\n\t} else {\n\t\tsfTwoIndex = sfs[0].Index\n\t}\n\n\t// Add two more sectors.\n\troot3, data3 := randSector()\n\terr = cmt.cm.AddSector(root3, data3)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\troot4, data4 := randSector()\n\terr = cmt.cm.AddSector(root4, data4)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Check that the sector was successfully added.\n\tsfs = cmt.cm.StorageFolders()\n\tif len(sfs) != 2 {\n\t\tt.Fatal(\"There should be one storage folder in the contract manager\", len(sfs))\n\t}\n\tif sfs[0].Capacity != sfs[0].CapacityRemaining+modules.SectorSize*3 && sfs[1].Capacity != sfs[1].CapacityRemaining+modules.SectorSize*3 {\n\t\tt.Error(\"One sector's worth of capacity should be consumed\")\n\t}\n\n\t// Try to shrink the missing storage folder.\n\terr = cmt.cm.ResizeStorageFolder(sfOneIndex, modules.SectorSize*storageFolderGranularity, false)\n\tif err == nil {\n\t\tt.Fatal(\"should not be able to resize a missing storage folder\")\n\t}\n\terr = cmt.cm.ResizeStorageFolder(sfOneIndex, modules.SectorSize*storageFolderGranularity, true)\n\tif err == nil {\n\t\tt.Fatal(\"should not be able to resize a missing 
storage folder\")\n\t}\n\n\t// Check that the storage folder is still the original size.\n\tsfs = cmt.cm.StorageFolders()\n\tif len(sfs) != 2 {\n\t\tt.Fatal(\"wrong storage folder count\")\n\t}\n\tif sfs[0].Index == sfOneIndex && sfs[0].Capacity != modules.SectorSize*storageFolderGranularity*2 {\n\t\tt.Error(\"Storage folder has wrong size after failing to resize\")\n\t}\n\tif sfs[1].Index == sfOneIndex && sfs[1].Capacity != modules.SectorSize*storageFolderGranularity*2 {\n\t\tt.Error(\"Storage folder has wrong size after failing to resize\")\n\t}\n\n\t// Try to grow the missing storage folder.\n\terr = cmt.cm.ResizeStorageFolder(sfOneIndex, modules.SectorSize*storageFolderGranularity*4, false)\n\tif err == nil {\n\t\tt.Fatal(\"should not be able to resize a missing storage folder\")\n\t}\n\terr = cmt.cm.ResizeStorageFolder(sfOneIndex, modules.SectorSize*storageFolderGranularity*4, true)\n\tif err == nil {\n\t\tt.Fatal(\"should not be able to resize a missing storage folder\")\n\t}\n\n\t// Check that the storage folder is still the original size.\n\tsfs = cmt.cm.StorageFolders()\n\tif len(sfs) != 2 {\n\t\tt.Fatal(\"wrong storage folder count\")\n\t}\n\tif sfs[0].Index == sfOneIndex && sfs[0].Capacity != modules.SectorSize*storageFolderGranularity*2 {\n\t\tt.Error(\"Storage folder has wrong size after failing to resize\")\n\t}\n\tif sfs[1].Index == sfOneIndex && sfs[1].Capacity != modules.SectorSize*storageFolderGranularity*2 {\n\t\tt.Error(\"Storage folder has wrong size after failing to resize\")\n\t}\n\n\t// Check that you can delete sectors and have the contract manager work\n\t// correctly around the missing storage folder.\n\terr = cmt.cm.DeleteSector(root2)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = cmt.cm.DeleteSector(root3)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = cmt.cm.DeleteSector(root4)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Check that the sectors are no longer reported.\n\tsfs = cmt.cm.StorageFolders()\n\tif len(sfs) != 2 
{\n\t\tt.Fatal(\"There should be one storage folder in the contract manager\", len(sfs))\n\t}\n\tif sfs[0].Capacity != sfs[0].CapacityRemaining && sfs[1].Capacity != sfs[1].CapacityRemaining {\n\t\tt.Error(\"Deleted sector does not seem to have been deleted correctly.\")\n\t}\n\t// Try reading the deleted sector.\n\t_, err = cmt.cm.ReadSector(root2)\n\tif err == nil {\n\t\tt.Fatal(\"should get an error when reading a deleted sector\")\n\t}\n\n\t// Check that it's okay to shrink a storage folder while missing a storage\n\t// folder.\n\t//\n\t// Start by resizing the second storage folder so it can hold a lot of\n\t// sectors.\n\terr = cmt.cm.ResizeStorageFolder(sfTwoIndex, modules.SectorSize*storageFolderGranularity*4, false)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Add enough sectors to the storage folder that doing a shrink operation\n\t// causes sectors to be moved around.\n\tnum := int(storageFolderGranularity*3 + 2)\n\troots := make([]crypto.Hash, num)\n\tdatas := make([][]byte, num)\n\tvar wg sync.WaitGroup // Add in parallel to get massive performance boost.\n\tfor i := 0; i < num; i++ {\n\t\twg.Add(1)\n\t\tgo func(i int) {\n\t\t\tdefer wg.Done()\n\t\t\trootI, dataI := randSector()\n\t\t\troots[i] = rootI\n\t\t\tdatas[i] = dataI\n\t\t\terr := cmt.cm.AddSector(rootI, dataI)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t}(i)\n\t}\n\twg.Wait()\n\t// Make a new storage folder so the sectors have somewhere to go.\n\tstorageFolderThree := filepath.Join(cmt.persistDir, \"storageFolderThree\")\n\terr = os.MkdirAll(storageFolderThree, 0700)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = cmt.cm.AddStorageFolder(storageFolderThree, modules.SectorSize*storageFolderGranularity)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Shrink the second storage folder such that some of the sectors are forced\n\t// to move.\n\terr = cmt.cm.ResizeStorageFolder(sfTwoIndex, modules.SectorSize*storageFolderGranularity*3, false)\n\tif err != nil 
{\n\t\tt.Fatal(err)\n\t}\n\t// Check that all of the sectors are still recoverable.\n\tfor i := range roots {\n\t\tdata, err := cmt.cm.ReadSector(roots[i])\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif !bytes.Equal(data, datas[i]) {\n\t\t\tt.Error(\"read sector does not have the same data that was inserted\")\n\t\t}\n\t}\n\n\t// Shrink the second storage folder again, such that there is not enough\n\t// room in the available storage folders to accept the data.\n\terr = cmt.cm.ResizeStorageFolder(sfTwoIndex, modules.SectorSize*storageFolderGranularity*2, false)\n\tif err == nil {\n\t\tt.Fatal(\"expected an error\")\n\t}\n\t// Check that all of the sectors are still recoverable.\n\tfor i := range roots {\n\t\tdata, err := cmt.cm.ReadSector(roots[i])\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif !bytes.Equal(data, datas[i]) {\n\t\t\tt.Error(\"read sector does not have the same data that was inserted\")\n\t\t}\n\t}\n\n\t// Shrink the second storage folder again, such that there is not enough\n\t// room in the available storage folders to accept the data.\n\terr = cmt.cm.ResizeStorageFolder(sfTwoIndex, modules.SectorSize*storageFolderGranularity, true)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// There is now data loss.\n\n\t// Try deleting the second storage folder, which again will cause data loss.\n\terr = cmt.cm.RemoveStorageFolder(sfTwoIndex, false)\n\tif err == nil {\n\t\tt.Fatal(\"should have gotten an error when trying to remove the storage folder.\")\n\t}\n\terr = cmt.cm.RemoveStorageFolder(sfTwoIndex, true)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Try to recover the missing storage folder by closing and moving the\n\t// storage folder to the right place.\n\terr = cmt.cm.Close()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = os.Rename(storageFolderDir+\"-moved\", storageFolderDir)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Re-open the contract manager.\n\tcmt.cm, err = newContractManager(d, filepath.Join(cmt.persistDir, 
modules.ContractManagerDir))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// The contract manager should still be reporting the storage folder, but\n\t// with errors reported.\n\tsfs = cmt.cm.StorageFolders()\n\tif len(sfs) != 2 {\n\t\tt.Fatal(\"wrong number of storage folders being reported\")\n\t}\n\tvar sfOne modules.StorageFolderMetadata\n\tfor _, sf := range sfs {\n\t\tif sf.Index == sfOneIndex {\n\t\t\tsfOne = sf\n\t\t}\n\t}\n\tif sfOne.FailedReads > 0 {\n\t\tt.Error(\"folder should be visible again\")\n\t}\n\tif sfOne.FailedWrites > 0 {\n\t\tt.Error(\"folder should be visible again\")\n\t}\n\tif sfOne.Capacity != sfOne.CapacityRemaining+modules.SectorSize {\n\t\tcmt.cm.wal.mu.Lock()\n\t\tt.Log(\"Usage len:\", len(cmt.cm.storageFolders[sfOne.Index].usage))\n\t\tt.Log(\"Reported Sectors:\", cmt.cm.storageFolders[sfOne.Index].sectors)\n\t\tt.Log(\"Avail:\", len(cmt.cm.storageFolders[sfOne.Index].availableSectors))\n\t\tcmt.cm.wal.mu.Unlock()\n\t\tt.Error(\"One sector's worth of capacity should be consumed:\", sfOne.Capacity, sfOne.CapacityRemaining)\n\t}\n\n\t// See if the sector is still available.\n\trecoveredData, err := cmt.cm.ReadSector(root)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !bytes.Equal(recoveredData, data) {\n\t\tt.Error(\"recovered data is not equal to original data\")\n\t}\n\n\t// Redo the storage folder move, so we can test deleting a missing storage\n\t// folder.\n\terr = cmt.cm.Close()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Move the storage folder directory to a new location - hiding it from the\n\t// contract manager.\n\terr = os.Rename(storageFolderDir, storageFolderDir+\"-moved\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Re-open the contract manager.\n\tcmt.cm, err = newContractManager(d, filepath.Join(cmt.persistDir, modules.ContractManagerDir))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Try removing the storage folder without the --force option. 
It should\n\t// fail.\n\terr = cmt.cm.RemoveStorageFolder(sfOneIndex, false)\n\tif err == nil {\n\t\tt.Fatal(\"should have gotten an error\")\n\t}\n\tsfs = cmt.cm.StorageFolders()\n\tif len(sfs) != 2 {\n\t\tt.Error(\"there should be two storage folders after a removal failed.\")\n\t}\n\terr = cmt.cm.RemoveStorageFolder(sfOneIndex, true)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tsfs = cmt.cm.StorageFolders()\n\tif len(sfs) != 1 {\n\t\tt.Error(\"there should be only one storage folder remaining\")\n\t}\n\n\t// Close and re-open the contract maanger, storage folder should still be\n\t// missing.\n\terr = cmt.cm.Close()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Re-open the contract manager.\n\tcmt.cm, err = newContractManager(d, filepath.Join(cmt.persistDir, modules.ContractManagerDir))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tsfs = cmt.cm.StorageFolders()\n\tif len(sfs) != 1 {\n\t\tt.Error(\"there should be only one storage folder remaining\")\n\t}\n}", "func (c *VaultController) reconcileVault(vs *api.VaultServer, v Vault) error {\n\terr := c.CreateVaultTLSSecret(vs, v)\n\tif err != nil {\n\t\t_, err2 := patchutil.UpdateVaultServerStatus(\n\t\t\tcontext.TODO(),\n\t\t\tc.extClient.KubevaultV1alpha1(),\n\t\t\tvs.ObjectMeta,\n\t\t\tfunc(status *api.VaultServerStatus) *api.VaultServerStatus {\n\t\t\t\tstatus.Conditions = kmapi.SetCondition(status.Conditions, kmapi.Condition{\n\t\t\t\t\tType: kmapi.ConditionFailed,\n\t\t\t\t\tStatus: core.ConditionTrue,\n\t\t\t\t\tReason: \"FailedToCreateVaultTLSSecret\",\n\t\t\t\t\tMessage: err.Error(),\n\t\t\t\t})\n\t\t\t\treturn status\n\t\t\t},\n\t\t\tmetav1.UpdateOptions{},\n\t\t)\n\t\treturn utilerrors.NewAggregate([]error{err2, errors.Wrap(err, \"failed to create vault server tls secret\")})\n\t}\n\n\terr = c.CreateVaultConfig(vs, v)\n\tif err != nil {\n\t\t_, err2 := patchutil.UpdateVaultServerStatus(\n\t\t\tcontext.TODO(),\n\t\t\tc.extClient.KubevaultV1alpha1(),\n\t\t\tvs.ObjectMeta,\n\t\t\tfunc(status 
*api.VaultServerStatus) *api.VaultServerStatus {\n\t\t\t\tstatus.Conditions = kmapi.SetCondition(status.Conditions, kmapi.Condition{\n\t\t\t\t\tType: kmapi.ConditionFailed,\n\t\t\t\t\tStatus: core.ConditionTrue,\n\t\t\t\t\tReason: \"FailedToCreateVaultConfig\",\n\t\t\t\t\tMessage: err.Error(),\n\t\t\t\t})\n\t\t\t\treturn status\n\t\t\t},\n\t\t\tmetav1.UpdateOptions{},\n\t\t)\n\t\treturn utilerrors.NewAggregate([]error{err2, errors.Wrap(err, \"failed to create vault config\")})\n\t}\n\n\terr = c.DeployVault(vs, v)\n\tif err != nil {\n\t\t_, err2 := patchutil.UpdateVaultServerStatus(\n\t\t\tcontext.TODO(),\n\t\t\tc.extClient.KubevaultV1alpha1(),\n\t\t\tvs.ObjectMeta,\n\t\t\tfunc(status *api.VaultServerStatus) *api.VaultServerStatus {\n\t\t\t\tstatus.Conditions = kmapi.SetCondition(status.Conditions, kmapi.Condition{\n\t\t\t\t\tType: kmapi.ConditionFailed,\n\t\t\t\t\tStatus: core.ConditionTrue,\n\t\t\t\t\tReason: \"FailedToDeployVault\",\n\t\t\t\t\tMessage: err.Error(),\n\t\t\t\t})\n\t\t\t\treturn status\n\t\t\t},\n\t\t\tmetav1.UpdateOptions{},\n\t\t)\n\t\treturn utilerrors.NewAggregate([]error{err2, errors.Wrap(err, \"failed to deploy vault\")})\n\t}\n\n\terr = c.ensureAppBindings(vs, v)\n\tif err != nil {\n\t\t_, err2 := patchutil.UpdateVaultServerStatus(\n\t\t\tcontext.TODO(),\n\t\t\tc.extClient.KubevaultV1alpha1(),\n\t\t\tvs.ObjectMeta,\n\t\t\tfunc(status *api.VaultServerStatus) *api.VaultServerStatus {\n\t\t\t\tstatus.Conditions = kmapi.SetCondition(status.Conditions, kmapi.Condition{\n\t\t\t\t\tType: kmapi.ConditionFailed,\n\t\t\t\t\tStatus: core.ConditionTrue,\n\t\t\t\t\tReason: \"FailedToCreateAppBinding\",\n\t\t\t\t\tMessage: err.Error(),\n\t\t\t\t})\n\t\t\t\treturn status\n\t\t\t},\n\t\t\tmetav1.UpdateOptions{},\n\t\t)\n\t\treturn utilerrors.NewAggregate([]error{err2, errors.Wrap(err, \"failed to deploy vault\")})\n\t}\n\n\t_, err = 
patchutil.UpdateVaultServerStatus(\n\t\tcontext.TODO(),\n\t\tc.extClient.KubevaultV1alpha1(),\n\t\tvs.ObjectMeta,\n\t\tfunc(status *api.VaultServerStatus) *api.VaultServerStatus {\n\t\t\tstatus.ObservedGeneration = vs.Generation\n\t\t\tstatus.Conditions = kmapi.SetCondition(status.Conditions, kmapi.Condition{\n\t\t\t\tType: kmapi.ConditionReady,\n\t\t\t\tStatus: core.ConditionTrue,\n\t\t\t\tReason: \"Provisioned\",\n\t\t\t\tMessage: \"vault server is ready to use\",\n\t\t\t})\n\t\t\treturn status\n\t\t},\n\t\tmetav1.UpdateOptions{},\n\t)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to update status\")\n\t}\n\n\t// Add vault monitor to watch vault seal or unseal status\n\tkey := vs.GetKey()\n\tif _, ok := c.ctxCancels[key]; !ok {\n\t\tctx, cancel := context.WithCancel(context.Background())\n\t\tc.ctxCancels[key] = CtxWithCancel{\n\t\t\tCtx: ctx,\n\t\t\tCancel: cancel,\n\t\t}\n\t\tgo c.monitorAndUpdateStatus(ctx, vs)\n\t}\n\n\t// Run auth method reconcile\n\tc.runAuthMethodsReconcile(vs)\n\n\treturn nil\n}" ]
[ "0.59389216", "0.5565799", "0.5399797", "0.52743936", "0.5260172", "0.5217568", "0.519692", "0.5181096", "0.517504", "0.51422614", "0.5113219", "0.50962377", "0.5088531", "0.5082477", "0.50752294", "0.5073554", "0.5038864", "0.5020849", "0.5003397", "0.49999294", "0.49972135", "0.49858305", "0.49758136", "0.49638623", "0.4962514", "0.49594685", "0.49281842", "0.49233505", "0.49140435", "0.49136874", "0.49098244", "0.49089187", "0.48938924", "0.48915055", "0.4889117", "0.4884259", "0.4877312", "0.48761517", "0.4872681", "0.48693845", "0.48630154", "0.48580995", "0.4848775", "0.48366603", "0.4835294", "0.4827882", "0.48273766", "0.4820793", "0.48194987", "0.48087835", "0.48047385", "0.4801728", "0.47962308", "0.47887006", "0.47845197", "0.47842756", "0.47812098", "0.47801957", "0.4779234", "0.47711274", "0.47650158", "0.4759786", "0.4755878", "0.47522724", "0.4751022", "0.47506756", "0.47454116", "0.4745371", "0.47440323", "0.4742694", "0.47421044", "0.47405434", "0.4732694", "0.4728059", "0.4727194", "0.47198474", "0.47194827", "0.4716758", "0.47118923", "0.4710508", "0.47087592", "0.46978235", "0.46958655", "0.46877447", "0.4687143", "0.4684256", "0.46824485", "0.46815738", "0.46809334", "0.46761376", "0.4675461", "0.46721938", "0.46673706", "0.46648782", "0.46623448", "0.46595725", "0.46580163", "0.4656938", "0.4655599", "0.4655272" ]
0.8529653
0
Equals returns true if the tags are equal.
func (in Labels) Equals(other Labels) bool { return reflect.DeepEqual(in, other) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (t Tags) Equal(other Tags) bool {\n\tif len(t.Values()) != len(other.Values()) {\n\t\treturn false\n\t}\n\tfor i := 0; i < len(t.Values()); i++ {\n\t\tequal := t.values[i].Name.Equal(other.values[i].Name) &&\n\t\t\tt.values[i].Value.Equal(other.values[i].Value)\n\t\tif !equal {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (t Tag) Equal(value Tag) bool {\n\treturn t.Name.Equal(value.Name) && t.Value.Equal(value.Value)\n}", "func (row TagApp) Equals(rhs TagApp) bool {\n\tif row.TagAppID != rhs.TagAppID {\n\t\treturn false\n\t}\n\tif row.Name != rhs.Name {\n\t\treturn false\n\t}\n\tif row.Weight != rhs.Weight {\n\t\treturn false\n\t}\n\tif row.ArchivedAt != nil || rhs.ArchivedAt != nil {\n\t\tif row.ArchivedAt == nil || rhs.ArchivedAt == nil {\n\t\t\treturn false\n\t\t}\n\t\tif !row.ArchivedAt.Equal(*rhs.ArchivedAt) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func AssertTagsEqual(t assert.TestingT, expected, actual []string) {\n\tif assert.Equal(t, len(expected), len(actual), fmt.Sprintf(\"Unexpected number of tags: expected %s, actual: %s\", expected, actual)) {\n\t\tfor _, tag := range expected {\n\t\t\tassert.Contains(t, actual, tag)\n\t\t}\n\t}\n}", "func CompareTags(tags []v1alpha1.Tag, ecrTags []ecr.Tag) bool {\n\tif len(tags) != len(ecrTags) {\n\t\treturn false\n\t}\n\n\tSortTags(tags, ecrTags)\n\n\tfor i, t := range tags {\n\t\tif t.Key != aws.StringValue(ecrTags[i].Key) || t.Value != aws.StringValue(ecrTags[i].Value) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func equalsRest(a, b language.Tag) bool {\n\t// TODO: don't include extensions in this comparison. 
To do this efficiently,\n\t// though, we should handle private tags separately.\n\treturn a.ScriptID == b.ScriptID && a.RegionID == b.RegionID && a.VariantOrPrivateUseTags() == b.VariantOrPrivateUseTags()\n}", "func tagArraysEqual(a []GeneratedType, b []string) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor i := 0; i < len(a); i++ {\n\t\tfound := false\n\t\tfor j := 0; j < len(b); j++ {\n\t\t\tif a[i].(string) == b[j] {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (t TxPublish) Equals(that TxPublish) bool {\n\treturn t.Name == that.Name &&\n\t\tt.Size == that.Size &&\n\t\ttools.BytesToHexString(t.MetafileHash) == tools.BytesToHexString(that.MetafileHash)\n}", "func (a Attributes) Equal(b Attributes) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor _, attr := range a {\n\t\tv := b.Value(attr.Key)\n\t\tif !bytes.Equal(v, attr.Value) {\n\t\t\treturn false\n\t\t}\n\t}\n\tfor _, attr := range b {\n\t\tv := a.Value(attr.Key)\n\t\tif !bytes.Equal(v, attr.Value) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (e *Element) Equals(target *Element) bool {\n\tif target == nil || e == nil {\n\t\treturn e == target\n\t}\n\tif !e.Tag.Equals(target.Tag) ||\n\t\te.RawValueRepresentation != target.RawValueRepresentation ||\n\t\te.ValueLength != target.ValueLength ||\n\t\te.ValueRepresentation != target.ValueRepresentation {\n\t\treturn false\n\t}\n\tif !e.Value.Equals(target.Value) {\n\t\treturn false\n\t}\n\treturn true\n}", "func (s *Set) Equals(s2 *Set) bool {\n\treturn reflect.DeepEqual(s.set, s2.set)\n}", "func (a *Mtx) Equals(b *Mtx) bool {\n\tfor i := 0; i < 4; i++ {\n\t\tfor j := 0; j < 4; j++ {\n\t\t\tif a.el[i][j] != b.el[i][j] {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\treturn true\n}", "func (l *Label) Equal(r *Label) bool {\n\treturn l == r || l.key == r.key\n}", "func (t *token) Equal(tt *token) bool {\n\treturn t.code == tt.code && t.Text == 
tt.Text\n}", "func (l *LabelPair) Equal(o *LabelPair) bool {\n\tswitch {\n\tcase l.Name != o.Name:\n\t\treturn false\n\tcase l.Value != o.Value:\n\t\treturn false\n\tdefault:\n\t\treturn true\n\t}\n}", "func (e IfChange) Equal(e2 IfChange) bool {\n\treturn e.Added == e2.Added &&\n\t\te.Deleted == e2.Deleted &&\n\t\te.Attrs.Equal(e2.Attrs)\n}", "func (node *SimpleNode) Equals(node2 Node) bool {\n\tif node == nil {\n\t\treturn false\n\t}\n\n\tif IsNil(node2) {\n\t\treturn false\n\t}\n\n\ttag := node2.Tag()\n\tif node.tag != tag {\n\t\treturn false\n\t}\n\n\tvalue := node2.Value()\n\tif node.value != value {\n\t\treturn false\n\t}\n\n\treturn node.pointer == node2.Pointer()\n}", "func (pair *eachPair) Equals(el interface {}) (bool) { \n other, ok := el.(*eachPair) \n if !ok { return false; }\n return pair == other\n}", "func (e Exemplar) Equals(e2 Exemplar) bool {\n\tif !labels.Equal(e.Labels, e2.Labels) {\n\t\treturn false\n\t}\n\n\tif (e.HasTs || e2.HasTs) && e.Ts != e2.Ts {\n\t\treturn false\n\t}\n\n\treturn e.Value == e2.Value\n}", "func (s BlobSet) Equals(other BlobSet) bool {\n\tif len(s) != len(other) {\n\t\treturn false\n\t}\n\n\tfor h := range s {\n\t\tif _, ok := other[h]; !ok {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func (s *Stack) Equals(test []bool) bool {\n\tif len(s.cakes) != len(test) {\n\t\treturn false\n\t}\n\tfor i := 0; i < len(s.cakes); i++ {\n\t\tif s.cakes[i] != test[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (t *TransactionPayload) Equal(other *TransactionPayload) bool {\n\treturn bytes.Equal(t.Data, other.Data)\n}", "func tagsContainsAll(as []*tag, bs []*tag) bool {\n\tfor _, a := range as {\n\t\tfound := false\n\t\tfor _, b := range bs {\n\t\t\tif a.Equal(b) {\n\t\t\t\tfound = true\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (o *Echo) IsEqual(other *Echo) bool {\n\treturn o.GetID() == other.GetID()\n}", "func (n *node) equal(other *node) bool {\n\treturn 
n.value == other.value && n.childrenEqual(other)\n}", "func Same(t1, t2 *tree.Tree) bool {\n\tch1, ch2 := Walker(t1), Walker(t2)\n\n\tfor {\n\n\t\tv1, ok1 := <-ch1\n\t\tv2, ok2 := <-ch2\n\n\t\tif !ok1 || !ok2 {\n\t\t\treturn ok1 == ok2\n\t\t}\n\t\tif v1 != v2 {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn false\n}", "func containsTags(a map[string]string, b []*elb.Tag) bool {\n\tfor k, v := range a {\n\t\tt := elbTag(k, v)\n\t\tif !containsTag(t, b) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (s Sequence) equal(other Node) bool {\n\to, ok := other.(Sequence)\n\tif !ok || len(s.elements) != len(o.elements) || s.path != o.path {\n\t\treturn false\n\t}\n\tif s.elements == nil || o.elements == nil {\n\t\treturn s.elements == nil && o.elements == nil\n\t}\n\tfor i, v := range o.elements {\n\t\tif !equal(s.elements[i], v) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func TagsContainsAll(as []*Tag, bs []*Tag) bool {\n\tfor _, a := range as {\n\t\tfound := false\n\t\tfor _, b := range bs {\n\t\t\tif a.Equal(b) {\n\t\t\t\tfound = true\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func AssertCompositeTagsEqual(t assert.TestingT, expected, actual tagset.CompositeTags) {\n\tvar expectedTags []string\n\texpected.ForEach(func(tag string) { expectedTags = append(expectedTags, tag) })\n\n\tvar actualTags []string\n\tactual.ForEach(func(tag string) { actualTags = append(actualTags, tag) })\n\n\tAssertTagsEqual(t, expectedTags, actualTags)\n}", "func (t *Text) Equals(other *Text) bool {\n\treturn (t.Title() == other.Title()) && (t.Body() == other.Body())\n}", "func xmlEqual(x, y interface{}) bool {\n\treturn cmp.Equal(x, y, xmlOpts())\n}", "func (td TupleDesc) Equals(other TupleDesc) bool {\n\tif len(td.Types) != len(other.Types) {\n\t\treturn false\n\t}\n\tfor i, typ := range td.Types {\n\t\tif typ != other.Types[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (g *Graph) Equal(g2 *Graph, debug bool) bool 
{\n\n\t// Check the vertices\n\tkeys1 := g.listOfKeys()\n\tkeys2 := g2.listOfKeys()\n\n\tif !SlicesHaveSameElements(&keys1, &keys2) {\n\t\tif debug {\n\t\t\tlog.Println(\"Lists of keys are different\")\n\t\t\tlog.Printf(\"Keys1: %v\\n\", keys1)\n\t\t\tlog.Printf(\"Keys2: %v\\n\", keys2)\n\t\t}\n\t\treturn false\n\t}\n\n\t// Walk through each vertex and check its connections\n\tfor _, vertex := range keys1 {\n\t\tconns1 := g.Nodes[vertex]\n\t\tconns2 := g2.Nodes[vertex]\n\n\t\tif !SetsEqual(conns1, conns2) {\n\t\t\tif debug {\n\t\t\t\tlog.Printf(\"Connections different for vertex %v\", vertex)\n\t\t\t\tlog.Printf(\"Connections 1: %v\\n\", conns1)\n\t\t\t\tlog.Printf(\"Connections 2: %v\\n\", conns2)\n\t\t\t}\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func hasTag(tags []*ec2.Tag, Key string, value string) bool {\n\tfor i := range tags {\n\t\tif *tags[i].Key == Key && *tags[i].Value == value {\n\t\t\tlog.Printf(\"\\t\\tTag %s already set with value %s\\n\",\n\t\t\t\t*tags[i].Key,\n\t\t\t\t*tags[i].Value)\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (nd *Node) Equals(other *Node) bool {\n\treturn nd.name == other.name\n}", "func Equal(a, b Sequence) bool {\n\treturn reflect.DeepEqual(a.Info(), b.Info()) &&\n\t\treflect.DeepEqual(a.Features(), b.Features()) &&\n\t\tbytes.Equal(a.Bytes(), b.Bytes())\n}", "func Equal(t1, t2 Token) bool {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tif t1 == nil && t2 == nil {\n\t\treturn true\n\t}\n\n\t// we already checked for t1 == t2 == nil, so safe to do this\n\tif t1 == nil || t2 == nil {\n\t\treturn false\n\t}\n\n\tm1, err := t1.AsMap(ctx)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tfor iter := t2.Iterate(ctx); iter.Next(ctx); {\n\t\tpair := iter.Pair()\n\n\t\tv1 := m1[pair.Key.(string)]\n\t\tv2 := pair.Value\n\t\tswitch tmp := v1.(type) {\n\t\tcase time.Time:\n\t\t\ttmp2, ok := v2.(time.Time)\n\t\t\tif !ok {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\ttmp = 
tmp.Round(0).Truncate(time.Second)\n\t\t\ttmp2 = tmp2.Round(0).Truncate(time.Second)\n\t\t\tif !tmp.Equal(tmp2) {\n\t\t\t\treturn false\n\t\t\t}\n\t\tdefault:\n\t\t\tif v1 != v2 {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\tdelete(m1, pair.Key.(string))\n\t}\n\n\treturn len(m1) == 0\n}", "func (p Pair) Equal(pair Pair) bool {\n\treturn p.String() == pair.String()\n}", "func (t *TableValues) Equals(t2 *TableValues) bool {\n\tif t == nil || t2 == nil || t.number != t2.number || t.size != t2.size || len(t.values) != len(t2.values) {\n\t\treturn false\n\t}\n\t// now check the contents\n\tfor i, val := range t.values {\n\t\tif val != t2.values[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (a AttributeValue) Equal(av AttributeValue) bool {\n\tif a.orig == av.orig {\n\t\treturn true\n\t}\n\n\tif a.orig.Value == nil || av.orig.Value == nil {\n\t\treturn a.orig.Value == av.orig.Value\n\t}\n\n\tswitch v := a.orig.Value.(type) {\n\tcase *otlpcommon.AnyValue_StringValue:\n\t\treturn v.StringValue == av.orig.GetStringValue()\n\tcase *otlpcommon.AnyValue_BoolValue:\n\t\treturn v.BoolValue == av.orig.GetBoolValue()\n\tcase *otlpcommon.AnyValue_IntValue:\n\t\treturn v.IntValue == av.orig.GetIntValue()\n\tcase *otlpcommon.AnyValue_DoubleValue:\n\t\treturn v.DoubleValue == av.orig.GetDoubleValue()\n\tcase *otlpcommon.AnyValue_ArrayValue:\n\t\tvv := v.ArrayValue.GetValues()\n\t\tavv := av.orig.GetArrayValue().GetValues()\n\t\tif len(vv) != len(avv) {\n\t\t\treturn false\n\t\t}\n\n\t\tfor i, val := range avv {\n\t\t\tval := val\n\t\t\tav := newAttributeValue(&vv[i])\n\n\t\t\t// According to the specification, array values must be scalar.\n\t\t\tif avType := av.Type(); avType == AttributeValueARRAY || avType == AttributeValueMAP {\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\tif !av.Equal(newAttributeValue(&val)) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\n\t// TODO: handle MAP data type\n\treturn false\n}", "func Equal(g1, g2 InstanceGroup) bool {\n\tif 
g1 == g2 {\n\t\treturn true\n\t}\n\n\tif g1.App() != g2.App() {\n\t\treturn false\n\t}\n\n\tif g1.Account() != g2.Account() {\n\t\treturn false\n\t}\n\n\tr1, ok1 := g1.Region()\n\tr2, ok2 := g2.Region()\n\tif ok1 != ok2 {\n\t\treturn false\n\t}\n\n\tif ok1 && (r1 != r2) {\n\t\treturn false\n\t}\n\n\ts1, ok1 := g1.Stack()\n\ts2, ok2 := g2.Stack()\n\n\tif ok1 != ok2 {\n\t\treturn false\n\t}\n\n\tif ok1 && (s1 != s2) {\n\t\treturn false\n\t}\n\n\tc1, ok1 := g1.Cluster()\n\tc2, ok2 := g2.Cluster()\n\n\tif ok1 != ok2 {\n\t\treturn false\n\t}\n\n\tif ok1 && (c1 != c2) {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func Equals(a, b interface{}) bool {\n\treturn neogointernal.Opcode2(\"EQUAL\", a, b).(bool)\n}", "func (t *Tags) IsNil() bool {\n\treturn t == nil\n}", "func (s *Set) Equals(s2 *Set) bool {\n\tif s2 == nil || s.Len() != s2.Len() {\n\t\treturn false\n\t}\n\n\tfor element, _ := range *s {\n\t\tif !s2.Contains(element) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func (s *Series) match(tags map[string]string) bool {\n\tfor k, v := range tags {\n\t\tif s.Tags[k] != v {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (z *Element22) Equal(x *Element22) bool {\n\treturn (z[21] == x[21]) && (z[20] == x[20]) && (z[19] == x[19]) && (z[18] == x[18]) && (z[17] == x[17]) && (z[16] == x[16]) && (z[15] == x[15]) && (z[14] == x[14]) && (z[13] == x[13]) && (z[12] == x[12]) && (z[11] == x[11]) && (z[10] == x[10]) && (z[9] == x[9]) && (z[8] == x[8]) && (z[7] == x[7]) && (z[6] == x[6]) && (z[5] == x[5]) && (z[4] == x[4]) && (z[3] == x[3]) && (z[2] == x[2]) && (z[1] == x[1]) && (z[0] == x[0])\n}", "func (t Token) Equal(t2 Token) bool {\n\tif t.TokenType == t2.TokenType && bytes.Equal(t.Data, t2.Data) && len(t.Args) == len(t2.Args) {\n\t\tfor i := 0; i < len(t.Args); i++ {\n\t\t\tif t.Args[i].TokenType != t2.Args[i].TokenType || !bytes.Equal(t.Args[i].Data, t2.Args[i].Data) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\treturn false\n}", 
"func (rs Ranges) Equal(bs Ranges) bool {\n\tif len(rs) != len(bs) {\n\t\treturn false\n\t}\n\tif rs == nil || bs == nil {\n\t\treturn true\n\t}\n\tfor i := range rs {\n\t\tif rs[i] != bs[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (t Token) Equal(b Token) bool {\n\treturn bytes.Equal(t, b)\n}", "func (kt KeyToken) Equal(okt KeyToken) bool {\n\tif kt.Tok.IsKeyword() && kt.Key != \"\" {\n\t\treturn kt.Tok == okt.Tok && kt.Key == okt.Key\n\t}\n\treturn kt.Tok == okt.Tok\n}", "func (t TestContent) Equals(other merkletree.Content) (bool, error) {\n\treturn t.x == other.(TestContent).x, nil\n}", "func (n *Node) Equals(nn *Node) bool {\n\tif nn == nil {\n\t\treturn false\n\t}\n\treturn n.Nodestr == nn.Nodestr\n}", "func instanceConfigEqual(current, desired *InstanceConfig) bool {\n\tif current.UserData != desired.UserData {\n\t\treturn false\n\t}\n\n\tif current.ImageID != desired.ImageID {\n\t\treturn false\n\t}\n\n\t// TODO: explain\n\tfor k, v := range desired.Tags {\n\t\tif currentValue, ok := current.Tags[k]; !ok || v != currentValue {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (k1 *KeyAuth) Equal(k2 *KeyAuth) bool {\n\treturn reflect.DeepEqual(k1.KeyAuth, k2.KeyAuth)\n}", "func (s SetValue) Equal(o attr.Value) bool {\n\tother, ok := o.(SetValue)\n\n\tif !ok {\n\t\treturn false\n\t}\n\n\tif !s.elementType.Equal(other.elementType) {\n\t\treturn false\n\t}\n\n\tif s.state != other.state {\n\t\treturn false\n\t}\n\n\tif s.state != attr.ValueStateKnown {\n\t\treturn true\n\t}\n\n\tif len(s.elements) != len(other.elements) {\n\t\treturn false\n\t}\n\n\tfor _, elem := range s.elements {\n\t\tif !other.contains(elem) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func (s L4Service) Equal(s2 L4Service) bool {\n\tif len(s.Endpoints) != len(s2.Endpoints) {\n\t\treturn false\n\t}\n\tfor _, s1e := range s.Endpoints {\n\t\tfound := false\n\t\tfor _, s2e := range s2.Endpoints {\n\t\t\tif reflect.DeepEqual(s1e, s2e) {\n\t\t\t\tfound = 
true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\treturn false\n\t\t}\n\t}\n\ts.Endpoints = nil\n\ts2.Endpoints = nil\n\n\treturn reflect.DeepEqual(s, s2)\n}", "func (t Table) Equals(other core.DxfElement) bool {\n\tif otherTable, ok := other.(Table); ok {\n\t\tif len(t) != len(otherTable) {\n\t\t\treturn false\n\t\t}\n\n\t\tfor key, element := range t {\n\t\t\tif otherElement, ok := otherTable[key]; ok {\n\t\t\t\tif !element.Equals(otherElement) {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t}\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (av *Attributes) IsEqualTo(dest *Attributes) bool {\n\taLen := len(av.attributes)\n\tif aLen != len(dest.attributes) {\n\t\treturn false\n\t}\n\n\tvisited := make([]bool, aLen)\n\tfor i := 0; i < aLen; i++ {\n\t\ta := &av.attributes[i]\n\t\tfound := false\n\t\tfor j := 0; j < aLen; j++ {\n\t\t\tif visited[j] {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif reflect.DeepEqual(*a, dest.attributes[j]) {\n\t\t\t\tvisited[j] = true\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif !found {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func Equals(a, b ContextKey) bool {\n\treturn a == b\n}", "func (j JID) Equal(j2 JID) bool {\n\tif len(j.data) != len(j2.data) {\n\t\treturn false\n\t}\n\tfor i := 0; i < len(j.data); i++ {\n\t\tif j.data[i] != j2.data[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn j.locallen == j2.locallen && j.domainlen == j2.domainlen\n}", "func (in *EntitySlice) DeepEqual(other *EntitySlice) bool {\n\tif other == nil {\n\t\treturn false\n\t}\n\n\tif len(*in) != len(*other) {\n\t\treturn false\n\t} else {\n\t\tfor i, inElement := range *in {\n\t\t\tif inElement != (*other)[i] {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\n\treturn true\n}", "func (bs BitStream) IsEqual(other BitStream) bool {\n\tif len(bs.bits) != len(other.bits) {\n\t\treturn false\n\t}\n\n\tfor i, b := range bs.bits {\n\t\tif b != other.bits[i] {\n\t\t\treturn 
false\n\t\t}\n\t}\n\n\treturn true\n}", "func (ts Tests) Equals(t *testing.T) {\n\tfor _, test := range ts {\n\t\ttest.Equals(t)\n\t}\n}", "func Equal(t, other Tuplelike) bool {\n\tfor idx, value := range t.Values() {\n\t\tif !inEpsilon(value, other.At(idx)) {\n\t\t\treturn false\n\t\t}\n\n\t}\n\treturn true\n}", "func (sc StorageConfig) Equals(other StorageConfig) bool {\n\treturn reflect.DeepEqual(sc, other)\n}", "func (b Bytes) Equal(o Bytes) bool { return bytes.Equal(b.Bytes(), o.Bytes()) }", "func (v *Values) Equal(other *Values) bool {\n\tv.lock.RLock()\n\tdefer v.lock.RUnlock()\n\tother.lock.RLock()\n\tdefer other.lock.RUnlock()\n\n\treturn v.root.equal(other.root)\n}", "func (ja *Array) Equals(other *Array) bool {\n\tif ja.Values == nil || other.Values == nil || len(ja.Values) != len(other.Values) {\n\t\treturn false\n\t}\n\n\tfor _, lv := range ja.Values {\n\t\tvar ok bool\n\t\tfor _, rv := range other.Values {\n\t\t\tok = compareValues(lv, rv)\n\t\t\tif ok {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !ok {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func Equal(s1, s2 Set) bool {\n\tif Same(s1, s2) {\n\t\treturn true\n\t}\n\n\tif len(s1) != len(s2) {\n\t\treturn false\n\t}\n\n\tfor e := range s1 {\n\t\tif _, ok := s2[e]; !ok {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func (v Feature) Equals(rhs Feature) bool {\n\treturn v == rhs\n}", "func (b *BooleanObject) equal(e *BooleanObject) bool {\n\treturn b.value == e.value\n}", "func (n NodeID) Equal(other NodeID) bool {\n\treturn bytes.Equal(n.bytes(), other.bytes())\n}", "func TagsEQ(v string) predicate.Project {\n\treturn predicate.Project(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldTags), v))\n\t})\n}", "func (in *ToGroups) DeepEqual(other *ToGroups) bool {\n\tif other == nil {\n\t\treturn false\n\t}\n\n\tif (in.AWS == nil) != (other.AWS == nil) {\n\t\treturn false\n\t} else if in.AWS != nil {\n\t\tif !in.AWS.DeepEqual(other.AWS) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn 
true\n}", "func (d dag) equalNodes(that dag) bool {\n\tif len(d) != len(that) {\n\t\treturn false\n\t}\n\tfor id := range d {\n\t\tif _, ok := that[id]; !ok {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func Equal(a *ROBDD, b *ROBDD) (bool, error) {\n\tif !reflect.DeepEqual(a.Vocabulary, b.Vocabulary) {\n\t\treturn false, fmt.Errorf(\"Mismatched vocabularies in GraphEqual: %v, %v\", a.Vocabulary, b.Vocabulary)\n\t}\n\treturn seq.Equal(a.Node, b.Node)\n}", "func (t Token) Equal(v Token) bool {\n\treturn t.ID == v.ID &&\n\t\tt.Class == v.Class &&\n\t\tt.Surface == v.Surface\n}", "func (iter *RbTreeIterator) Equal(other iterator.ConstIterator) bool {\n\totherIter, ok := other.(*RbTreeIterator)\n\tif !ok {\n\t\treturn false\n\t}\n\tif otherIter.node == iter.node {\n\t\treturn true\n\t}\n\treturn false\n}", "func (s Set) Equal(t Set) bool {\n\tif len(s) != len(t) {\n\t\treturn false\n\t}\n\n\tfor el := range s {\n\t\tif !t.Contain(el) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func (t *Token) Equals(other *Token) bool {\n\tif t == other {\n\t\treturn true\n\t}\n\n\treturn t.ChainID == other.ChainID && t.Address == other.Address\n}", "func (dtagTR DTagTransferRequest) Equals(other DTagTransferRequest) bool {\n\treturn dtagTR.DTagToTrade == other.DTagToTrade &&\n\t\tdtagTR.CurrentOwner.Equals(other.CurrentOwner) &&\n\t\tdtagTR.ReceivingUser.Equals(other.ReceivingUser)\n}", "func identical(x, y Type, cmpTags bool, p *ifacePair) bool {\n\tif x == y {\n\t\treturn true\n\t}\n\n\tswitch x := x.(type) {\n\tcase *Basic:\n\t\t// Basic types are singletons except for the rune and byte\n\t\t// aliases, thus we cannot solely rely on the x == y check\n\t\t// above. 
See also comment in TypeName.IsAlias.\n\t\tif y, ok := y.(*Basic); ok {\n\t\t\treturn x.kind == y.kind\n\t\t}\n\n\tcase *Array:\n\t\t// Two array types are identical if they have identical element types\n\t\t// and the same array length.\n\t\tif y, ok := y.(*Array); ok {\n\t\t\t// If one or both array lengths are unknown (< 0) due to some error,\n\t\t\t// assume they are the same to avoid spurious follow-on errors.\n\t\t\treturn (x.len < 0 || y.len < 0 || x.len == y.len) && identical(x.elem, y.elem, cmpTags, p)\n\t\t}\n\n\tcase *Slice:\n\t\t// Two slice types are identical if they have identical element types.\n\t\tif y, ok := y.(*Slice); ok {\n\t\t\treturn identical(x.elem, y.elem, cmpTags, p)\n\t\t}\n\n\tcase *Struct:\n\t\t// Two struct types are identical if they have the same sequence of fields,\n\t\t// and if corresponding fields have the same names, and identical types,\n\t\t// and identical tags. Two embedded fields are considered to have the same\n\t\t// name. Lower-case field names from different packages are always different.\n\t\tif y, ok := y.(*Struct); ok {\n\t\t\tif x.NumFields() == y.NumFields() {\n\t\t\t\tfor i, f := range x.fields {\n\t\t\t\t\tg := y.fields[i]\n\t\t\t\t\tif f.embedded != g.embedded ||\n\t\t\t\t\t\tcmpTags && x.Tag(i) != y.Tag(i) ||\n\t\t\t\t\t\t!f.sameId(g.pkg, g.name) ||\n\t\t\t\t\t\t!identical(f.typ, g.typ, cmpTags, p) {\n\t\t\t\t\t\treturn false\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\n\tcase *Pointer:\n\t\t// Two pointer types are identical if they have identical base types.\n\t\tif y, ok := y.(*Pointer); ok {\n\t\t\treturn identical(x.base, y.base, cmpTags, p)\n\t\t}\n\n\tcase *Tuple:\n\t\t// Two tuples types are identical if they have the same number of elements\n\t\t// and corresponding elements have identical types.\n\t\tif y, ok := y.(*Tuple); ok {\n\t\t\tif x.Len() == y.Len() {\n\t\t\t\tif x != nil {\n\t\t\t\t\tfor i, v := range x.vars {\n\t\t\t\t\t\tw := y.vars[i]\n\t\t\t\t\t\tif !identical(v.typ, 
w.typ, cmpTags, p) {\n\t\t\t\t\t\t\treturn false\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\n\tcase *Signature:\n\t\ty, _ := y.(*Signature)\n\t\tif y == nil {\n\t\t\treturn false\n\t\t}\n\n\t\t// Two function types are identical if they have the same number of\n\t\t// parameters and result values, corresponding parameter and result types\n\t\t// are identical, and either both functions are variadic or neither is.\n\t\t// Parameter and result names are not required to match, and type\n\t\t// parameters are considered identical modulo renaming.\n\n\t\tif x.TypeParams().Len() != y.TypeParams().Len() {\n\t\t\treturn false\n\t\t}\n\n\t\t// In the case of generic signatures, we will substitute in yparams and\n\t\t// yresults.\n\t\typarams := y.params\n\t\tyresults := y.results\n\n\t\tif x.TypeParams().Len() > 0 {\n\t\t\t// We must ignore type parameter names when comparing x and y. The\n\t\t\t// easiest way to do this is to substitute x's type parameters for y's.\n\t\t\txtparams := x.TypeParams().list()\n\t\t\tytparams := y.TypeParams().list()\n\n\t\t\tvar targs []Type\n\t\t\tfor i := range xtparams {\n\t\t\t\ttargs = append(targs, x.TypeParams().At(i))\n\t\t\t}\n\t\t\tsmap := makeSubstMap(ytparams, targs)\n\n\t\t\tvar check *Checker // ok to call subst on a nil *Checker\n\n\t\t\t// Constraints must be pair-wise identical, after substitution.\n\t\t\tfor i, xtparam := range xtparams {\n\t\t\t\tybound := check.subst(token.NoPos, ytparams[i].bound, smap, nil)\n\t\t\t\tif !identical(xtparam.bound, ybound, cmpTags, p) {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t}\n\n\t\t\typarams = check.subst(token.NoPos, y.params, smap, nil).(*Tuple)\n\t\t\tyresults = check.subst(token.NoPos, y.results, smap, nil).(*Tuple)\n\t\t}\n\n\t\treturn x.variadic == y.variadic &&\n\t\t\tidentical(x.params, yparams, cmpTags, p) &&\n\t\t\tidentical(x.results, yresults, cmpTags, p)\n\n\tcase *Union:\n\t\tif y, _ := y.(*Union); y != nil {\n\t\t\t// TODO(rfindley): can 
this be reached during type checking? If so,\n\t\t\t// consider passing a type set map.\n\t\t\tunionSets := make(map[*Union]*_TypeSet)\n\t\t\txset := computeUnionTypeSet(nil, unionSets, token.NoPos, x)\n\t\t\tyset := computeUnionTypeSet(nil, unionSets, token.NoPos, y)\n\t\t\treturn xset.terms.equal(yset.terms)\n\t\t}\n\n\tcase *Interface:\n\t\t// Two interface types are identical if they describe the same type sets.\n\t\t// With the existing implementation restriction, this simplifies to:\n\t\t//\n\t\t// Two interface types are identical if they have the same set of methods with\n\t\t// the same names and identical function types, and if any type restrictions\n\t\t// are the same. Lower-case method names from different packages are always\n\t\t// different. The order of the methods is irrelevant.\n\t\tif y, ok := y.(*Interface); ok {\n\t\t\txset := x.typeSet()\n\t\t\tyset := y.typeSet()\n\t\t\tif xset.comparable != yset.comparable {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif !xset.terms.equal(yset.terms) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\ta := xset.methods\n\t\t\tb := yset.methods\n\t\t\tif len(a) == len(b) {\n\t\t\t\t// Interface types are the only types where cycles can occur\n\t\t\t\t// that are not \"terminated\" via named types; and such cycles\n\t\t\t\t// can only be created via method parameter types that are\n\t\t\t\t// anonymous interfaces (directly or indirectly) embedding\n\t\t\t\t// the current interface. 
Example:\n\t\t\t\t//\n\t\t\t\t// type T interface {\n\t\t\t\t// m() interface{T}\n\t\t\t\t// }\n\t\t\t\t//\n\t\t\t\t// If two such (differently named) interfaces are compared,\n\t\t\t\t// endless recursion occurs if the cycle is not detected.\n\t\t\t\t//\n\t\t\t\t// If x and y were compared before, they must be equal\n\t\t\t\t// (if they were not, the recursion would have stopped);\n\t\t\t\t// search the ifacePair stack for the same pair.\n\t\t\t\t//\n\t\t\t\t// This is a quadratic algorithm, but in practice these stacks\n\t\t\t\t// are extremely short (bounded by the nesting depth of interface\n\t\t\t\t// type declarations that recur via parameter types, an extremely\n\t\t\t\t// rare occurrence). An alternative implementation might use a\n\t\t\t\t// \"visited\" map, but that is probably less efficient overall.\n\t\t\t\tq := &ifacePair{x, y, p}\n\t\t\t\tfor p != nil {\n\t\t\t\t\tif p.identical(q) {\n\t\t\t\t\t\treturn true // same pair was compared before\n\t\t\t\t\t}\n\t\t\t\t\tp = p.prev\n\t\t\t\t}\n\t\t\t\tif debug {\n\t\t\t\t\tassertSortedMethods(a)\n\t\t\t\t\tassertSortedMethods(b)\n\t\t\t\t}\n\t\t\t\tfor i, f := range a {\n\t\t\t\t\tg := b[i]\n\t\t\t\t\tif f.Id() != g.Id() || !identical(f.typ, g.typ, cmpTags, q) {\n\t\t\t\t\t\treturn false\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\n\tcase *Map:\n\t\t// Two map types are identical if they have identical key and value types.\n\t\tif y, ok := y.(*Map); ok {\n\t\t\treturn identical(x.key, y.key, cmpTags, p) && identical(x.elem, y.elem, cmpTags, p)\n\t\t}\n\n\tcase *Chan:\n\t\t// Two channel types are identical if they have identical value types\n\t\t// and the same direction.\n\t\tif y, ok := y.(*Chan); ok {\n\t\t\treturn x.dir == y.dir && identical(x.elem, y.elem, cmpTags, p)\n\t\t}\n\n\tcase *Named:\n\t\t// Two named types are identical if their type names originate\n\t\t// in the same type declaration.\n\t\tif y, ok := y.(*Named); ok {\n\t\t\txargs := x.TypeArgs().list()\n\t\t\tyargs := 
y.TypeArgs().list()\n\n\t\t\tif len(xargs) != len(yargs) {\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\tif len(xargs) > 0 {\n\t\t\t\t// Instances are identical if their original type and type arguments\n\t\t\t\t// are identical.\n\t\t\t\tif !Identical(x.orig, y.orig) {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t\tfor i, xa := range xargs {\n\t\t\t\t\tif !Identical(xa, yargs[i]) {\n\t\t\t\t\t\treturn false\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\t// TODO(gri) Why is x == y not sufficient? And if it is,\n\t\t\t// we can just return false here because x == y\n\t\t\t// is caught in the very beginning of this function.\n\t\t\treturn x.obj == y.obj\n\t\t}\n\n\tcase *TypeParam:\n\t\t// nothing to do (x and y being equal is caught in the very beginning of this function)\n\n\tcase nil:\n\t\t// avoid a crash in case of nil type\n\n\tdefault:\n\t\tunreachable()\n\t}\n\n\treturn false\n}", "func Same(t1, t2 *tree.Tree) bool {\n\tone, two := Walker(t1), Walker(t2)\n\tfor i := range one {\n\t\tj := <-two\n\t\tif i != j {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func equals(p1, p2 *node) bool {\n\treturn p1.x == p2.x && p1.y == p2.y\n}", "func (kvp *KVPairs) IsEqual(other Expr) bool {\n\to, ok := other.(*KVPairs)\n\tif !ok {\n\t\treturn false\n\t}\n\tif kvp.SelfReferenced != o.SelfReferenced {\n\t\treturn false\n\t}\n\n\tif len(kvp.Pairs) != len(o.Pairs) {\n\t\treturn false\n\t}\n\n\tfor i := range kvp.Pairs {\n\t\tif kvp.Pairs[i].K != o.Pairs[i].K {\n\t\t\treturn false\n\t\t}\n\t\tif !Equal(kvp.Pairs[i].V, o.Pairs[i].V) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func Equals(v1, v2 interface{}) bool {\n\t// TODO(EItanya): Should this be `proto.Equal` since these values are usually proto messages\n\treturn reflect.DeepEqual(v1, v2)\n}", "func (v MempoolTransactionResponse) Equal(o MempoolTransactionResponse) bool {\n\treturn string(v.Metadata) == string(o.Metadata) &&\n\t\tv.Transaction.Equal(o.Transaction)\n}", "func (v Vector) Equal(o 
Vector) bool {\n\tvDefs := v.definables()\n\toDefs := o.definables()\n\n\tfor _, metric := range order {\n\t\ta := equivalent(metric, vDefs[metric].String())\n\t\tb := equivalent(metric, oDefs[metric].String())\n\n\t\tif a != b {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (s Set) Equal(s2 Set) bool {\n\tif len(s) != len(s2) {\n\t\treturn false\n\t}\n\tfor k := range s {\n\t\tif _, ok := s2[k]; !ok {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (as RomSlice) Equals(bs RomSlice) bool {\n\tif len(as) != len(bs) {\n\t\treturn false\n\t}\n\n\tfor i, ag := range as {\n\t\tif !bs[i].Equals(ag) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (a seriesIDs) equals(other seriesIDs) bool {\n\tif len(a) != len(other) {\n\t\treturn false\n\t}\n\tfor i, s := range other {\n\t\tif a[i] != s {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (i *ArrayIterator) Equals(Object) bool {\n\treturn false\n}", "func (qt *queryTerm) equals(qt2 *queryTerm) bool {\n\treturn qt.Subject == qt2.Subject &&\n\t\tqt.Object == qt2.Object &&\n\t\treflect.DeepEqual(qt.Predicates, qt2.Predicates)\n}", "func (p Pair) Equal(v Pair) bool {\n\treturn bytes.Equal(p.Key, v.Key) && bytes.Equal(p.Value, v.Value)\n}", "func (a *Advertisement) Equal(b *Advertisement) bool {\n\tif a.Prefix.String() != b.Prefix.String() {\n\t\treturn false\n\t}\n\tif a.LocalPref != b.LocalPref {\n\t\treturn false\n\t}\n\treturn reflect.DeepEqual(a.Communities, b.Communities)\n}", "func (k *Key) Equals(k1 *Key) bool {\n\tif k1 == nil {\n\t\treturn false\n\t}\n\treturn k.Rand == k1.Rand && k.Timestamp == k1.Timestamp && k.Counter == k1.Counter\n}", "func (i *BytesIterator) Equals(Object) bool {\n\treturn false\n}", "func (v ConstructionParseResponse) Equal(o ConstructionParseResponse) bool {\n\treturn len(v.AccountIdentifierSigners) == len(o.AccountIdentifierSigners) &&\n\t\taccountIdentifierSliceEqual(v.AccountIdentifierSigners, o.AccountIdentifierSigners) &&\n\t\tstring(v.Metadata) 
== string(o.Metadata) &&\n\t\tlen(v.Operations) == len(o.Operations) &&\n\t\toperationSliceEqual(v.Operations, o.Operations) &&\n\t\tlen(v.Signers) == len(o.Signers) &&\n\t\tstringSliceEqual(v.Signers, o.Signers)\n}", "func (yp *YamlParser) equals(str string) bool {\n\treturn yp.read(len(str)) == str\n}" ]
[ "0.7741452", "0.69702935", "0.6477298", "0.6450072", "0.6274128", "0.6194222", "0.6008676", "0.59470457", "0.59356153", "0.58936787", "0.5872481", "0.5845245", "0.5827634", "0.5815576", "0.58080417", "0.5789719", "0.5784037", "0.5775299", "0.575321", "0.57492834", "0.5722611", "0.57016873", "0.56942904", "0.5685368", "0.5675363", "0.5664326", "0.5662686", "0.5654797", "0.56491476", "0.56357396", "0.5617756", "0.56016535", "0.5581814", "0.55723953", "0.55588806", "0.5558213", "0.5555048", "0.55513155", "0.55453426", "0.5538931", "0.5504644", "0.55037117", "0.5501527", "0.5495695", "0.54940444", "0.54737437", "0.5468435", "0.546477", "0.5453267", "0.5447642", "0.54408306", "0.54285336", "0.54153234", "0.5414311", "0.5407784", "0.54033834", "0.54025406", "0.5401348", "0.53860253", "0.538543", "0.53815997", "0.5378015", "0.5368926", "0.53656507", "0.53651446", "0.5347247", "0.53463566", "0.53455824", "0.5344454", "0.5343263", "0.5341951", "0.534063", "0.53274685", "0.53270495", "0.53262764", "0.5324458", "0.53230417", "0.5313374", "0.5310496", "0.5300829", "0.5296084", "0.5289628", "0.5287846", "0.52851295", "0.5284437", "0.52831787", "0.5282915", "0.5271738", "0.52717", "0.527065", "0.52667224", "0.5262305", "0.5261397", "0.52612954", "0.52601504", "0.5259557", "0.525777", "0.52499497", "0.524646", "0.52427405" ]
0.6086048
6
HasOwned returns true if the tags contains a tag that marks the resource as owned by the cluster from the perspective of this management tooling.
func (in Labels) HasOwned(cluster string) bool { value, ok := in[ClusterTagKey(cluster)] return ok && ResourceLifecycle(value) == ResourceLifecycleOwned }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func IsOwned(object metav1.Object) (owned bool, err error) {\n\trefs, err := getRefs(object)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn len(refs) > 0, nil\n}", "func (k keeper) HasOwner(ctx sdk.Context, name string) bool {\n\treturn !k.GetWhois(ctx, name).Owner.Empty()\n}", "func (o *DeployKey) HasOwner() bool {\n\tif o != nil && o.Owner != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (n *Network) AdPeopleOwned() bool {\n\tfor _, ssid := range adpeople_owned {\n\t\tif n.Ssid == ssid {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (k Keeper) HasOwner(ctx sdk.Context, name string) bool {\n\treturn !k.GetWhois(ctx, name).Owner.Empty()\n}", "func (o *StorageNetAppCloudTargetAllOf) HasOwner() bool {\n\tif o != nil && o.Owner != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func ServiceOwned(ctx context.Context, conn *dbus.Conn, svc string) bool {\n\tobj := conn.Object(busName, busPath)\n\treturn obj.CallWithContext(ctx, busInterface+\".GetNameOwner\", 0, svc).Err == nil\n}", "func (o *User) HasOwnedObjects() bool {\n\tif o != nil && o.OwnedObjects != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *Snippet) HasOwner() bool {\n\tif o != nil && o.Owner != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *V0037Node) HasOwner() bool {\n\tif o != nil && o.Owner != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *VirtualizationVmwareVirtualMachineAllOf) HasResourcePoolOwner() bool {\n\tif o != nil && o.ResourcePoolOwner != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (d *BinaryValue) IsOwned() int32 {\n\treturn int32(C.gocef_binary_value_is_owned(d.toNative(), d.is_owned))\n}", "func (c *Tag) Own(name string) bool {\n\tif c.Name == name {\n\t\treturn true\n\t}\n\talias := c.Value(OptAlias)\n\tif util.ListIndex(alias, name) > -1 {\n\t\treturn true\n\t}\n\treturn false\n}", "func (sp *serviceOwnedEdgeLBObjectMetadata) IsOwnedBy(service *corev1.Service) bool {\n\treturn sp.ClusterName 
== cluster.Name && sp.Namespace == service.Namespace && sp.Name == service.Name\n}", "func (o AccessLevelBasicConditionDevicePolicyOutput) RequireCorpOwned() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v AccessLevelBasicConditionDevicePolicy) *bool { return v.RequireCorpOwned }).(pulumi.BoolPtrOutput)\n}", "func (o *Operation) IsOwner(gid GoogleID) bool {\n\treturn o.ID.IsOwner(gid)\n}", "func (o *User) HasOwnedDevices() bool {\n\tif o != nil && o.OwnedDevices != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o AccessLevelBasicConditionDevicePolicyPtrOutput) RequireCorpOwned() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v *AccessLevelBasicConditionDevicePolicy) *bool {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.RequireCorpOwned\n\t}).(pulumi.BoolPtrOutput)\n}", "func (o AccessLevelsAccessLevelBasicConditionDevicePolicyOutput) RequireCorpOwned() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v AccessLevelsAccessLevelBasicConditionDevicePolicy) *bool { return v.RequireCorpOwned }).(pulumi.BoolPtrOutput)\n}", "func (o *Giveaway) HasUsed() bool {\n\tif o != nil && o.Used != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o AccessLevelsAccessLevelBasicConditionDevicePolicyPtrOutput) RequireCorpOwned() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v *AccessLevelsAccessLevelBasicConditionDevicePolicy) *bool {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.RequireCorpOwned\n\t}).(pulumi.BoolPtrOutput)\n}", "func (m *Member) IsOwner() bool { return m.Role == MemberRoleOwner }", "func (opID OperationID) IsOwner(gid GoogleID) bool {\n\tvar c int\n\terr := db.QueryRow(\"SELECT COUNT(*) FROM operation WHERE ID = ? 
and gid = ?\", opID, gid).Scan(&c)\n\tif err != nil {\n\t\tLog.Error(err)\n\t\treturn false\n\t}\n\tif c < 1 {\n\t\treturn false\n\t}\n\treturn true\n}", "func (o *CompartimentoHistorico) HasRefUtilizador() bool {\n\tif o != nil && o.RefUtilizador.IsSet() {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (m *Manager) IsOwner(globalID, username string) (isowner bool, err error) {\n\tmatches, err := m.collection.Find(bson.M{\"globalid\": globalID, \"owners\": username}).Count()\n\tisowner = (matches > 0)\n\treturn\n}", "func (o *StorageHyperFlexStorageContainer) HasInUse() bool {\n\tif o != nil && o.InUse != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *Job) HasOwner() bool {\n\tif o != nil && o.Owner != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *NiatelemetrySwitchDiskUtilization) HasUsed() bool {\n\tif o != nil && o.Used != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *DashboardAllOfLinks) HasOwners() bool {\n\tif o != nil && o.Owners != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (s *Service) IsManaged(ctx context.Context) (bool, error) {\n\tzoneSpec, _, _ := s.Scope.PrivateDNSSpec()\n\tif zoneSpec == nil {\n\t\treturn false, errors.Errorf(\"no private dns zone spec available\")\n\t}\n\n\tresult, err := s.zoneGetter.Get(ctx, zoneSpec)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tzone, ok := result.(privatedns.PrivateZone)\n\tif !ok {\n\t\treturn false, errors.Errorf(\"%T is not a privatedns.PrivateZone\", zone)\n\t}\n\n\ttags := converters.MapToTags(zone.Tags)\n\treturn tags.HasOwned(s.Scope.ClusterName()), nil\n}", "func IsResMgrOwnedState(state pbtask.TaskState) bool {\n\t_, ok := resMgrOwnedTaskStates[state]\n\tif ok {\n\t\treturn true\n\t}\n\treturn false\n}", "func CurrentlyOwns(\n\ttrx storage.Transaction,\n\towner *account.Account,\n\ttxId merkle.Digest,\n\tpool storage.Handle,\n) bool {\n\tdKey := append(owner.Bytes(), txId[:]...)\n\n\tif nil == trx {\n\t\treturn 
pool.Has(dKey)\n\t}\n\treturn trx.Has(pool, dKey)\n}", "func HasOwnerRole() predicate.ProfileUKM {\n\treturn predicate.ProfileUKM(func(s *sql.Selector) {\n\t\tstep := sqlgraph.NewStep(\n\t\t\tsqlgraph.From(Table, FieldID),\n\t\t\tsqlgraph.To(OwnerRoleTable, FieldID),\n\t\t\tsqlgraph.Edge(sqlgraph.M2O, true, OwnerRoleTable, OwnerRoleColumn),\n\t\t)\n\t\tsqlgraph.HasNeighbors(s, step)\n\t})\n}", "func HasOwner() predicate.Agent {\n\treturn predicate.Agent(func(s *sql.Selector) {\n\t\tstep := sqlgraph.NewStep(\n\t\t\tsqlgraph.From(Table, FieldID),\n\t\t\tsqlgraph.To(OwnerTable, FieldID),\n\t\t\tsqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn),\n\t\t)\n\t\tsqlgraph.HasNeighbors(s, step)\n\t})\n}", "func (r *AccountDIDRegistry) owns(caller string, did DID) bool {\n\ts := strings.Split(string(did), \":\")\n\treturn s[len(s)-1] == caller\n}", "func (u User) HasTag(t frameworks.Tag) bool {\n\tresult := false\n\treturn result\n}", "func HasOwnerWith(preds ...predicate.Cluster) predicate.Agent {\n\treturn predicate.Agent(func(s *sql.Selector) {\n\t\tstep := sqlgraph.NewStep(\n\t\t\tsqlgraph.From(Table, FieldID),\n\t\t\tsqlgraph.To(OwnerInverseTable, FieldID),\n\t\t\tsqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn),\n\t\t)\n\t\tsqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {\n\t\t\tfor _, p := range preds {\n\t\t\t\tp(s)\n\t\t\t}\n\t\t})\n\t})\n}", "func (s *Synchronizer) IsManaged(object meta.Object) bool {\n\towner := meta.GetControllerOf(object)\n\tif owner == nil {\n\t\t// Ignore unmanaged service catalog resources\n\t\treturn false\n\t}\n\n\t// Try to retrieve the resource that is shadowing the service catalog resource\n\tswitch owner.Kind {\n\tcase templates.BindingKind:\n\t\t_, err := s.templateSDK.GetBindingFromCache(object.GetNamespace(), owner.Name)\n\t\tif err != nil {\n\t\t\tglog.V(4).Infof(\"ignoring orphaned object '%s' of %s '%s'\", object.GetSelfLink(), owner.Kind, owner.Name)\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\tcase 
templates.InstanceKind:\n\t\t_, err := s.templateSDK.GetInstanceFromCache(object.GetNamespace(), owner.Name)\n\t\tif err != nil {\n\t\t\tglog.V(4).Infof(\"ignoring orphaned object '%s' of %s '%s'\", object.GetSelfLink(), owner.Kind, owner.Name)\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\tcase \"ServiceBinding\":\n\t\t// Lookup the binding that owns the resource\n\t\tsvcBnd, err := s.svcatSDK.GetBindingFromCache(object.GetNamespace(), owner.Name)\n\t\tif err != nil {\n\t\t\tglog.V(4).Infof(\"ignoring orphaned object '%s' of %s '%s'\", object.GetSelfLink(), owner.Kind, owner.Name)\n\t\t\treturn false\n\t\t}\n\n\t\t// The binding must be owned by the templates controller\n\t\treturn s.IsManaged(svcBnd)\n\t}\n\n\treturn false\n}", "func HasOwner() predicate.Pet {\n\treturn predicate.Pet(func(s *sql.Selector) {\n\t\tstep := sqlgraph.NewStep(\n\t\t\tsqlgraph.From(Table, FieldID),\n\t\t\tsqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn),\n\t\t)\n\t\tsqlgraph.HasNeighbors(s, step)\n\t})\n}", "func CurrentlyOwns(owner *account.Account, txId merkle.Digest) bool {\n\tdKey := append(owner.Bytes(), txId[:]...)\n\treturn nil != storage.Pool.OwnerDigest.Get(dKey)\n}", "func (o *StorageNetAppCloudTargetAllOf) HasUsed() bool {\n\tif o != nil && o.Used != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func HasOwnerRef(metadata metav1.Object, needle metav1.OwnerReference) bool {\n\tfor _, existingOwnerRef := range metadata.GetOwnerReferences() {\n\t\tif existingOwnerRef.APIVersion == needle.APIVersion &&\n\t\t\texistingOwnerRef.Kind == needle.Kind &&\n\t\t\texistingOwnerRef.Name == needle.Name &&\n\t\t\texistingOwnerRef.UID == needle.UID {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (o *NetworkingProjectNetworkCreate) HasTag() bool {\n\tif o != nil && o.Tag != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func HasOwnerUkm() predicate.ProfileUKM {\n\treturn predicate.ProfileUKM(func(s *sql.Selector) {\n\t\tstep := 
sqlgraph.NewStep(\n\t\t\tsqlgraph.From(Table, FieldID),\n\t\t\tsqlgraph.To(OwnerUkmTable, FieldID),\n\t\t\tsqlgraph.Edge(sqlgraph.M2O, true, OwnerUkmTable, OwnerUkmColumn),\n\t\t)\n\t\tsqlgraph.HasNeighbors(s, step)\n\t})\n}", "func (o *ResourcepoolPoolMember) HasResource() bool {\n\tif o != nil && o.Resource != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (p *pool) IsUsed(ctx context.Context) (bool, error) {\n\tblocks := &coilv2.AddressBlockList{}\n\terr := p.reader.List(ctx, blocks, client.MatchingLabels{\n\t\tconstants.LabelPool: p.name,\n\t})\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn len(blocks.Items) > 0, nil\n}", "func (o *Content) HasOwnerGuid() bool {\n\tif o != nil && o.OwnerGuid != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *NetworkingProjectNetadpCreate) HasTag() bool {\n\tif o != nil && o.Tag != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func hasTags(cmd *cobra.Command) bool {\n\tfor curr := cmd; curr != nil; curr = curr.Parent() {\n\t\tif len(curr.Tags) > 0 {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (o *SecurityGroup) HasTags() bool {\n\tif o != nil && o.Tags != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (r *Role) HasTag(tag string) bool {\n\tfor _, t := range r.Tags {\n\t\tif t == tag {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (r *Role) HasTag(tag string) bool {\n\tfor _, t := range r.Tags {\n\t\tif t == tag {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func IsControlledBy(owner, obj metav1.Object) bool {\n\tfor _, ownerRef := range obj.GetOwnerReferences() {\n\t\tif ownerRef.Controller != nil && *ownerRef.Controller && ownerRef.UID == owner.GetUID() {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (o *BasicBot) HasOwnerId() bool {\n\tif o != nil && o.OwnerId != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func IsMesosOwnedState(state pbtask.TaskState) bool {\n\t_, ok := mesosOwnedTaskStates[state]\n\tif ok 
{\n\t\treturn true\n\t}\n\treturn false\n}", "func HasOwner() predicate.QueueItem {\n\treturn predicate.QueueItem(func(s *sql.Selector) {\n\t\tstep := sqlgraph.NewStep(\n\t\t\tsqlgraph.From(Table, FieldID),\n\t\t\tsqlgraph.To(OwnerTable, FieldID),\n\t\t\tsqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn),\n\t\t)\n\t\tsqlgraph.HasNeighbors(s, step)\n\t})\n}", "func (o *NetworkingProjectIpCreate) HasTag() bool {\n\tif o != nil && o.Tag != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func ResourceOwnedBy(owner runtime.Object) Func {\n\treturn func(obj runtime.Object) bool {\n\t\treturn metav1.IsControlledBy(obj.(metav1.Object), owner.(metav1.Object))\n\t}\n}", "func (m *kafkaConsumerGroupManagerImpl) IsManaged(groupId string) bool {\n\treturn m.getGroup(groupId) != nil\n}", "func (svc *Service) HasPrivilege(ctx context.Context, priv string) (bool, error) {\n\tif priv == \"\" {\n\t\treturn false, nil\n\t}\n\tprivs, err := svc.getPrivileges(ctx)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tfor i := range privs {\n\t\tif privs[i].Key == priv {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\treturn false, nil\n}", "func (_ReserveSpenderMultiSig *ReserveSpenderMultiSigCaller) IsOwner(opts *bind.CallOpts, arg0 common.Address) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _ReserveSpenderMultiSig.contract.Call(opts, out, \"isOwner\", arg0)\n\treturn *ret0, err\n}", "func (k ClusterName) OwnedTagFilter() *ec2.Filter {\n\treturn &ec2.Filter{\n\t\tName: aws.String(fmt.Sprintf(\"tag:kubernetes.io/cluster/%s\", k)),\n\t\tValues: aws.StringSlice([]string{\"owned\"}),\n\t}\n}", "func (o *IamProjectRoleCreate) HasTag() bool {\n\tif o != nil && o.Tag != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func HasOwner() predicate.Task {\n\treturn predicate.Task(func(s *sql.Selector) {\n\t\tstep := sqlgraph.NewStep(\n\t\t\tsqlgraph.From(Table, FieldID),\n\t\t\tsqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, 
OwnerColumn),\n\t\t)\n\t\tsqlgraph.HasNeighbors(s, step)\n\t})\n}", "func ResourceOwnerOf(obj runtime.Object) Func {\n\treturn func(ownerObj runtime.Object) bool {\n\t\treturn metav1.IsControlledBy(obj.(metav1.Object), ownerObj.(metav1.Object))\n\t}\n}", "func (o *PartnerCustomerCreateRequest) HasAssetsUnderManagement() bool {\n\tif o != nil && o.AssetsUnderManagement != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func resourceHasUserVisibleApply(info *InstanceInfo) bool {\n\taddr := info.ResourceAddress()\n\n\t// Only managed resources have user-visible apply actions.\n\t// In particular, this excludes data resources since we \"apply\" these\n\t// only as an implementation detail of removing them from state when\n\t// they are destroyed. (When reading, they don't get here at all because\n\t// we present them as \"Refresh\" actions.)\n\treturn addr.Mode == config.ManagedResourceMode\n}", "func (namespace *Namespace) IsOwnedBy(user *User) bool {\n\tif user == nil || namespace == nil {\n\t\treturn false\n\t}\n\treturn namespace.UserID == user.ID\n}", "func (n *node) HasTag(name string) bool {\n\tfor _, v := range n.tags {\n\t\tif v == name {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func IsOwner(tree *dag.Dag, blockWithHeaders *chaintree.BlockWithHeaders) (bool, chaintree.CodedError) {\n\tctx := context.TODO()\n\tid, _, err := tree.Resolve(context.TODO(), []string{\"id\"})\n\tif err != nil {\n\t\treturn false, &consensus.ErrorCode{Memo: fmt.Sprintf(\"error: %v\", err), Code: consensus.ErrUnknown}\n\t}\n\n\theaders := &consensus.StandardHeaders{}\n\n\terr = typecaster.ToType(blockWithHeaders.Headers, headers)\n\tif err != nil {\n\t\treturn false, &consensus.ErrorCode{Memo: fmt.Sprintf(\"error: %v\", err), Code: consensus.ErrUnknown}\n\t}\n\n\tvar addrs []string\n\n\tuncastAuths, _, err := tree.Resolve(context.TODO(), strings.Split(\"tree/\"+consensus.TreePathForAuthentications, \"/\"))\n\tif err != nil {\n\t\treturn false, &consensus.ErrorCode{Code: 
consensus.ErrUnknown, Memo: fmt.Sprintf(\"err resolving: %v\", err)}\n\t}\n\t// If there are no authentications then the Chain Tree is still owned by its genesis key\n\tif uncastAuths == nil {\n\t\taddrs = []string{consensus.DidToAddr(id.(string))}\n\t} else {\n\t\terr = typecaster.ToType(uncastAuths, &addrs)\n\t\tif err != nil {\n\t\t\treturn false, &consensus.ErrorCode{Code: consensus.ErrUnknown, Memo: fmt.Sprintf(\"err casting: %v\", err)}\n\t\t}\n\t}\n\n\tfor _, addr := range addrs {\n\t\tisSigned, err := consensus.IsBlockSignedBy(ctx, blockWithHeaders, addr)\n\t\tif err != nil {\n\t\t\treturn false, &consensus.ErrorCode{Memo: fmt.Sprintf(\"error finding if signed: %v\", err), Code: consensus.ErrUnknown}\n\t\t}\n\n\t\tif isSigned {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\n\treturn false, nil\n}", "func HasOwner() predicate.Account {\n\treturn predicate.Account(func(s *sql.Selector) {\n\t\tstep := sqlgraph.NewStep(\n\t\t\tsqlgraph.From(Table, FieldID),\n\t\t\tsqlgraph.To(OwnerTable, FieldID),\n\t\t\tsqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn),\n\t\t)\n\t\tsqlgraph.HasNeighbors(s, step)\n\t})\n}", "func (_TokenStakingEscrow *TokenStakingEscrowCaller) IsOwner(opts *bind.CallOpts) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _TokenStakingEscrow.contract.Call(opts, out, \"isOwner\")\n\treturn *ret0, err\n}", "func (o *DeviceNode) HasResources() bool {\n\tif o != nil && o.Resources != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *V1VirusDatasetRequest) HasReleasedSince() bool {\n\tif o != nil && o.ReleasedSince != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *StorageBaseCapacity) HasUsed() bool {\n\tif o != nil && o.Used != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *ResourcepoolPoolMember) HasPool() bool {\n\tif o != nil && o.Pool != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (gou *GroupOrderdUser) Exists() bool {\n\treturn gou._exists\n}", "func (o *PublicIp) 
HasTags() bool {\n\tif o != nil && o.Tags != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *PublicIp) HasTags() bool {\n\tif o != nil && o.Tags != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (_ERC20HecoManager *ERC20HecoManagerSession) IsOwner() (bool, error) {\n\treturn _ERC20HecoManager.Contract.IsOwner(&_ERC20HecoManager.CallOpts)\n}", "func (o *FiltersNet) HasTagKeys() bool {\n\tif o != nil && o.TagKeys != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *FiltersSecurityGroup) HasTagKeys() bool {\n\tif o != nil && o.TagKeys != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func Has(client *http.Client, k []byte, host, bucket string, creds awsauth.Credentials) (has bool, err error) {\n\traw := fmt.Sprintf(\"https://%s/%s/%x\", host, bucket, k)\n\tloc, err := url.Parse(raw)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"failed to parse '%s' as url: %v\", raw, err)\n\t}\n\n\treq, err := http.NewRequest(\"HEAD\", loc.String(), nil)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"failed to create HEAD request: %v\", err)\n\t}\n\n\tawsauth.Sign(req, creds)\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"failed to perform HEAD request: %v\", err)\n\t}\n\n\tif resp.StatusCode == http.StatusOK {\n\t\treturn true, nil\n\t} else if resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusForbidden {\n\t\t//AWS returns forbidden for a HEAD request if the one performing the operation does not have\n\t\t//list bucket permissions\n\t\treturn false, nil\n\t} else {\n\t\treturn false, fmt.Errorf(\"unexpected response from HEAD '%s' request: %s\", loc, resp.Status)\n\t}\n}", "func HasOwnerUkmWith(preds ...predicate.Ukm) predicate.ProfileUKM {\n\treturn predicate.ProfileUKM(func(s *sql.Selector) {\n\t\tstep := sqlgraph.NewStep(\n\t\t\tsqlgraph.From(Table, FieldID),\n\t\t\tsqlgraph.To(OwnerUkmInverseTable, FieldID),\n\t\t\tsqlgraph.Edge(sqlgraph.M2O, true, OwnerUkmTable, 
OwnerUkmColumn),\n\t\t)\n\t\tsqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {\n\t\t\tfor _, p := range preds {\n\t\t\t\tp(s)\n\t\t\t}\n\t\t})\n\t})\n}", "func cephOSDPoolExists(ClusterName string, poolName string, userName string) bool {\n\t_, err := shared.RunCommand(\n\t\t\"ceph\",\n\t\t\"--name\", fmt.Sprintf(\"client.%s\", userName),\n\t\t\"--cluster\", ClusterName,\n\t\t\"osd\",\n\t\t\"pool\",\n\t\t\"get\",\n\t\tpoolName,\n\t\t\"size\")\n\tif err != nil {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (_ERC20HecoManager *ERC20HecoManagerCaller) IsOwner(opts *bind.CallOpts) (bool, error) {\n\tvar out []interface{}\n\terr := _ERC20HecoManager.contract.Call(opts, &out, \"isOwner\")\n\n\tif err != nil {\n\t\treturn *new(bool), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(bool)).(*bool)\n\n\treturn out0, err\n\n}", "func (o *TenantWithOfferWeb) HasTags() bool {\n\tif o != nil && o.Tags != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func HasOwnerWith(preds ...predicate.User) predicate.QueueItem {\n\treturn predicate.QueueItem(func(s *sql.Selector) {\n\t\tstep := sqlgraph.NewStep(\n\t\t\tsqlgraph.From(Table, FieldID),\n\t\t\tsqlgraph.To(OwnerInverseTable, FieldID),\n\t\t\tsqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn),\n\t\t)\n\t\tsqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {\n\t\t\tfor _, p := range preds {\n\t\t\t\tp(s)\n\t\t\t}\n\t\t})\n\t})\n}", "func (o *Member) HasTags() bool {\n\tif o != nil && o.Tags != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (_ReserveSpenderMultiSig *ReserveSpenderMultiSigSession) IsOwner(arg0 common.Address) (bool, error) {\n\treturn _ReserveSpenderMultiSig.Contract.IsOwner(&_ReserveSpenderMultiSig.CallOpts, arg0)\n}", "func (o *ReconciliationTarget) HasUsage() bool {\n\tif o != nil && o.Usage != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *ImageImportOperation) HasUuid() bool {\n\tif o != nil && o.Uuid != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", 
"func (o *FiltersNatService) HasTagKeys() bool {\n\tif o != nil && o.TagKeys != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *LocalDatabaseProvider) HasTags() bool {\n\tif o != nil && o.Tags != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (_ChpRegistry *ChpRegistryCaller) IsOwner(opts *bind.CallOpts) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _ChpRegistry.contract.Call(opts, out, \"isOwner\")\n\treturn *ret0, err\n}", "func (_Token *TokenCaller) IsOwner(opts *bind.CallOpts) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _Token.contract.Call(opts, out, \"isOwner\")\n\treturn *ret0, err\n}", "func (s *MemberSummary) SetIsOwned(v bool) *MemberSummary {\n\ts.IsOwned = &v\n\treturn s\n}", "func (o *ConfluentAccountCreateRequestAttributes) HasTags() bool {\n\treturn o != nil && o.Tags != nil\n}", "func (profession Profession) HasTag(tag string) bool {\n\tfor _, t := range profession.Tags {\n\t\tif t == tag {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func SupportsOwnerReference(restMapper meta.RESTMapper, owner, dependent runtime.Object) (bool, error) {\n\townerGVK := owner.GetObjectKind().GroupVersionKind()\n\townerMapping, err := restMapper.RESTMapping(ownerGVK.GroupKind(), ownerGVK.Version)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tmOwner, err := meta.Accessor(owner)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tdepGVK := dependent.GetObjectKind().GroupVersionKind()\n\tdepMapping, err := restMapper.RESTMapping(depGVK.GroupKind(), depGVK.Version)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tmDep, err := meta.Accessor(dependent)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\townerClusterScoped := ownerMapping.Scope.Name() == meta.RESTScopeNameRoot\n\townerNamespace := mOwner.GetNamespace()\n\tdepClusterScoped := depMapping.Scope.Name() == meta.RESTScopeNameRoot\n\tdepNamespace := mDep.GetNamespace()\n\n\tif ownerClusterScoped {\n\t\treturn true, 
nil\n\t}\n\n\tif depClusterScoped {\n\t\treturn false, nil\n\t}\n\n\tif ownerNamespace != depNamespace {\n\t\treturn false, nil\n\t}\n\t// Both owner and dependent are namespace-scoped and in the same namespace.\n\treturn true, nil\n}" ]
[ "0.65666986", "0.61177206", "0.59844804", "0.59676355", "0.596722", "0.5791662", "0.57812816", "0.577984", "0.56838113", "0.5683395", "0.56106216", "0.55367696", "0.5497445", "0.5447929", "0.53616506", "0.52428067", "0.5231064", "0.52302146", "0.5227012", "0.5203173", "0.5201436", "0.5200813", "0.5200184", "0.51986784", "0.5184544", "0.5148832", "0.51452285", "0.5143033", "0.51302993", "0.5124208", "0.51229477", "0.5120931", "0.5081019", "0.5062646", "0.50455874", "0.5040827", "0.5038689", "0.50352097", "0.5029367", "0.50234735", "0.501764", "0.501583", "0.50112814", "0.4993897", "0.49852633", "0.49797028", "0.49796066", "0.49740127", "0.49711195", "0.4951154", "0.4948805", "0.4948805", "0.49384227", "0.49374405", "0.49370134", "0.49323523", "0.49250343", "0.4924223", "0.4917369", "0.49157748", "0.49108273", "0.49087536", "0.49043602", "0.48966214", "0.4892779", "0.48923677", "0.4879104", "0.48765114", "0.48747668", "0.4866818", "0.48659697", "0.4865191", "0.4859382", "0.4857859", "0.485556", "0.4853326", "0.48479512", "0.48445353", "0.48445353", "0.484397", "0.48282483", "0.48255694", "0.4825277", "0.48243666", "0.48131144", "0.4811281", "0.4804033", "0.4798621", "0.47947273", "0.47920015", "0.47894487", "0.47855356", "0.47853684", "0.47794092", "0.47779158", "0.4777538", "0.47774372", "0.47694325", "0.4768082", "0.47564748" ]
0.7708871
0
ToComputeFilter returns the string representation of the labels as a filter to be used in google compute sdk calls.
func (in Labels) ToComputeFilter() string { var builder strings.Builder for k, v := range in { builder.WriteString(fmt.Sprintf("(labels.%s = %q) ", k, v)) } return builder.String() }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (q *Query) ToFilter() string {\n\treturn fmt.Sprintf(`\nresource.type=k8s_container\nAND (\n\tlogName=projects/%s/logs/stderr\n\tOR logName=projects/%s/logs/stdout\n)\nAND resource.labels.cluster_name=%q\nAND resource.labels.namespace_name=%q\nAND labels.%q=%q\n`,\n\t\tq.Project,\n\t\tq.Project,\n\t\tq.Cluster,\n\t\tq.Namespace,\n\t\tStackdriverBuildIDLabel,\n\t\tq.BuildID,\n\t)\n}", "func (sink *influxdbSink) labelsToPredicate(labels map[string]string) string {\n\tif len(labels) == 0 {\n\t\treturn \"\"\n\t}\n\n\tparts := make([]string, 0, len(labels))\n\tfor k, v := range labels {\n\t\tparts = append(parts, fmt.Sprintf(\"%q = '%s'\", k, v))\n\t}\n\n\treturn strings.Join(parts, \" AND \")\n}", "func (f WithLabelsFilter) String() string {\n\treturn fmt.Sprintf(\"WithLabelsFilter(%s)\", strings.Join(f.labels, \",\"))\n}", "func (qb QueryBuilder) composeFilter() string {\n\tfilterBuilder := qb.getFilterBuilder().\n\t\tWithMetricType(qb.metricName).\n\t\tWithProject(qb.translator.config.Project).\n\t\tWithCluster(qb.translator.config.Cluster)\n\n\tresourceNames := qb.getResourceNames()\n\tif qb.translator.useNewResourceModel {\n\t\t// new resource model specific filters\n\t\tfilterBuilder = filterBuilder.WithLocation(qb.translator.config.Location)\n\t\tif !qb.nodes.isNodeValuesEmpty() {\n\t\t\t// node metrics\n\t\t\treturn filterBuilder.WithNodes(resourceNames).Build()\n\t\t}\n\t\t// pod metrics\n\t\treturn filterBuilder.\n\t\t\tWithNamespace(qb.namespace).\n\t\t\tWithPods(resourceNames).\n\t\t\tBuild()\n\n\t}\n\t// legacy resource model specific filters\n\treturn filterBuilder.\n\t\tWithContainer().\n\t\tWithPods(resourceNames).\n\t\tBuild()\n}", "func generateListFilterFromLabels(labels map[string]string) string {\n\tvar filter string\n\tfor k, v := range labels {\n\t\tfilter = fmt.Sprintf(\"%s(labels.%s eq %s)\", filter, k, v)\n\t}\n\n\treturn filter\n}", "func (m *ServicePolicyMetricLabelFilter) ToJSON() (string, error) {\n\treturn codec.ToJSON(m)\n}", 
"func (f WithoutLabelsFilter) String() string {\n\treturn fmt.Sprintf(\"WithoutLabelsFilter(%s)\", strings.Join(f.labels, \",\"))\n}", "func (lc *LabelCreator) Filter(input []string) (output []string) {\n\tfor i := range input {\n\t\tinput[i] = strings.Trim(reLabelSanitize.ReplaceAllString(strings.ToLower(input[i]), \"-\"), \"-\")\n\t}\n\n\tslices.Sort(input)\n\treturn slices.Compact(input)\n}", "func LabelFilter(labels map[string]string) factory.EventFilterFunc {\n\treturn func(obj interface{}) bool {\n\t\tmetaObj := obj.(metav1.Object)\n\t\tobjLabels := metaObj.GetLabels()\n\t\tfor k, v := range labels {\n\t\t\tif objLabels[k] != v {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n}", "func (tf TrueFilter) ToFilterDescription() FilterDescription {\n\treturn FilterDescription{\n\t\tType: \"TrueFilter\",\n\t\tSQLExpression: ptrString(\"1=1\"),\n\t}\n}", "func (r *resultImpl) Filter(filter func(label *Label) *Label) (LabelResult, error) {\n\tvar result []Label\n\tfor _, value := range r.labels {\n\t\tfilterResult := filter(&value)\n\t\tif filterResult != nil {\n\t\t\tresult = append(result, *filterResult)\n\t\t}\n\t}\n\treturn &resultImpl{\n\t\tlabels: result,\n\t}, nil\n}", "func (f *IPLabelFilter) String() string {\n\teq := \"=\" // LabelFilterEqual -> \"==\", we don't want in string representation of ip label filter.\n\tif f.ty == LabelFilterNotEqual {\n\t\teq = LabelFilterNotEqual.String()\n\t}\n\n\treturn fmt.Sprintf(\"%s%sip(%q)\", f.label, eq, f.pattern) // label filter\n}", "func (pl *NodeLabel) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {\n\t// Note that NodeLabelPredicate doesn't use predicate metadata, hence passing nil here.\n\t_, reasons, err := pl.predicate(pod, nil, nodeInfo)\n\treturn migration.PredicateResultToFrameworkStatus(reasons, err)\n}", "func (m *ForwardProxyPolicyMetricLabelFilter) ToJSON() (string, error) {\n\treturn codec.ToJSON(m)\n}", "func (o 
MetadataFilterOutput) FilterLabels() MetadataFilterLabelMatchArrayOutput {\n\treturn o.ApplyT(func(v MetadataFilter) []MetadataFilterLabelMatch { return v.FilterLabels }).(MetadataFilterLabelMatchArrayOutput)\n}", "func (t *Translator) filterForCluster() string {\n\tprojectFilter := fmt.Sprintf(\"resource.labels.project_id = %q\", t.config.Project)\n\tclusterFilter := fmt.Sprintf(\"resource.labels.cluster_name = %q\", t.config.Cluster)\n\tlocationFilter := fmt.Sprintf(\"resource.labels.location = %q\", t.config.Location)\n\treturn fmt.Sprintf(\"%s AND %s AND %s\", projectFilter, clusterFilter, locationFilter)\n}", "func (s EgressFilter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (c *Config) LabelsFilter() []string {\n\treturn c.labelsFilter\n}", "func (s MetricFilterV2) String() string {\n\treturn awsutil.Prettify(s)\n}", "func TransformLabelsToSelector(labels map[string]string) string {\n\tlabelList := make([]string, 0)\n\tfor key, value := range labels {\n\t\tlabelList = append(labelList, fmt.Sprintf(\"%s=%s\", key, value))\n\t}\n\treturn strings.Join(labelList, \",\")\n}", "func (s DominantLanguageDetectionJobFilter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s EntitiesDetectionJobFilter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (sf SQLFilter) ToFilterDescription() FilterDescription {\n\treturn FilterDescription{\n\t\tType: \"SqlFilter\",\n\t\tSQLExpression: &sf.Expression,\n\t}\n}", "func (s TopicsDetectionJobFilter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func FilterLabels(labels map[string]string) map[string]string {\n\n\tstatefulSetLabels := make(map[string]string)\n\tfor key, value := range labels {\n\t\tif key != bdv1.LabelDeploymentVersion {\n\t\t\tstatefulSetLabels[key] = value\n\t\t}\n\t}\n\treturn statefulSetLabels\n}", "func BuildRemoveLabelFilter(predicate func(key string) bool) (yaml.Filter, error) {\n\tfieldPaths, err := 
xform.ParseFieldPaths(\n\t\t[]string{\n\t\t\t\"metadata.labels\",\n\t\t\t\"spec.selector\",\n\t\t\t\"spec.selector.matchLabels\",\n\t\t\t\"spec.template.metadata.labels\",\n\t\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &xform.FieldClearer{\n\t\tFieldPaths: fieldPaths,\n\t\tPredicate: predicate,\n\t}, nil\n}", "func (s ModelMetadataFilter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s ExportFilter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s ExportFilter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s ExportFilter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (t *Translator) legacyFilterForCluster() string {\n\tprojectFilter := fmt.Sprintf(\"resource.labels.project_id = %q\", t.config.Project)\n\t// Skip location, since it may be set incorrectly by Heapster for old resource model\n\tclusterFilter := fmt.Sprintf(\"resource.labels.cluster_name = %q\", t.config.Cluster)\n\tcontainerFilter := \"resource.labels.container_name = \\\"\\\"\"\n\treturn fmt.Sprintf(\"%s AND %s AND %s\", projectFilter, clusterFilter, containerFilter)\n}", "func (p antiAffinityLabel) filter(pools *csp.CSPList) (*csp.CSPList, error) {\n\tif p.labelSelector == \"\" {\n\t\treturn pools, nil\n\t}\n\t// pools that are already associated with\n\t// this label should be excluded\n\t//\n\t// NOTE: we try without giving any namespace\n\t// so that it lists from all available\n\t// namespaces\n\tcvrs, err := p.cvrList(\"\", metav1.ListOptions{LabelSelector: p.labelSelector})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\texclude := cvr.NewListBuilder().WithAPIList(cvrs).List().GetPoolUIDs()\n\treturn pools.Filter(csp.IsNotUID(exclude...)), nil\n}", "func (o AutoscalingPolicyCustomMetricUtilizationOutput) Filter() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v AutoscalingPolicyCustomMetricUtilization) *string { return v.Filter }).(pulumi.StringPtrOutput)\n}", "func (ff FalseFilter) ToFilterDescription() FilterDescription 
{\n\treturn FilterDescription{\n\t\tType: \"FalseFilter\",\n\t\tSQLExpression: ptrString(\"1!=1\"),\n\t}\n}", "func (s IpFilter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s KeywordFilter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s ListEntitiesFilter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func filter(l []*container.Cluster, label string, value string) []*container.Cluster {\n\tvar res []*container.Cluster\n\tfor _, cluster := range l {\n\t\tif cluster.ResourceLabels[label] == value {\n\t\t\tres = append(res, cluster)\n\t\t}\n\t}\n\treturn res\n}", "func (s FilterV2) String() string {\n\treturn awsutil.Prettify(s)\n}", "func filter(l []*container.Cluster, label, value string) []*container.Cluster {\n\tif label == \"\" { //TODO Temp impl\n\t\treturn l\n\t}\n\n\tvar res []*container.Cluster\n\tfor _, cluster := range l {\n\t\tif cluster.ResourceLabels[label] == value {\n\t\t\tres = append(res, cluster)\n\t\t}\n\t}\n\treturn res\n}", "func (o GoogleCloudRetailV2alphaRuleFilterActionResponseOutput) Filter() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GoogleCloudRetailV2alphaRuleFilterActionResponse) string { return v.Filter }).(pulumi.StringOutput)\n}", "func (s ImportTaskFilter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s ImportTaskFilter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s SearchFilter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (t *Translator) filterForNodes(nodeNames []string) string {\n\tif len(nodeNames) == 0 {\n\t\tklog.Fatalf(\"createFilterForNodes called with empty list of node names\")\n\t} else if len(nodeNames) == 1 {\n\t\treturn fmt.Sprintf(\"resource.labels.node_name = %s\", nodeNames[0])\n\t}\n\treturn fmt.Sprintf(\"resource.labels.node_name = one_of(%s)\", strings.Join(nodeNames, \",\"))\n}", "func (o AutoscalingPolicyCustomMetricUtilizationResponseOutput) Filter() pulumi.StringOutput {\n\treturn o.ApplyT(func(v 
AutoscalingPolicyCustomMetricUtilizationResponse) string { return v.Filter }).(pulumi.StringOutput)\n}", "func (s SentimentDetectionJobFilter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o GoogleCloudRetailV2alphaRuleFilterActionOutput) Filter() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v GoogleCloudRetailV2alphaRuleFilterAction) *string { return v.Filter }).(pulumi.StringPtrOutput)\n}", "func (s DatastoreFilter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (cf CorrelationFilter) ToFilterDescription() FilterDescription {\n\treturn FilterDescription{\n\t\tType: \"CorrelationFilter\",\n\t\tCorrelationFilter: cf,\n\t}\n}", "func (o MetadataFilterResponseOutput) FilterLabels() MetadataFilterLabelMatchResponseArrayOutput {\n\treturn o.ApplyT(func(v MetadataFilterResponse) []MetadataFilterLabelMatchResponse { return v.FilterLabels }).(MetadataFilterLabelMatchResponseArrayOutput)\n}", "func (m *Exemplar) GetFilteredLabels() []v11.StringKeyValue {\n\tif m != nil {\n\t\treturn m.FilteredLabels\n\t}\n\treturn nil\n}", "func (s DocumentKeyValuesFilter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s ContactFilter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s UsageStatisticsFilter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s ControlPlaneTagFilter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (fe *FilterExpr) String() string {\n\tw := expr.NewDefaultWriter()\n\tfe.WriteDialect(w)\n\treturn w.String()\n}", "func (c *jsiiProxy_CfnFilter) ToString() *string {\n\tvar returns *string\n\n\t_jsii_.Invoke(\n\t\tc,\n\t\t\"toString\",\n\t\tnil, // no parameters\n\t\t&returns,\n\t)\n\n\treturn returns\n}", "func Filter(fn sif.FilterOperation) *sif.DataFrameOperation {\n\treturn &sif.DataFrameOperation{\n\t\tTaskType: sif.FilterTaskType,\n\t\tDo: func(d sif.DataFrame) (*sif.DataFrameOperationResult, error) {\n\t\t\treturn &sif.DataFrameOperationResult{\n\t\t\t\tTask: &filterTask{fn: 
iutil.SafeFilterOperation(fn)},\n\t\t\t\tDataSchema: d.GetSchema().Clone(),\n\t\t\t}, nil\n\t\t},\n\t}\n}", "func (s ListServiceInstancesFilter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (f *Filter) String() string {\n\treturn (string)(*f)\n}", "func (s Filter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s Filter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s Filter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s Filter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s Filter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s Filter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s Filter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s Filter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s Filter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s Filter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s Filter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s Filter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s Filter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func ToEntityLoadFilter(typeFilter *wrappers.StringValue, keyFilter *wrappers.StringValue, ids []*EntityID) storage.EntityLoadFilter {\n\tentityLoadFilter := storage.EntityLoadFilter{\n\t\tTypeFilter: getStringPointer(typeFilter),\n\t\tKeyFilter: getStringPointer(keyFilter),\n\t\tIDs: ToTypeAndKeys(ids),\n\t}\n\treturn entityLoadFilter\n}", "func (s QuickConnectSearchFilter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s DescribeActivationsFilter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s TagFilter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s TagFilter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s TagFilter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s ParametersFilter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func 
gceLabelsToModel(labels map[string]string) []string {\n\tidx := 0\n\tmLabels := make([]string, len(labels))\n\tfor k, v := range labels {\n\t\tmLabels[idx] = fmt.Sprintf(\"%s:%s\", k, v)\n\t\tidx++\n\t}\n\treturn mLabels\n}", "func (s QueryFilters) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (p LabelParameters) ToQuery() string {\n\treturn paramsToQuery(p)\n}", "func (s SyncResourceFilter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (r *RemoveLabels) Transform(mfs []*dto.MetricFamily) []*dto.MetricFamily {\n\tfor i := range mfs {\n\t\tfor j, m := range mfs[i].Metric {\n\t\t\t// Filter out labels\n\t\t\tlabels := m.Label[:0]\n\t\t\tfor _, l := range m.Label {\n\t\t\t\tif _, ok := r.Labels[l.GetName()]; !ok {\n\t\t\t\t\tlabels = append(labels, l)\n\t\t\t\t}\n\t\t\t}\n\t\t\tmfs[i].Metric[j].Label = labels\n\t\t}\n\t}\n\treturn mfs\n}", "func createFilter(m eh.EventMatcher) string {\n\tswitch m := m.(type) {\n\tcase eh.MatchEvents:\n\t\ts := make([]string, len(m))\n\t\tfor i, et := range m {\n\t\t\ts[i] = fmt.Sprintf(`attributes:\"%s\"`, et) // Filter event types by key to save space.\n\t\t}\n\n\t\treturn strings.Join(s, \" OR \")\n\tcase eh.MatchAggregates:\n\t\ts := make([]string, len(m))\n\t\tfor i, at := range m {\n\t\t\ts[i] = fmt.Sprintf(`attributes.%s=\"%s\"`, aggregateTypeAttribute, at)\n\t\t}\n\n\t\treturn strings.Join(s, \" OR \")\n\tcase eh.MatchAny:\n\t\ts := make([]string, len(m))\n\t\tfor i, sm := range m {\n\t\t\ts[i] = fmt.Sprintf(\"(%s)\", createFilter(sm))\n\t\t}\n\n\t\treturn strings.Join(s, \" OR \")\n\tcase eh.MatchAll:\n\t\ts := make([]string, len(m))\n\t\tfor i, sm := range m {\n\t\t\ts[i] = fmt.Sprintf(\"(%s)\", createFilter(sm))\n\t\t}\n\n\t\treturn strings.Join(s, \" AND \")\n\tdefault:\n\t\treturn \"\"\n\t}\n}", "func (d *DistributedBackupDescriptor) Filter(pred func(s string) bool) {\n\tfor _, desc := range d.Nodes {\n\t\tcs := make([]string, 0, len(desc.Classes))\n\t\tfor _, cls := range desc.Classes 
{\n\t\t\tif pred(cls) {\n\t\t\t\tcs = append(cs, cls)\n\t\t\t}\n\t\t}\n\t\tif len(cs) != len(desc.Classes) {\n\t\t\tdesc.Classes = cs\n\t\t}\n\t}\n}", "func (s InstanceInformationFilter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s AutomationExecutionFilter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s ConversationLogsDataSourceFilterBy) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CommandFilter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (f *Filter) ToQuery() string {\n\treturn fmt.Sprintf(\"last_knowledge_of_server=%d\", f.LastKnowledgeOfServer)\n}", "func (s SourceServerActionsRequestFilters) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s ListAnnotationImportJobsFilter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s AnalyticsPathFilter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s DocumentFilter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s MapFilter) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (f *Filter) AddLabels(m map[string]string) {\n\tconst (\n\t\tmetricLabel = \"metric.label\"\n\t)\n\tlabels := []string{}\n\tfor label, value := range m {\n\t\tmetricLabel := fmt.Sprintf(\"%s.\\\"%s\\\"=\\\"%s\\\"\", metricLabel, label, value)\n\t\tlabels = append(labels, metricLabel)\n\t}\n\tf.add(strings.Join(labels, \" \"))\n}", "func (s NumberFilter) String() string {\n\treturn awsutil.Prettify(s)\n}" ]
[ "0.63834465", "0.574212", "0.55767214", "0.55669326", "0.55330604", "0.55210286", "0.54926217", "0.53527564", "0.52048385", "0.51603985", "0.5131564", "0.5130316", "0.51199216", "0.50214374", "0.4973829", "0.4970531", "0.49135852", "0.4864435", "0.48620355", "0.48164833", "0.47891018", "0.47705606", "0.47684094", "0.47638533", "0.4721275", "0.47117636", "0.46938935", "0.46799618", "0.46799618", "0.46799618", "0.46781346", "0.46431792", "0.4636521", "0.45784885", "0.45739132", "0.45705733", "0.4558335", "0.45581728", "0.45552766", "0.45491198", "0.45446736", "0.45431197", "0.45431197", "0.4535192", "0.45350575", "0.45312688", "0.45042926", "0.44978213", "0.44730932", "0.44658178", "0.4456905", "0.44475782", "0.44468626", "0.44314024", "0.44275278", "0.44243497", "0.4418971", "0.4415407", "0.44097465", "0.437669", "0.43675426", "0.4363342", "0.4363342", "0.4363342", "0.4363342", "0.4363342", "0.4363342", "0.4363342", "0.4363342", "0.4363342", "0.4363342", "0.4363342", "0.4363342", "0.4363342", "0.4360969", "0.43388033", "0.43357188", "0.4330685", "0.4330685", "0.4330685", "0.43201646", "0.431849", "0.43172032", "0.4308137", "0.43051296", "0.43042985", "0.42974892", "0.42921296", "0.4287001", "0.42830533", "0.42744812", "0.427356", "0.4269767", "0.4260536", "0.4259586", "0.42539492", "0.4253513", "0.42533955", "0.42509055", "0.4250694" ]
0.8348716
0
Difference returns the difference between this map of tags and the other map of tags. Items are considered equals if key and value are equals.
func (in Labels) Difference(other Labels) Labels { res := make(Labels, len(in)) for key, value := range in { if otherValue, ok := other[key]; ok && value == otherValue { continue } res[key] = value } return res }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TagsDiff(sqsTags map[string]string, newTags map[string]string) (removed, added map[string]string) {\n\tremoved = map[string]string{}\n\tfor k, v := range sqsTags {\n\t\tif _, ok := newTags[k]; !ok {\n\t\t\tremoved[k] = v\n\t\t}\n\t}\n\n\tadded = map[string]string{}\n\tfor k, newV := range newTags {\n\t\tif oldV, ok := sqsTags[k]; !ok || oldV != newV {\n\t\t\tadded[k] = newV\n\t\t}\n\t}\n\treturn\n}", "func (am AttributeMap) Diff(a, b *AttributeMap) AttributeMap {\n\tvar keys []string\n\tfor k := range *a {\n\t\tkeys = append(keys, k)\n\t}\n\tfor k := range *b {\n\t\tfor a := range keys {\n\t\t\tif keys[a] != k {\n\t\t\t\tkeys = append(keys, k)\n\t\t\t}\n\t\t}\n\t}\n\n\tattributes := make(AttributeMap)\n\tfor _, k := range keys {\n\t\tvalA, _ := (*a)[k]\n\t\tvalB, okB := (*b)[k]\n\t\tif valA != valB {\n\t\t\tif !okB {\n\t\t\t\tattributes[k] = nil\n\t\t\t} else {\n\t\t\t\tattributes[k] = valB\n\t\t\t}\n\t\t}\n\t}\n\n\treturn attributes\n}", "func Diff(firstJson map[string]interface{}, secondJson map[string]interface{}) []string {\n\tfirstJsonCopy := copyMapByte(firstJson)\n\tsecondJsonCopy := copyMapByte(secondJson)\n\n\tchangesSet := make(map[string]struct{})\n\tcompare(firstJsonCopy, secondJsonCopy, \"\", changesSet)\n\tcompare(secondJsonCopy, firstJsonCopy, \"\", changesSet)\n\n\tchanges := make([]string, 0)\n\tfor key := range changesSet {\n\t\tchanges = append(changes, key)\n\t}\n\tsort.Strings(changes)\n\n\treturn changes\n}", "func (obj *object) Diff(other Object) Object {\n\tr := NewObject()\n\tobj.Foreach(func(k, v *Term) {\n\t\tif other.Get(k) == nil {\n\t\t\tr.Insert(k, v)\n\t\t}\n\t})\n\treturn r\n}", "func difference(a, b map[string]bool) map[string]bool {\n\tr := make(map[string]bool)\n\tfor k := range a {\n\t\tif !b[k] {\n\t\t\tr[k] = true\n\t\t}\n\t}\n\tfor k := range b {\n\t\tif !a[k] {\n\t\t\tr[k] = true\n\t\t}\n\t}\n\treturn r\n}", "func setDifference(a, b map[string]interface{}) map[string]interface{} {\n\tresult := 
make(map[string]interface{})\n\tfor k, v := range a {\n\t\tif _, ok := b[k]; !ok {\n\t\t\tresult[k] = v\n\t\t}\n\t}\n\n\treturn result\n}", "func DiffTags(spec []v1alpha1.Tag, current []ecr.Tag) (addTags []ecr.Tag, remove []string) {\n\taddMap := make(map[string]string, len(spec))\n\tfor _, t := range spec {\n\t\taddMap[t.Key] = t.Value\n\t}\n\tremoveMap := map[string]struct{}{}\n\tfor _, t := range current {\n\t\tif addMap[aws.StringValue(t.Key)] == aws.StringValue(t.Value) {\n\t\t\tdelete(addMap, aws.StringValue(t.Key))\n\t\t\tcontinue\n\t\t}\n\t\tremoveMap[aws.StringValue(t.Key)] = struct{}{}\n\t}\n\tfor k, v := range addMap {\n\t\taddTags = append(addTags, ecr.Tag{Key: aws.String(k), Value: aws.String(v)})\n\t}\n\tfor k := range removeMap {\n\t\tremove = append(remove, k)\n\t}\n\treturn\n}", "func (s PodSet) Difference(s2 PodSet) PodSet {\n\tresult := PodSet{}\n\tfor key := range s {\n\t\tif _, contained := s2[key]; !contained {\n\t\t\tresult[key] = sets.Empty{}\n\t\t}\n\t}\n\treturn result\n}", "func (s *HashSet) Difference(other *HashSet) *HashSet{\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\tif other.Len() == 0{\n\t\treturn s\n\t}\n\tdiff := NewSet()\n\tfor item,_:=range s.items{\n\t\tif !other.Has(item){\n\t\t\tdiff.Insert(item)\n\t\t}\n\t}\n\treturn diff\n}", "func diffMap(m1, m2 map[string]*VF) map[string]*VF {\n\tr := map[string]*VF{}\n\tfor k, v := range m1 {\n\t\tif _, ok := m2[k]; !ok {\n\t\t\tr[k] = v\n\t\t}\n\t}\n\treturn r\n}", "func (b *Bag) difference(c Bag) Bag {\n\tbag := make(Bag)\n\tfor k, vb := range *b {\n\t\tvc, ok := c[k]\n\t\tif ok {\n\t\t\tif vb > vc {\n\t\t\t\tbag[k] = vb - vc\n\t\t\t}\n\t\t} else {\n\t\t\tbag[k] = vb\n\t\t}\n\t}\n\treturn bag\n}", "func (o1 StructTestObject) Diff(o2 StructTestObject) metago.Diff {\n\tchgs := make([]metago.Chg, 0)\n\n\t{\n\t\tva, vb := o1.B, o2.B\n\t\tif !va.Equals(vb) {\n\t\t\tchgs = append(chgs, metago.NewStructChg(&StructTestObjectBSREF, va.Diff(vb)))\n\t\t}\n\t}\n\n\t{\n\t\tva, vb := o1.MB, 
o2.MB\n\t\tfor key, va1 := range va {\n\t\t\tif vb1, ok := vb[key]; ok {\n\t\t\t\t// \"key\" exists in both \"va\" and \"vb\"\n\t\t\t\tchgs1 := make([]metago.Chg, 0)\n\t\t\t\tif !va1.Equals(vb1) {\n\t\t\t\t\tchgs1 = append(chgs1, metago.NewStructChg(&StructTestObjectMBSREF, va1.Diff(vb1)))\n\t\t\t\t}\n\t\t\t\tif len(chgs1) != 0 {\n\t\t\t\t\tchgs = append(chgs, metago.NewIntMapChg(&StructTestObjectMBSREF, key, metago.ChangeTypeModify, chgs1))\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// \"key\" exists in \"va\" but not in \"vb\"\n\t\t\t\tchgs1 := make([]metago.Chg, 0)\n\t\t\t\tt := BasicAttrTypesObject{}\n\t\t\t\tchgs1 = append(chgs1, metago.NewStructChg(&StructTestObjectMBSREF, va1.Diff(t)))\n\t\t\t\tif len(chgs1) != 0 {\n\t\t\t\t\tchgs = append(chgs, metago.NewIntMapChg(&StructTestObjectMBSREF, key, metago.ChangeTypeDelete, chgs1))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tfor key, vb1 := range vb {\n\t\t\tif _, ok := va[key]; ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// \"key\" exists in vb but not int va\"\n\t\t\tchgs1 := make([]metago.Chg, 0)\n\t\t\tt := BasicAttrTypesObject{}\n\t\t\tchgs1 = append(chgs1, metago.NewStructChg(&StructTestObjectMBSREF, t.Diff(vb1)))\n\t\t\tif len(chgs1) != 0 {\n\t\t\t\tchgs = append(chgs, metago.NewIntMapChg(&StructTestObjectMBSREF, key, metago.ChangeTypeInsert, chgs1))\n\t\t\t}\n\t\t}\n\t}\n\treturn metago.Diff{Chgs: chgs}\n}", "func (s *Uint64) Diff(other Uint64) Uint64 {\n\tres := NewUint64WithSize(s.Size())\n\n\tfor val := range s.m {\n\t\tif !other.Contains(val) {\n\t\t\tres.Add(val)\n\t\t}\n\t}\n\treturn res\n}", "func (i *Index) Diff(index *Index) *Index {\n\tdiff := &Index{}\n\n\tobjects := i.buildMap()\n\n\tfor _, object := range index.Objects {\n\t\tright, ok := objects[object.Key]\n\n\t\tif !ok || object.Tags != right.Tags {\n\t\t\tdiff.Add(object.Key, object.Tags, object.Filepath, !ok)\n\t\t}\n\t}\n\n\treturn diff\n}", "func (s *Set) Diff(s2 *Set) *Set {\n\tnew := NewSet()\n\tfor k := range s.set {\n\t\t// Add all values in s that are 
not in s2\n\t\tif !s2.HasVal(k) {\n\t\t\tnew.AddVal(k)\n\t\t}\n\t}\n\treturn new\n}", "func (kvs KeyValues) Merge(other KeyValues) KeyValues {\n\tif len(other) == 0 {\n\t\treturn kvs.Clone()\n\t}\n\tm := kvs.Map()\n\tfor _, item := range other {\n\t\tm[item.Key] = item.Value\n\t}\n\tmerged := make(KeyValues, len(m))\n\tidx := 0\n\tfor key, value := range m {\n\t\tmerged[idx] = &protoMetricsV1.KeyValue{\n\t\t\tKey: key,\n\t\t\tValue: value,\n\t\t}\n\t\tidx++\n\t}\n\tsort.Sort(merged)\n\treturn merged\n}", "func (s *SeriesIDSet) Diff(other *SeriesIDSet) {\n\tother.RLock()\n\tdefer other.RUnlock()\n\n\ts.Lock()\n\tdefer s.Unlock()\n\ts.bitmap = roaring.AndNot(s.bitmap, other.bitmap)\n}", "func (p1 PortInfoMap) Difference(p2 PortInfoMap) PortInfoMap {\n\tresult := make(PortInfoMap)\n\tfor svcPort, p1PortInfo := range p1 {\n\t\tp2PortInfo, ok := p2[svcPort]\n\t\tif ok && reflect.DeepEqual(p1[svcPort], p2PortInfo) {\n\t\t\tcontinue\n\t\t}\n\t\tresult[svcPort] = p1PortInfo\n\t}\n\treturn result\n}", "func (metrics Metrics) Diff(anotherMetrics Metrics) (MetricNameDiff, MetricLabelDiffs) {\n\tnamesDiff := []string{}\n\tlabelsDiff := MetricLabelDiffs{}\n\n\tfor metricName, metric := range metrics {\n\t\tanotherMetric, metricExists := anotherMetrics[metricName]\n\t\tif !metricExists {\n\t\t\tnamesDiff = append(namesDiff, metricName)\n\t\t\tcontinue\n\t\t}\n\n\t\tlabelNamesDiff := stringArrayDiff(metric.Labels, anotherMetric.Labels)\n\t\tif len(labelNamesDiff) > 0 {\n\t\t\tmetricLabelsDiff := MetricLabelDiff{\n\t\t\t\tMetricName: metricName,\n\t\t\t\tLabelDiff: labelNamesDiff,\n\t\t\t}\n\t\t\tlabelsDiff = append(labelsDiff, metricLabelsDiff)\n\t\t}\n\n\t}\n\n\treturn MetricNameDiff(namesDiff), labelsDiff\n}", "func Diff(left, right *unstructured.Unstructured) *DiffResult {\n\tvar leftObj, rightObj map[string]interface{}\n\tif left != nil {\n\t\tleftObj = left.Object\n\t}\n\tif right != nil {\n\t\trightObj = removeMapFields(leftObj, right.Object)\n\t}\n\tgjDiff := 
gojsondiff.New().CompareObjects(leftObj, rightObj)\n\tdr := DiffResult{\n\t\tDiff: gjDiff,\n\t\tModified: gjDiff.Modified(),\n\t}\n\treturn &dr\n}", "func diffVersionInfo(a, b map[string]string) (deleted, added map[string]string, changed map[string][2]string) {\n\t// from a to b\n\n\tdeleted = make(map[string]string)\n\tadded = make(map[string]string)\n\tchanged = make(map[string][2]string)\n\n\t// keys in a but not in b -> delete\n\tfor ka, va := range a {\n\t\tif vb, ok := b[ka]; !ok {\n\t\t\tdeleted[ka] = va\n\t\t} else {\n\t\t\tif va != vb {\n\t\t\t\tchanged[ka] = [2]string{va, vb}\n\t\t\t}\n\t\t}\n\t}\n\n\t// keys in b but not in a -> add\n\tfor kb, vb := range b {\n\t\tif _, ok := a[kb]; !ok {\n\t\t\tadded[kb] = vb\n\t\t}\n\t}\n\treturn deleted, added, changed\n}", "func (s *set) Diff(other Set) Set {\n\tr := NewSet()\n\ts.Foreach(func(x *Term) {\n\t\tif !other.Contains(x) {\n\t\t\tr.Add(x)\n\t\t}\n\t})\n\treturn r\n}", "func (ons *orderedNodeSet) difference(ons2 *orderedNodeSet) *orderedNodeSet {\n\tdiff := newOrderedNodeSet()\n\tif ons2 == nil {\n\t\treturn ons.clone()\n\t}\n\n\tfor _, k := range ons.elements() {\n\t\tif !ons2.contains(k){\n\t\t\tdiff.add(k)\n\t\t}\n\t}\n\n\treturn diff\n}", "func (mf Manifest) Diff(old Manifest) map[string]byte {\n\ta, b := mf.First(), old.First()\n\tif a.Vers == b.Vers && a.Name == b.Name {\n\t\treturn nil\n\t}\n\tm := make(map[string]*Version, len(old))\n\tfor i := range old {\n\t\tv := &old[i]\n\t\tm[v.Name] = v\n\t}\n\tres := make(map[string]byte, len(mf))\n\tfor _, v := range mf {\n\t\tw := m[v.Name]\n\t\tif w == nil {\n\t\t\tres[v.Name] = '+'\n\t\t\tcontinue\n\t\t}\n\t\tif v.Vers != w.Vers {\n\t\t\tres[v.Name] = '*'\n\t\t}\n\t\tdelete(m, v.Name)\n\t}\n\tfor k := range m {\n\t\tres[k] = '-'\n\t}\n\treturn res\n}", "func (s *Set) Difference(other *Set) *Set {\n\tnewSet := NewSizedSet(len(s.m))\n\tfor elem := range s.m {\n\t\tif _, found := other.m[elem]; !found {\n\t\t\tnewSet.m[elem] = 
SetNothing{}\n\t\t}\n\t}\n\treturn newSet\n}", "func Difference(one Set, other Set) Set {\n\tif other == nil && one == nil {\n\t\treturn nil\n\t}\n\tif other == nil {\n\t\treturn one.Copy()\n\t}\n\tif one == nil {\n\t\treturn nil\n\t}\n\toneLen := one.Len()\n\totherLen := other.Len()\n\tif oneLen == 0 || otherLen == 0{\n\t\treturn one.Copy()\n\t}\n\tcopyset := one.Copy()\n\tfor _, key := range copyset.Elements() {\n\t\tif other.Contains(key){\n\t\t\tcopyset.Remove(key)\n\t\t}\n\t}\n\treturn copyset\n}", "func (h *Histogram) Diff(old *Histogram) (*Histogram, error) {\n\tif h.Name != old.Name {\n\t\treturn nil, errors.Errorf(\"unmatched histogram, %s vs %s\", h.Name, old.Name)\n\t}\n\tif len(old.Buckets) > len(h.Buckets) {\n\t\treturn nil, errors.Errorf(\"old histogram has %d bucket(s), new only has %d\", len(old.Buckets), len(h.Buckets))\n\t}\n\n\tdiff := &Histogram{Name: h.Name, Sum: h.Sum - old.Sum}\n\toi := 0\n\tfor _, hb := range h.Buckets {\n\t\t// If we've already looked at all of the old buckets, copy the new bucket over.\n\t\tif oi >= len(old.Buckets) {\n\t\t\tdiff.Buckets = append(diff.Buckets, hb)\n\t\t\tcontinue\n\t\t}\n\n\t\tob := old.Buckets[oi]\n\n\t\tswitch {\n\t\tcase ob.Min < hb.Min:\n\t\t\t// The old histogram shouldn't contain any buckets that aren't in the new one.\n\t\t\treturn nil, errors.Errorf(\"bucket [%d,%d) is present in old histogram but not new one\", ob.Min, ob.Max)\n\t\tcase ob.Min > hb.Min:\n\t\t\t// If this bucket isn't present in the old histogram, just copy it over.\n\t\t\tif ob.Min < hb.Max {\n\t\t\t\treturn nil, errors.Errorf(\"old bucket [%d,%d) overlaps new bucket [%d,%d)\", ob.Min, ob.Max, hb.Min, hb.Max)\n\t\t\t}\n\t\t\tdiff.Buckets = append(diff.Buckets, hb)\n\t\tcase ob.Min == hb.Min:\n\t\t\t// If we're looking at the same bucket in both histograms, save the difference (if any) and move to the next old bucket.\n\t\t\tif ob.Max != hb.Max {\n\t\t\t\treturn nil, errors.Errorf(\"old bucket [%d,%d) doesn't match new bucket 
[%d,%d)\", ob.Min, ob.Max, hb.Min, hb.Max)\n\t\t\t}\n\t\t\tif hb.Count < ob.Count {\n\t\t\t\treturn nil, errors.Errorf(\"old bucket [%d,%d) has count %d, new only has %d\", ob.Min, ob.Max, ob.Count, hb.Count)\n\t\t\t} else if hb.Count > ob.Count {\n\t\t\t\tdiff.Buckets = append(diff.Buckets, HistogramBucket{hb.Min, hb.Max, hb.Count - ob.Count})\n\t\t\t}\n\t\t\toi++\n\t\t}\n\t}\n\treturn diff, nil\n}", "func (s *IntSet) Difference(s2 *IntSet) *IntSet {\n\tout := new(IntSet)\n\tout.Clear()\n\n\tfor key := range s.elem {\n\t\tout.Add(key)\n\t}\n\n\tfor key := range s2.elem {\n\t\tout.Remove(key)\n\t}\n\n\treturn out\n}", "func Difference(s1, s2 Set) Set {\n\ttoReturn := New()\n\tfor k := range s1 {\n\t\tif _, ok := s2[k]; !ok {\n\t\t\ttoReturn.Add(k)\n\t\t}\n\t}\n\treturn toReturn\n}", "func (t *Tree) Diff(other ITree) []IContent {\n\tif bytes.Equal(t.Hash(), other.Hash()) {\n\t\treturn nil\n\t}\n\n\treturn t.rootNode.diff(other.getNode())\n}", "func (set *AppleSet) Difference(other *AppleSet) *AppleSet {\n\tif set == nil {\n\t\treturn nil\n\t}\n\n\tif other == nil {\n\t\treturn set\n\t}\n\n\tdifferencedSet := NewAppleSet()\n\n\tset.s.RLock()\n\tother.s.RLock()\n\tdefer set.s.RUnlock()\n\tdefer other.s.RUnlock()\n\n\tfor v := range set.m {\n\t\tif !other.Contains(v) {\n\t\t\tdifferencedSet.doAdd(v)\n\t\t}\n\t}\n\n\treturn differencedSet\n}", "func (t Tags) Equal(other Tags) bool {\n\tif len(t.Values()) != len(other.Values()) {\n\t\treturn false\n\t}\n\tfor i := 0; i < len(t.Values()); i++ {\n\t\tequal := t.values[i].Name.Equal(other.values[i].Name) &&\n\t\t\tt.values[i].Value.Equal(other.values[i].Value)\n\t\tif !equal {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (oss *OrderedStringSet) Difference(setB *OrderedStringSet) *OrderedStringSet {\n\tdiff := NewOrderedStringSet()\n\tif setB == nil {\n\t\treturn oss.Clone()\n\t}\n\n\tfor _, m := range oss.Elements() {\n\t\tif !setB.Contains(m) {\n\t\t\tdiff.Add(m)\n\t\t}\n\t}\n\n\treturn diff\n}", "func (s 
*Int64) Diff(other Int64) Int64 {\n\tres := NewInt64WithSize(s.Size())\n\n\tfor val := range s.m {\n\t\tif !other.Contains(val) {\n\t\t\tres.Add(val)\n\t\t}\n\t}\n\treturn res\n}", "func Difference(a, b Set) Set {\n\treturn a.Where(func(v Value) bool { return !b.Has(v) })\n}", "func (set KeySet) Difference(sset KeySet) KeySet {\n\tnset := make(KeySet)\n\n\tfor k := range set {\n\t\tif _, ok := sset[k]; !ok {\n\t\t\tnset.Add(k)\n\t\t}\n\t}\n\n\treturn nset\n}", "func (d *DeltaStrSet) Diff(l []string) *DeltaStrSet {\n\t// clone and initialize values to false for old\n\t// false means deleted, true means added\n\tdiff := &DeltaStrSet{\n\t\titems: make(map[string]bool, len(d.items)),\n\t}\n\n\tfor k := range d.items {\n\t\tdiff.items[k] = false\n\t}\n\n\t// remove items that are in l and d.\n\t// add items that are only in l with value set to true\n\n\tfor _, k := range l {\n\t\tif _, found := diff.items[k]; found {\n\t\t\tdelete(diff.items, k)\n\t\t} else {\n\t\t\tdiff.items[k] = true\n\t\t}\n\t}\n\n\treturn diff\n}", "func GetDictDiff(reference map[string]interface{}, input map[string]interface{}) map[string]interface{} {\n\tret := make(map[string]interface{})\n\tfor k, v := range input {\n\t\tif rv, ok := reference[k]; ok {\n\t\t\treplace, nv := getValueDiff(rv, v)\n\t\t\tif replace {\n\t\t\t\tret[k] = nv\n\t\t\t}\n\t\t} else {\n\t\t\t// If the value does not exist in reference, it's new, and we\n\t\t\t// should include it.\n\t\t\tret[k] = v\n\t\t}\n\t}\n\n\treturn ret\n}", "func (set Set) Diff(ctx context.Context, keys ...string) ([]string, error) {\n\treq := newRequestSize(2+len(keys), \"\\r\\n$5\\r\\nSDIFF\\r\\n$\")\n\treq.addStringAndStrings(set.name, keys)\n\treturn set.c.cmdStrings(ctx, req)\n}", "func differentKeys(oldMap, newMap map[string]string) []string {\n\tvar differentKeys []string\n\tfor k := range oldMap {\n\t\tif _, exist := newMap[k]; !exist {\n\t\t\tdifferentKeys = append(differentKeys, k)\n\t\t}\n\t}\n\n\treturn differentKeys\n}", "func 
SetDiff(old, new Set) (add, del Set) {\n\tadd, del = Set{}, Set{}\n\tfor k := range new {\n\t\tadd.Push(k)\n\t}\n\tfor k := range old {\n\t\tdel.Push(k)\n\t}\n\tfor k := range add {\n\t\tif _, ok := del[k]; ok {\n\t\t\tdelete(add, k)\n\t\t\tdelete(del, k)\n\t\t}\n\t}\n\treturn\n}", "func (v *ClassValue) Difference(o Value) Value {\n\ta, b := v, o.(*ClassValue)\n\tout := &ClassValue{Class: v.Class, Fields: make(map[string]Value, len(v.Fields))}\n\tfor n, f := range a.Fields {\n\t\tout.Fields[n] = f.Difference(b.Fields[n])\n\t}\n\treturn out\n}", "func AttributesDiff(changeList *[]ChangeInstruction, from, to *html.Node) {\n\totherAttrs := AttrMapFromNode(to)\n\tattrs := AttrMapFromNode(from)\n\n\t// Now iterate through the attributes in otherEl\n\tfor name, otherValue := range otherAttrs {\n\t\tvalue, found := attrs[name]\n\t\tif !found || value != otherValue {\n\n\t\t\tscopeID, _ := ScopeOfNode(from)\n\t\t\t*changeList = append(*changeList, ChangeInstruction{\n\t\t\t\tType: \"SET_ATTR\",\n\t\t\t\tScopeID: scopeID,\n\t\t\t\tElement: SelectorFromNode(from),\n\t\t\t\tAttr: Attr{\n\t\t\t\t\tName: name,\n\t\t\t\t\tValue: otherValue,\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\t}\n\n\tfor attrName := range attrs {\n\t\tif _, found := otherAttrs[attrName]; !found {\n\n\t\t\tscopeID, _ := ScopeOfNode(from)\n\t\t\t*changeList = append(*changeList, ChangeInstruction{\n\t\t\t\tType: \"REMOVE_ATTR\",\n\t\t\t\tScopeID: scopeID,\n\t\t\t\tElement: SelectorFromNode(from),\n\t\t\t\tAttr: Attr{\n\t\t\t\t\tName: attrName,\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\t}\n\n}", "func (bst *BST) Difference(other *BST) []interface{} {\n\treturn set_operation(bst.Root, other.Root, setDiff)\n}", "func (ss *StringSet) Difference(other *StringSet) *StringSet {\n\tdiff := NewStringSet()\n\tif other == nil {\n\t\tfor k := range ss.members {\n\t\t\tdiff.Add(k)\n\t\t}\n\t\treturn diff\n\t}\n\n\tfor k := range ss.members {\n\t\tif !other.Contains(k) {\n\t\t\tdiff.Add(k)\n\t\t}\n\t}\n\treturn diff\n}", "func 
enforcedSetDifference(a map[string]ruleset.EnforceChange, b map[string]interface{}) map[string]interface{} {\n\tresult := make(map[string]interface{})\n\tfor k, v := range a {\n\t\tif _, ok := b[k]; !ok {\n\t\t\tresult[k] = v\n\t\t}\n\t}\n\n\treturn result\n}", "func mergeResourceTags(one map[string]string, two map[string]*string) (map[string]*string, bool) {\n\tmergedResourceTags := map[string]*string{}\n\t// \"Copy\" two into a new map.\n\tfor key, value := range two {\n\t\tmergedResourceTags[key] = to.Ptr(*value)\n\t}\n\tchanged := false\n\t// Merge \"one\" into the new map, setting changed if we had to make a modification,\n\t// this is for determining whether we needed to make an update to the existing tags.\n\tfor key, value := range one {\n\t\tval, ok := mergedResourceTags[key]\n\t\tif !ok || *val != value {\n\t\t\tmergedResourceTags[key] = to.Ptr(value)\n\t\t\tchanged = true\n\t\t}\n\t}\n\treturn mergedResourceTags, changed\n}", "func copyTags(tags map[string]string) map[string]string {\n\tnewTags := make(map[string]string)\n\tfor k, v := range tags {\n\t\tnewTags[k] = v\n\t}\n\treturn newTags\n}", "func (s *StrSet) Difference(o *StrSet) StrSet {\n\tn := NewStr()\n\tfor el := range s.els {\n\t\tif _, ok := o.els[el]; !ok {\n\t\t\tn.Add(el)\n\t\t}\n\t}\n\treturn n\n}", "func (counts ProcessCounts) Diff(currentCounts ProcessCounts) map[ProcessClass]int64 {\n\tdiff := make(map[ProcessClass]int64)\n\tdesiredValue := reflect.ValueOf(counts)\n\tcurrentValue := reflect.ValueOf(currentCounts)\n\tfor label, index := range processClassIndices {\n\t\tdesired := desiredValue.Field(index).Int()\n\t\tcurrent := currentValue.Field(index).Int()\n\t\tif (desired > 0 || current > 0) && desired != current {\n\t\t\tdiff[label] = desired - current\n\t\t}\n\t}\n\n\treturn diff\n}", "func (mm Uint64Uint64Map) Equals(other Uint64Uint64Map) bool {\n\tif mm.Size() != other.Size() {\n\t\treturn false\n\t}\n\tfor k, v1 := range mm {\n\t\tv2, found := other[k]\n\t\tif !found || v1 != 
v2 {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (s *Set) Difference(s1 *Set) *Set {\n\ts2 := NewSet()\n\tfor _, val := range s.elements {\n\t\tadd := true\n\t\tfor _, val1 := range s1.elements {\n\t\t\tif val == val1 {\n\t\t\t\tadd = false\n\t\t\t}\n\t\t}\n\t\tif add {\n\t\t\ts2.Add(val)\n\t\t}\n\t}\n\treturn s2\n}", "func Diff(x, y []checker.Pair) (dr DiffResult, err error) {\n\tvar i, j int\n\tvar diffs []DiffLine\n\txn, yn := len(x), len(y)\n\tfor i < xn && j < yn {\n\t\tif x[i].Key == y[j].Key {\n\t\t\tif x[i].Value == y[j].Value {\n\t\t\t\tdiffs = append(diffs, DiffLine{T: EQUAL, Left: x[i], Right: y[j]})\n\t\t\t} else {\n\t\t\t\tdiffs = append(diffs, DiffLine{T: DIFFERENT, Left: x[i], Right: y[j]})\n\t\t\t}\n\t\t\ti++\n\t\t\tj++\n\t\t} else {\n\t\t\tif x[i].Key < y[j].Key {\n\t\t\t\tdiffs = append(diffs, DiffLine{T: LEFTNEW, Left: x[i]})\n\t\t\t\ti++\n\t\t\t} else {\n\t\t\t\tdiffs = append(diffs, DiffLine{T: RIGHTNEW, Right: y[j]})\n\t\t\t\tj++\n\t\t\t}\n\t\t}\n\t}\n\t// drain longer list\n\tfor ; i < xn; i++ {\n\t\tdiffs = append(diffs, DiffLine{T: LEFTNEW, Left: x[i]})\n\t}\n\tfor ; j < yn; j++ {\n\t\tdiffs = append(diffs, DiffLine{T: RIGHTNEW, Right: y[j]})\n\t}\n\tdr = DiffResult{Diffs: diffs}\n\treturn dr, err\n}", "func (s *ConcurrentSet) Difference(others ...Set) Set {\n\tvar n sync.Map\n\tsize := uint32(0)\n\n\ts.hash.Range(func(k, v interface{}) bool {\n\t\texistAny := false\n\t\tfor _, set := range others {\n\t\t\tif set.Contains(k) {\n\t\t\t\texistAny = true\n\t\t\t}\n\t\t}\n\t\tif !existAny {\n\t\t\tn.Store(k, nothing{})\n\t\t\tsize++\n\t\t}\n\t\treturn true\n\t})\n\n\treturn &ConcurrentSet{n, size}\n}", "func DiffWithValues(firstJson map[string]interface{}, secondJson map[string]interface{}) (diff []string, firstValues map[string]interface{}, secondValues map[string]interface{}) {\n\tdiff = Diff(firstJson, secondJson)\n\tfirstValues = keepDiffFields(firstJson, diff)\n\tsecondValues = keepDiffFields(secondJson, diff)\n\treturn diff, 
firstValues, secondValues\n}", "func Diff(a, b []string) (diff []string) {\n\tfor _, e := range a {\n\t\tif !Contains(b, e) {\n\t\t\tdiff = append(diff, e)\n\t\t}\n\t}\n\treturn\n}", "func (c StringArrayCollection) Diff(m interface{}) Collection {\n\tms := m.([]string)\n\tvar d = make([]string, 0)\n\tfor _, value := range c.value {\n\t\texist := false\n\t\tfor i := 0; i < len(ms); i++ {\n\t\t\tif ms[i] == value {\n\t\t\t\texist = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !exist {\n\t\t\td = append(d, value)\n\t\t}\n\t}\n\treturn StringArrayCollection{\n\t\tvalue: d,\n\t}\n}", "func (s Set) Subtract(other Set) {\n\tfor item := range other {\n\t\tif s.Has(item) {\n\t\t\ts.Remove(item)\n\t\t}\n\t}\n}", "func DiffKeys(lhs, rhs *tomledit.Document) []Diff {\n\tdiff := diffSections(lhs.Global, rhs.Global)\n\n\tlsec, rsec := lhs.Sections, rhs.Sections\n\ttransform.SortSectionsByName(lsec)\n\ttransform.SortSectionsByName(rsec)\n\n\ti, j := 0, 0\n\tfor i < len(lsec) && j < len(rsec) {\n\t\tif lsec[i].Name.Before(rsec[j].Name) {\n\t\t\tdiff = append(diff, Diff{Type: Section, Deleted: true, KV: KV{Key: lsec[i].Name.String()}})\n\t\t\tfor _, kv := range allKVs(lsec[i]) {\n\t\t\t\tdiff = append(diff, Diff{Type: Mapping, Deleted: true, KV: kv})\n\t\t\t}\n\t\t\ti++\n\t\t} else if rsec[j].Name.Before(lsec[i].Name) {\n\t\t\tdiff = append(diff, Diff{Type: Section, KV: KV{Key: rsec[j].Name.String()}})\n\t\t\tfor _, kv := range allKVs(rsec[j]) {\n\t\t\t\tdiff = append(diff, Diff{Type: Mapping, KV: kv})\n\t\t\t}\n\t\t\tj++\n\t\t} else {\n\t\t\tdiff = append(diff, diffSections(lsec[i], rsec[j])...)\n\t\t\ti++\n\t\t\tj++\n\t\t}\n\t}\n\tfor ; i < len(lsec); i++ {\n\t\tdiff = append(diff, Diff{Type: Section, Deleted: true, KV: KV{Key: lsec[i].Name.String()}})\n\t\tfor _, kv := range allKVs(lsec[i]) {\n\t\t\tdiff = append(diff, Diff{Type: Mapping, Deleted: true, KV: kv})\n\t\t}\n\t}\n\tfor ; j < len(rsec); j++ {\n\t\tdiff = append(diff, Diff{Type: Section, KV: KV{Key: 
rsec[j].Name.String()}})\n\t\tfor _, kv := range allKVs(rsec[j]) {\n\t\t\tdiff = append(diff, Diff{Type: Mapping, KV: kv})\n\t\t}\n\t}\n\n\treturn diff\n}", "func (set ModuleIdentifierSet) Difference(other ModuleIdentifierSet) (c ModuleIdentifierSet) {\n\t// copy internal slices and sort them\n\ta := ModuleIdentifierSet{identifiers: set.Identifiers()}\n\tb := ModuleIdentifierSet{identifiers: other.Identifiers()}\n\tsort.Sort(a)\n\tsort.Sort(b)\n\n\tlengthA, lengthB := a.Len(), b.Len()\n\tvar indexA, indexB int\n\tfor indexA < lengthA && indexB < lengthB {\n\t\tif a.identifiers[indexA] == b.identifiers[indexB] {\n\t\t\tindexA++\n\t\t\tindexB++\n\t\t\tcontinue\n\t\t}\n\t\tif a.identifiers[indexA] < b.identifiers[indexB] {\n\t\t\t// append from the first set\n\t\t\tc.Append(a.identifiers[indexA])\n\t\t\tindexA++\n\t\t\tcontinue\n\t\t}\n\t\t// append from the second set\n\t\tc.Append(b.identifiers[indexB])\n\t\tindexB++\n\t}\n\t// append all remaining ones\n\tfor indexA < lengthA {\n\t\tc.Append(a.identifiers[indexA])\n\t\tindexA++\n\t}\n\tfor indexB < lengthB {\n\t\tc.Append(b.identifiers[indexB])\n\t\tindexB++\n\t}\n\t// sort our complement and return\n\tsort.Sort(c)\n\treturn\n}", "func difference(a, b []string) []string {\n\tmb := make(map[string]struct{}, len(b))\n\tfor _, x := range b {\n\t\tmb[x] = struct{}{}\n\t}\n\tvar diff []string\n\tfor _, x := range a {\n\t\tif _, found := mb[x]; !found {\n\t\t\tdiff = append(diff, x)\n\t\t}\n\t}\n\treturn diff\n}", "func (m1 Map) Equal(m2 Map) bool {\n\tif len(m1) != len(m2) {\n\t\treturn false\n\t}\n\n\tfor k, v := range m1 {\n\t\tif w, ok := m2[k]; !ok || v != w {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func (attr SGRAttr) Diff(other SGRAttr) SGRAttr {\n\tif other&SGRAttrClear != 0 {\n\t\treturn other\n\t}\n\n\tconst (\n\t\tfgMask = sgrAttrFGSet | (sgrColorMask << sgrFGShift)\n\t\tbgMask = sgrAttrBGSet | (sgrColorMask << sgrBGShift)\n\t)\n\n\tvar (\n\t\tattrFlags = attr & SGRAttrMask\n\t\totherFlags = 
other & SGRAttrMask\n\t\tchangedFlags = attrFlags ^ otherFlags\n\t\tgoneFlags = attrFlags & changedFlags\n\t\tattrFG = attr & fgMask\n\t\tattrBG = attr & bgMask\n\t\totherFG = other & fgMask\n\t\totherBG = other & bgMask\n\t)\n\n\tif goneFlags != 0 ||\n\t\t(otherFG == 0 && attrFG != 0) ||\n\t\t(otherBG == 0 && attrBG != 0) {\n\t\tother |= SGRAttrClear\n\t\treturn other\n\t}\n\n\tdiff := otherFlags & changedFlags\n\tif otherFG != attrFG {\n\t\tdiff |= otherFG\n\t}\n\tif otherBG != attrBG {\n\t\tdiff |= otherBG\n\t}\n\treturn diff\n}", "func (t Tags) Copy() Tags {\n\tret := Tags{}\n\n\tfor k, v := range t {\n\t\tret[k] = v\n\t}\n\n\treturn ret\n}", "func Diff(left, right []string) (leftOnly, rightOnly []string) {\n\treturn CollectDifferent(left, right, GetAll, GetAll)\n}", "func mergeTags(generalTags []*tag, infraTags []*tag) []*tag {\n\tvar dupMap = make(map[string]bool)\n\tfor _, tag := range generalTags {\n\t\tdupMap[tag.key] = true\n\t}\n\tfor _, tag := range infraTags {\n\t\tif _, exists := dupMap[tag.key]; !exists {\n\t\t\tgeneralTags = append(generalTags, tag)\n\t\t}\n\t}\n\treturn generalTags\n}", "func ParameterDiff(old, new map[string]string) map[string]string {\n\tdiff := make(map[string]string)\n\n\tfor key, val := range old {\n\t\t// If a parameter was removed in the new spec\n\t\tif _, ok := new[key]; !ok {\n\t\t\tdiff[key] = val\n\t\t}\n\t}\n\n\tfor key, val := range new {\n\t\t// If new spec parameter was added or changed\n\t\tif v, ok := old[key]; !ok || v != val {\n\t\t\tdiff[key] = val\n\t\t}\n\t}\n\n\treturn diff\n}", "func (s Set) Difference(t Set) Set {\n\td := s.Clone()\n\tfor n := range t {\n\t\tdelete(d, n)\n\t}\n\treturn d\n}", "func BenchmarkMergeTagSets(b *testing.B) {\n\tif testing.Short() {\n\t\tb.Skip(\"short test\")\n\t}\n\tt1 := make(TagSet, 10)\n\tt2 := make(TagSet, 10)\n\tfor i := 0; i < len(t1); i++ {\n\t\tt1[i] = Tag{\n\t\t\tKey: fmt.Sprintf(\"k1%d\", i),\n\t\t\tValue: fmt.Sprintf(\"v1_%d\", i),\n\t\t}\n\t\tt2[i] = 
Tag{\n\t\t\tKey: fmt.Sprintf(\"k2%d\", i),\n\t\t\tValue: fmt.Sprintf(\"v2_%d\", i),\n\t\t}\n\t}\n\tt1.Sort()\n\tt2.Sort()\n\n\tscratch := make(TagSet, len(t1)+len(t2))\n\n\tb.ResetTimer()\n\tb.Run(\"KeysNotEqual\", func(b *testing.B) {\n\t\tfor size := 2; size <= 10; size += 2 {\n\t\t\tb.Run(fmt.Sprint(size), func(b *testing.B) {\n\t\t\t\ts1 := t1[:size]\n\t\t\t\ts2 := t2[:size]\n\t\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\t\tmergeTagSets(s1, s2, scratch)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t})\n\n\tb.Run(\"KeysHalfEqual\", func(b *testing.B) {\n\t\tfor i := range t2 {\n\t\t\tif i&1 != 0 {\n\t\t\t\tt2[i].Key = t1[i].Key\n\t\t\t}\n\t\t}\n\t\tt2.Sort()\n\t\tfor size := 2; size <= 10; size += 2 {\n\t\t\tb.Run(fmt.Sprint(size), func(b *testing.B) {\n\t\t\t\ts1 := t1[:size]\n\t\t\t\ts2 := t2[:size]\n\t\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\t\tmergeTagSets(s1, s2, scratch)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t})\n\n\tb.Run(\"KeysEqual\", func(b *testing.B) {\n\t\tfor i := range t2 {\n\t\t\tt2[i].Key = t1[i].Key\n\t\t}\n\t\tfor size := 2; size <= 10; size += 2 {\n\t\t\tb.Run(fmt.Sprint(size), func(b *testing.B) {\n\t\t\t\ts1 := t1[:size]\n\t\t\t\ts2 := t2[:size]\n\t\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\t\tmergeTagSets(s1, s2, scratch)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t})\n}", "func (s *Set) DifferenceUpdate(other *Set) *Set {\n\tm := s.m\n\tfor elem := range other.m {\n\t\tdelete(m, elem)\n\t}\n\treturn s\n}", "func MergeTags(generalTags []*Tag, infraTags []*Tag) []*Tag {\n\tvar dupMap = make(map[string]bool)\n\tfor _, tag := range generalTags {\n\t\tdupMap[tag.Key] = true\n\t}\n\tfor _, tag := range infraTags {\n\t\tif _, exists := dupMap[tag.Key]; !exists {\n\t\t\tgeneralTags = append(generalTags, tag)\n\t\t}\n\t}\n\treturn generalTags\n}", "func uniqueTagsWithSeen(seen map[string]struct{}, t1 gostatsd.Tags, t2 gostatsd.Tags) gostatsd.Tags {\n\tlast := len(t1)\n\tfor idx := 0; idx < last; {\n\t\ttag := t1[idx]\n\t\tif _, ok := seen[tag]; ok {\n\t\t\tlast--\n\t\t\tt1[idx] = 
t1[last]\n\t\t\tt1 = t1[:last]\n\t\t} else {\n\t\t\tseen[tag] = present\n\t\t\tidx++\n\t\t}\n\t}\n\n\tfor _, tag := range t2 {\n\t\tif _, ok := seen[tag]; !ok {\n\t\t\tt1 = append(t1, tag)\n\t\t}\n\t}\n\n\treturn t1\n}", "func (this *ExDomain) Difference(domain Domain) Domain {\n\tnewDomain := CreateExDomain()\n\tdomainValues := domain.Values_asMap()\n\tfor key := range this.Values {\n\t\tif !domainValues[key] {\n\t\t\tnewDomain.Add(key)\n\t\t}\n\t}\n\n\tfor key := range domainValues {\n\t\tif !this.Values[key] {\n\t\t\tnewDomain.Add(key)\n\t\t}\n\t}\n\treturn newDomain\n}", "func (d *Release) Diff() map[string]*Changes {\n\tm := d.merge()\n\tif len(m) == 0 {\n\t\treturn nil\n\t}\n\t// Builds the list of available prefixes of deploy keys\n\t// with project name and env values.\n\tprefix := make([]string, 0)\n\tfor _, ev1 := range d.env1 {\n\t\tfor _, ev2 := range d.env2 {\n\t\t\tpid := string(d.ref.Key())\n\t\t\tprefix = append(prefix, Key(pid, ev1, ev2)+\"_\")\n\t\t}\n\t}\n\t// Returns only the name of the variable from the deploy key.\n\tname := func(s string) string {\n\t\tfor _, v := range prefix {\n\t\t\tif strings.HasPrefix(s, v) {\n\t\t\t\treturn strings.TrimPrefix(s, v)\n\t\t\t}\n\t\t}\n\t\tpanic(\"deploy: fails to find prefix in deploy key named: \" + s)\n\t}\n\tc := make(map[string]*Changes)\n\tfor k, v := range d.dep {\n\t\tn := name(k)\n\t\tif _, ok := c[n]; !ok {\n\t\t\tc[n] = &Changes{Var: n, Log: make(map[string][2]interface{})}\n\t\t}\n\t\tcv := d.dst[k]\n\t\tc[n].Log[k] = change(cv, v)\n\t}\n\treturn c\n}", "func (set Int64Set) Difference(other Int64Set) Int64Set {\n\tdifferencedSet := NewInt64Set()\n\tfor v := range set {\n\t\tif !other.Contains(v) {\n\t\t\tdifferencedSet.doAdd(v)\n\t\t}\n\t}\n\treturn differencedSet\n}", "func TestHashMapEqual(t *T) {\n\t// Degenerate case\n\thm1, hm2 := NewHashMap(), NewHashMap()\n\tassert.Equal(t, true, hm1.Equal(hm2))\n\tassert.Equal(t, true, hm2.Equal(hm1))\n\n\t// False with different sizes\n\thm1, _ = 
hm1.Set(\"one\", 1)\n\tassert.Equal(t, false, hm1.Equal(hm2))\n\tassert.Equal(t, false, hm2.Equal(hm1))\n\n\t// False with same sizes\n\thm2, _ = hm2.Set(\"two\", 2)\n\tassert.Equal(t, false, hm1.Equal(hm2))\n\tassert.Equal(t, false, hm2.Equal(hm1))\n\n\t// Now true\n\thm1, _ = hm1.Set(\"two\", 2)\n\thm2, _ = hm2.Set(\"one\", 1)\n\tassert.Equal(t, true, hm1.Equal(hm2))\n\tassert.Equal(t, true, hm2.Equal(hm1))\n\n\t// False with embedded HashMap\n\thm1, _ = hm1.Set(NewHashMap().Set(\"three\", 3))\n\tassert.Equal(t, false, hm1.Equal(hm2))\n\tassert.Equal(t, false, hm2.Equal(hm1))\n\n\t// True with embedded set\n\thm2, _ = hm2.Set(NewHashMap().Set(\"three\", 3))\n\tassert.Equal(t, true, hm1.Equal(hm2))\n\tassert.Equal(t, true, hm2.Equal(hm1))\n\n\t// False with same key, different value\n\thm1, _ = hm1.Set(\"four\", 4)\n\thm2, _ = hm2.Set(\"four\", 5)\n\tassert.Equal(t, false, hm1.Equal(hm2))\n\tassert.Equal(t, false, hm2.Equal(hm1))\n}", "func Difference(a, b []string) []string {\n\tmb := make(map[string]struct{}, len(b))\n\tfor _, x := range b {\n\t\tmb[x] = struct{}{}\n\t}\n\tvar diff []string\n\tfor _, x := range a {\n\t\tif _, found := mb[x]; !found {\n\t\t\tdiff = append(diff, x)\n\t\t}\n\t}\n\treturn diff\n}", "func (b *Blade) Diff(blade *Blade) (differences []string) {\n\tif len(b.Nics) != len(blade.Nics) {\n\t\treturn []string{\"Number of Nics is different\"}\n\t}\n\n\tsort.Sort(byMacAddress(b.Nics))\n\tsort.Sort(byMacAddress(blade.Nics))\n\n\tsort.Sort(byDiskSerial(b.Disks))\n\tsort.Sort(byDiskSerial(blade.Disks))\n\n\tfor _, diff := range pretty.Diff(b, blade) {\n\t\tif !strings.Contains(diff, \"UpdatedAt.\") && !strings.Contains(diff, \"PowerKw\") && !strings.Contains(diff, \"TempC\") {\n\t\t\tdifferences = append(differences, diff)\n\t\t}\n\t}\n\n\treturn differences\n}", "func (p *plugin) concatTags(tags1 *structtag.Tags, tags2 *structtag.Tags) (*structtag.Tags, error) {\n\tif tags1.Len() == 0 {\n\t\treturn tags2, nil\n\t}\n\tif tags2.Len() == 0 
{\n\t\treturn tags1, nil\n\t}\n\n\tfor _, t2 := range tags2.Tags() {\n\t\tvar found bool\n\t\tfor _, t1 := range tags1.Tags() {\n\t\t\tif t1.Key == t2.Key {\n\t\t\t\tif len(t1.Name) == 0 {\n\t\t\t\t\tt1.Name = t2.Name\n\t\t\t\t}\n\t\t\t\tif t1.Options == nil || len(t1.Options) == 0 {\n\t\t\t\t\tt1.Options = t2.Options\n\t\t\t\t}\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tvar err error\n\t\t\ts := tags1.String() + \" \" + t2.String()\n\t\t\ttags1, err = structtag.Parse(s)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"failed to parse tags '%s': %s\", s, err.Error())\n\t\t\t}\n\t\t}\n\t}\n\n\treturn tags1, nil\n}", "func compareMaps(t *testing.T, a, b map[string]string) {\n\tif len(a) != len(b) {\n\t\tt.Error(\"Maps different sizes\", a, b)\n\t}\n\tfor ka, va := range a {\n\t\tif vb, ok := b[ka]; !ok || va != vb {\n\t\t\tt.Error(\"Difference in key\", ka, va, b[ka])\n\t\t}\n\t}\n\tfor kb, vb := range b {\n\t\tif va, ok := a[kb]; !ok || vb != va {\n\t\t\tt.Error(\"Difference in key\", kb, vb, a[kb])\n\t\t}\n\t}\n}", "func Difference(arr1, arr2 []int64) (result []int64) {\n\thash := make(map[int64]struct{})\n\n\tfor _, item := range arr1 {\n\t\thash[item] = struct{}{}\n\t}\n\n\tfor _, item := range arr2 {\n\t\tif _, ok := hash[item]; !ok {\n\t\t\tresult = append(result, item)\n\t\t}\n\t}\n\n\treturn result\n}", "func (a Attributes) Equal(b Attributes) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor _, attr := range a {\n\t\tv := b.Value(attr.Key)\n\t\tif !bytes.Equal(v, attr.Value) {\n\t\t\treturn false\n\t\t}\n\t}\n\tfor _, attr := range b {\n\t\tv := a.Value(attr.Key)\n\t\tif !bytes.Equal(v, attr.Value) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func EqualKeys(a, b VectorClock) bool {\n if len(a.Vector) != len(b.Vector) {\n return false\n }\n for k, _:= range a.Vector{\n _, exists := b.Vector[k]\n if exists == false{\n return false\n }\n }\n return true\n}", "func (c *collection) subtract(other 
*collection) *collection {\n\tret := newCollection(c.defaultNs, c.meta)\n\tfor k, v := range c.objects {\n\t\tif _, ok := other.objects[k]; !ok {\n\t\t\tret.objects[k] = v\n\t\t}\n\t}\n\treturn ret\n}", "func CompareItemsBetweenOldAndNew(feedOld *atom.Feed, feedNew *atom.Feed) []*atom.Entry {\r\n\titemList := []*atom.Entry{}\r\n\r\n\tfor _, item1 := range feedNew.Entry {\r\n\t\texists := false\r\n\t\tfor _, item2 := range feedOld.Entry {\r\n\t\t\tif item1.ID == item2.ID {\r\n\t\t\t\texists = true\r\n\t\t\t\tbreak\r\n\t\t\t}\r\n\t\t}\r\n\t\tif !exists {\r\n\t\t\titemList = append(itemList, item1)\r\n\t\t}\r\n\t}\r\n\treturn itemList\r\n}", "func (s *Set) Difference(other *Set) *Set {\n\treturn NewSet(s.difference(other)...)\n}", "func (r1 *csvTable) Diff(r2 rel.Relation) rel.Relation {\n\treturn rel.NewDiff(r1, r2)\n}", "func Difference[T comparable](list1 []T, list2 []T) ([]T, []T) {\n\tleft := []T{}\n\tright := []T{}\n\n\tseenLeft := map[T]struct{}{}\n\tseenRight := map[T]struct{}{}\n\n\tfor _, elem := range list1 {\n\t\tseenLeft[elem] = struct{}{}\n\t}\n\n\tfor _, elem := range list2 {\n\t\tseenRight[elem] = struct{}{}\n\t}\n\n\tfor _, elem := range list1 {\n\t\tif _, ok := seenRight[elem]; !ok {\n\t\t\tleft = append(left, elem)\n\t\t}\n\t}\n\n\tfor _, elem := range list2 {\n\t\tif _, ok := seenLeft[elem]; !ok {\n\t\t\tright = append(right, elem)\n\t\t}\n\t}\n\n\treturn left, right\n}", "func Difference[T comparable](list1 []T, list2 []T) ([]T, []T) {\n\tleft := []T{}\n\tright := []T{}\n\n\tseenLeft := map[T]struct{}{}\n\tseenRight := map[T]struct{}{}\n\n\tfor _, elem := range list1 {\n\t\tseenLeft[elem] = struct{}{}\n\t}\n\n\tfor _, elem := range list2 {\n\t\tseenRight[elem] = struct{}{}\n\t}\n\n\tfor _, elem := range list1 {\n\t\tif _, ok := seenRight[elem]; !ok {\n\t\t\tleft = append(left, elem)\n\t\t}\n\t}\n\n\tfor _, elem := range list2 {\n\t\tif _, ok := seenLeft[elem]; !ok {\n\t\t\tright = append(right, elem)\n\t\t}\n\t}\n\n\treturn left, right\n}", "func 
ExcludeTags(rii RegInvImage, excludedTags map[Tag]bool) RegInvImage {\n\tfiltered := make(RegInvImage)\n\tfor imageName, digestTags := range rii {\n\t\tfor digest, tags := range digestTags {\n\t\t\tfor _, tag := range tags {\n\t\t\t\tif _, excludeMe := excludedTags[tag]; excludeMe {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif filtered[imageName] == nil {\n\t\t\t\t\tfiltered[imageName] = make(DigestTags)\n\t\t\t\t}\n\t\t\t\tfiltered[imageName][digest] = append(\n\t\t\t\t\tfiltered[imageName][digest],\n\t\t\t\t\ttag)\n\t\t\t}\n\t\t}\n\t}\n\treturn filtered\n}", "func (s *Set[T]) Difference(set Set[T]) {\n\tfor elt := range set {\n\t\tdelete(*s, elt)\n\t}\n}", "func Diff(a, b interface{}) string {\n\td := &spew.ConfigState{Indent: \" \", DisableMethods: true, DisablePointerMethods: true, DisablePointerAddresses: true}\n\treturn diff.Diff(d.Sdump(a), d.Sdump(b))\n}", "func Diff(a, b interface{}) string {\n\td := &spew.ConfigState{Indent: \" \", DisableMethods: true, DisablePointerMethods: true, DisablePointerAddresses: true}\n\treturn diff.Diff(d.Sdump(a), d.Sdump(b))\n}", "func (p *ServiceDefinition) Diff(other ServiceDefinition) (ServiceDefinitionDiff, error) {\n\tvar err error\n\tvar apiDiff util.MapDiff\n\tvar pluginDiff util.MapDiff\n\n\tif apiDiff, err = p.diffApis(other); err != nil {\n\t\treturn ServiceDefinitionDiff{}, err\n\t}\n\n\tif pluginDiff, err = p.diffPlugins(other); err != nil {\n\t\treturn ServiceDefinitionDiff{}, err\n\t}\n\n\treturn ServiceDefinitionDiff{\n\t\tApis: apiDiff,\n\t\tPlugins: pluginDiff,\n\t}, nil\n}", "func Diff(A, B string) string {\n\taLines := strings.Split(A, \"\\n\")\n\tbLines := strings.Split(B, \"\\n\")\n\n\tchunks := DiffChunks(aLines, bLines)\n\n\tbuf := new(bytes.Buffer)\n\tfor _, c := range chunks {\n\t\tfor _, line := range c.Added {\n\t\t\tfmt.Fprintf(buf, \"+%s\\n\", line)\n\t\t}\n\t\tfor _, line := range c.Deleted {\n\t\t\tfmt.Fprintf(buf, \"-%s\\n\", line)\n\t\t}\n\t\tfor _, line := range c.Equal 
{\n\t\t\tfmt.Fprintf(buf, \" %s\\n\", line)\n\t\t}\n\t}\n\treturn strings.TrimRight(buf.String(), \"\\n\")\n}", "func difference(xs, ys []string) []string {\n\tm := make(map[string]struct{}, len(ys))\n\tfor _, y := range ys {\n\t\tm[y] = struct{}{}\n\t}\n\trs := make([]string, 0, len(ys))\n\tfor _, x := range xs {\n\t\tif _, ok := m[x]; !ok {\n\t\t\trs = append(rs, x)\n\t\t}\n\t}\n\treturn rs\n}", "func (e EdgeMetadatas) Merge(other EdgeMetadatas) EdgeMetadatas {\n\tcp := e.Copy()\n\tfor k, v := range other {\n\t\tcp[k] = cp[k].Merge(v)\n\t}\n\treturn cp\n}", "func Tags(v interface{}, key string) (map[string]string, error) {\n\treturn New(v).Tags(key)\n}", "func Diff(a, b string) []string {\n\topts := jsondiff.Options{\n\t\tAdded: jsondiff.Tag{Begin: \"{\\\"changed\\\":[\", End: \"]}\"},\n\t\tRemoved: jsondiff.Tag{Begin: \"{\\\"changed\\\":[\", End: \"]}\"},\n\t\tChanged: jsondiff.Tag{Begin: \"{\\\"changed\\\":[\", End: \"]}\"},\n\t\tChangedSeparator: \", \",\n\t\tIndent: \" \",\n\t}\n\n\tresult, comparedStr := jsondiff.Compare([]byte(a), []byte(b), &opts)\n\n\tif !(result == jsondiff.NoMatch || result == jsondiff.SupersetMatch) {\n\t\treturn nil\n\t}\n\n\treader := bufio.NewReader(bytes.NewReader([]byte(comparedStr)))\n\tdiffMap := make(map[string]bool)\n\tvar currentProperty string\n\tfor {\n\t\tstringRead, err := reader.ReadString('\\n')\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tlog.Printf(\"Error finding difference in json strings %v\", err)\n\t\t}\n\t\tif yes, str := isNewTopLevelProp(stringRead); yes {\n\t\t\tcurrentProperty = str\n\t\t}\n\t\tif strings.Contains(stringRead, changePattern) {\n\t\t\tdiffMap[currentProperty] = true\n\t\t}\n\t}\n\treturn mapKeysToSlice(reflect.ValueOf(diffMap).MapKeys())\n}", "func (s Reload) Diff(t Reload, opts ...Options) map[string][]interface{} {\n\tdiff := make(map[string][]interface{})\n\tif s.ID != t.ID {\n\t\tdiff[\"ID\"] = []interface{}{s.ID, t.ID}\n\t}\n\n\tif s.ReloadTimestamp != 
t.ReloadTimestamp {\n\t\tdiff[\"ReloadTimestamp\"] = []interface{}{s.ReloadTimestamp, t.ReloadTimestamp}\n\t}\n\n\tif s.Response != t.Response {\n\t\tdiff[\"Response\"] = []interface{}{s.Response, t.Response}\n\t}\n\n\tif s.Status != t.Status {\n\t\tdiff[\"Status\"] = []interface{}{s.Status, t.Status}\n\t}\n\n\treturn diff\n}" ]
[ "0.7123335", "0.7117373", "0.6249632", "0.6098153", "0.6051251", "0.59984875", "0.59709567", "0.5913765", "0.59114486", "0.5909879", "0.58669716", "0.5807775", "0.57989705", "0.5741582", "0.5656751", "0.5651042", "0.5592614", "0.5587161", "0.5564429", "0.55570143", "0.5551818", "0.5489363", "0.5472568", "0.54646826", "0.54167086", "0.54131", "0.54102", "0.541014", "0.5408826", "0.53733623", "0.5372736", "0.535692", "0.5345552", "0.530661", "0.5293858", "0.52901435", "0.5287212", "0.52715564", "0.52555805", "0.52255446", "0.5206388", "0.5191445", "0.51869243", "0.5171722", "0.5163683", "0.51357687", "0.51350856", "0.512267", "0.5106602", "0.5103216", "0.5089676", "0.50874346", "0.5077523", "0.50745636", "0.506421", "0.5063543", "0.5060124", "0.5028106", "0.5018932", "0.5004849", "0.5001433", "0.49910754", "0.49840197", "0.4975334", "0.49589643", "0.49531883", "0.4950253", "0.4940815", "0.494029", "0.4928383", "0.49225864", "0.49151304", "0.49119803", "0.49066687", "0.49033883", "0.49028397", "0.4898778", "0.48934507", "0.48726398", "0.48713595", "0.48626417", "0.48623163", "0.48400882", "0.48337328", "0.48318952", "0.48220217", "0.48215848", "0.4819032", "0.4819032", "0.48163286", "0.48090684", "0.48058197", "0.48058197", "0.48020735", "0.4801138", "0.4786454", "0.4785868", "0.47820953", "0.4767743", "0.47664502" ]
0.6719608
2
AddLabels adds (and overwrites) the current labels with the ones passed in.
func (in Labels) AddLabels(other Labels) Labels { for key, value := range other { if in == nil { in = make(map[string]string, len(other)) } in[key] = value } return in }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func AddLabels(obj metav1.Object, additionalLabels map[string]string) {\n\tlabels := obj.GetLabels()\n\tif labels == nil {\n\t\tlabels = map[string]string{}\n\t\tobj.SetLabels(labels)\n\t}\n\tfor k, v := range additionalLabels {\n\t\tlabels[k] = v\n\t}\n}", "func (d *DeviceInfo) AddLabels(labels map[string]string) *DeviceInfo {\n\tif d.Labels == nil {\n\t\td.Labels = labels\n\t} else {\n\t\tfor k, v := range labels {\n\t\t\td.Labels[k] = v\n\t\t}\n\t}\n\treturn d\n}", "func (puo *PostUpdateOne) AddLabels(l ...*Label) *PostUpdateOne {\n\tids := make([]int, len(l))\n\tfor i := range l {\n\t\tids[i] = l[i].ID\n\t}\n\treturn puo.AddLabelIDs(ids...)\n}", "func (pu *PostUpdate) AddLabels(l ...*Label) *PostUpdate {\n\tids := make([]int, len(l))\n\tfor i := range l {\n\t\tids[i] = l[i].ID\n\t}\n\treturn pu.AddLabelIDs(ids...)\n}", "func AddLabels(o metav1.Object, labels map[string]string) {\n\tl := o.GetLabels()\n\tif l == nil {\n\t\to.SetLabels(labels)\n\t\treturn\n\t}\n\tfor k, v := range labels {\n\t\tl[k] = v\n\t}\n\to.SetLabels(l)\n}", "func (c *client) AddLabels(org, repo string, number int, labels ...string) error {\n\treturn c.AddLabelsWithContext(context.Background(), org, repo, number, labels...)\n}", "func (f *Filter) AddLabels(m map[string]string) {\n\tconst (\n\t\tmetricLabel = \"metric.label\"\n\t)\n\tlabels := []string{}\n\tfor label, value := range m {\n\t\tmetricLabel := fmt.Sprintf(\"%s.\\\"%s\\\"=\\\"%s\\\"\", metricLabel, label, value)\n\t\tlabels = append(labels, metricLabel)\n\t}\n\tf.add(strings.Join(labels, \" \"))\n}", "func AddLabels(cfg config.Config, vmPool *vmpool.VMPool) string {\n\tvar (\n\t\tlines strings.Builder\n\t\tregexpLabels = []*regexpLabel{}\n\t)\n\n\tif len(cfg.VMNameRegexpLabels) > 0 {\n\t\tregexpLabels = compileRegexpLabels(cfg.VMNameRegexpLabels)\n\t}\n\n\tfor _, vm := range vmPool.VMs {\n\t\tvar b strings.Builder\n\t\tfmt.Fprintf(&b, `%s_vms{name=%q,id=\"%d\",state=%q,lcm_state=%q,host=%q`,\n\t\t\tcfg.Exporter.Namespace, 
vm.Name, vm.ID, vm.State, vm.LCMState, vm.Node)\n\n\t\t// even if regexpLabels is empty, check length to avoid func call\n\t\tif len(cfg.VMNameRegexpLabels) > 0 {\n\t\t\tb.WriteString(AddVMNameRegexpLabels(vm, regexpLabels))\n\t\t}\n\n\t\tif len(cfg.UserTemplateLabels) > 0 {\n\t\t\tb.WriteString(AddUserTemplateLabels(vm, cfg.UserTemplateLabels))\n\t\t}\n\n\t\tb.WriteString(\"} 1\\n\")\n\t\tlines.WriteString(b.String())\n\t}\n\n\treturn lines.String()\n}", "func (o *Board) AddLabels(ctx context.Context, exec boil.ContextExecutor, insert bool, related ...*Label) error {\n\tvar err error\n\tfor _, rel := range related {\n\t\tif insert {\n\t\t\trel.BoardID = o.ID\n\t\t\tif err = rel.Insert(ctx, exec, boil.Infer()); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"failed to insert into foreign table\")\n\t\t\t}\n\t\t} else {\n\t\t\tupdateQuery := fmt.Sprintf(\n\t\t\t\t\"UPDATE `labels` SET %s WHERE %s\",\n\t\t\t\tstrmangle.SetParamNames(\"`\", \"`\", 0, []string{\"board_id\"}),\n\t\t\t\tstrmangle.WhereClause(\"`\", \"`\", 0, labelPrimaryKeyColumns),\n\t\t\t)\n\t\t\tvalues := []interface{}{o.ID, rel.ID}\n\n\t\t\tif boil.DebugMode {\n\t\t\t\tfmt.Fprintln(boil.DebugWriter, updateQuery)\n\t\t\t\tfmt.Fprintln(boil.DebugWriter, values)\n\t\t\t}\n\n\t\t\tif _, err = exec.ExecContext(ctx, updateQuery, values...); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"failed to update foreign table\")\n\t\t\t}\n\n\t\t\trel.BoardID = o.ID\n\t\t}\n\t}\n\n\tif o.R == nil {\n\t\to.R = &boardR{\n\t\t\tLabels: related,\n\t\t}\n\t} else {\n\t\to.R.Labels = append(o.R.Labels, related...)\n\t}\n\n\tfor _, rel := range related {\n\t\tif rel.R == nil {\n\t\t\trel.R = &labelR{\n\t\t\t\tBoard: o,\n\t\t\t}\n\t\t} else {\n\t\t\trel.R.Board = o\n\t\t}\n\t}\n\treturn nil\n}", "func AddLabels(newLabels []string, issueURL, authToken string) (*http.Response, error) {\n\tlabelResponse, err := json.Marshal(map[string][]string{\n\t\t\"labels\": newLabels,\n\t})\n\n\tif err != nil {\n\t\treturn nil, 
fmt.Errorf(\"Error marshalling labels to map[string][]string: %v\", err)\n\t}\n\n\t// converting labelResponse to bytes for making a new request\n\tresponseBody := bytes.NewBuffer(labelResponse)\n\n\turl := fmt.Sprintf(\"%s%s\", issueURL, \"/labels\")\n\n\trequest, err := http.NewRequest(\"POST\", url, responseBody)\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error: Writing a new request with labels as bytes buffer: %v\", err)\n\t}\n\n\trequest.Header.Add(\"Authorization\", authToken)\n\trequest.Header.Add(\"Accept\", \"application/vnd.github.v3+json\")\n\n\treturn http.DefaultClient.Do(request)\n}", "func (mr *MockRepositoryClientMockRecorder) AddLabels(org, repo, number interface{}, labels ...interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\tvarargs := append([]interface{}{org, repo, number}, labels...)\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"AddLabels\", reflect.TypeOf((*MockRepositoryClient)(nil).AddLabels), varargs...)\n}", "func (mr *MockClientMockRecorder) AddLabels(org, repo, number interface{}, labels ...interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\tvarargs := append([]interface{}{org, repo, number}, labels...)\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"AddLabels\", reflect.TypeOf((*MockClient)(nil).AddLabels), varargs...)\n}", "func (issue *Issue) AddLabel(labels []string) error {\n\tfor i, val := range labels {\n\t\tlabels[i] = fmt.Sprintf(`{\"add\": \"%s\"}`, val)\n\t}\n\treturn updateLabelsHelper(labels, issue.Key)\n}", "func (issue *Issue) AddLabel(labels []string) error {\n\tfor i, val := range labels {\n\t\tlabels[i] = fmt.Sprintf(`{\"add\": \"%s\"}`, val)\n\t}\n\treturn updateLabelsHelper(labels, issue.Key)\n}", "func (p PRMirror) AddLabels(id int, labels []string) bool {\n\t_, _, err := p.GitHubClient.Issues.AddLabelsToIssue(*p.Context, p.Configuration.DownstreamOwner, p.Configuration.DownstreamRepo, id, labels)\n\tif err != nil {\n\t\tlog.Errorf(\"Error while adding a label on 
issue#:%d - %s\", id, err.Error())\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (r *ConfigConnectorContextReconciler) addLabels() declarative.ObjectTransform {\n\treturn func(ctx context.Context, o declarative.DeclarativeObject, manifest *manifest.Objects) error {\n\t\tlabels := r.labelMaker(ctx, o)\n\t\tfor _, o := range manifest.Items {\n\t\t\to.AddLabels(labels)\n\t\t}\n\t\treturn nil\n\t}\n}", "func (d *DeviceInfo) AddLabel(name, value string) *DeviceInfo {\n\tm := d.Labels\n\tif m == nil {\n\t\tm = make(map[string]string)\n\t\td.Labels = m\n\t}\n\tm[name] = value\n\treturn d\n}", "func (m *MockClient) AddLabels(org, repo string, number int, labels ...string) error {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{org, repo, number}\n\tfor _, a := range labels {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"AddLabels\", varargs...)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (fc *fakeClient) AddLabel(owner, repo string, number int, label string) error {\n\tfc.added = append(fc.added, label)\n\tfc.labels = append(fc.labels, label)\n\treturn nil\n}", "func NewLabels(lmap map[string]int64) *Labels {\n\tvar labs Labels\n\tlabs.Reset(lmap)\n\treturn &labs\n}", "func (m *MockRepositoryClient) AddLabels(org, repo string, number int, labels ...string) error {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{org, repo, number}\n\tfor _, a := range labels {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"AddLabels\", varargs...)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func addTagsToLabels(tags map[string]string, labelNamePrefix string, labels map[string]string) {\n\tfor k, v := range tags {\n\t\tlabels[labelNamePrefix+\"_\"+sanitizeLabelName(k)] = v\n\t}\n}", "func (puo *PostUpdateOne) AddLabelIDs(ids ...int) *PostUpdateOne {\n\tpuo.mutation.AddLabelIDs(ids...)\n\treturn puo\n}", "func (app *App) MergeLabels(labels map[string]string) {\n\tfor k, v := range labels {\n\t\tapp.Labels[k] = v\n\t}\n}", "func 
AddLabel(obj mftest.Object, label string, value string) mftest.Object {\n\tlabels := obj.GetLabels()\n\tif labels == nil {\n\t\tobj.SetLabels(make(map[string]string))\n\t}\n\tobj.GetLabels()[label] = value\n\treturn obj\n}", "func AddLabel(ctx context.Context, obj *Object, key, value string, override bool) error {\n\tif key == \"\" || value == \"\" {\n\t\treturn fmt.Errorf(\"key and value cannot be empty\")\n\t}\n\n\tif err := addToNestedMap(obj, key, value, override, \"metadata\", \"labels\"); err != nil {\n\t\treturn err\n\t}\n\n\tvar nestedFields []string\n\tswitch kind := ObjectKind(obj); kind {\n\tcase \"CronJob\":\n\t\tnestedFields = []string{\"spec\", \"jobTemplate\", \"spec\", \"template\", \"metadata\", \"labels\"}\n\tcase \"DaemonSet\", \"Deployment\", \"Job\", \"ReplicaSet\", \"ReplicationController\", \"StatefulSet\":\n\t\tnestedFields = []string{\"spec\", \"template\", \"metadata\", \"labels\"}\n\tdefault:\n\t\treturn nil\n\t}\n\tif err := addToNestedMap(obj, key, value, override, nestedFields...); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func NewLabels(api plugin.API) *Labels {\n\treturn &Labels{\n\t\tByID: make(map[string]*Label),\n\t\tapi: api,\n\t}\n}", "func addLabels(target map[string]string, added map[string]string) []rfc6902PatchOperation {\n\tpatches := []rfc6902PatchOperation{}\n\n\taddedKeys := make([]string, 0, len(added))\n\tfor key := range added {\n\t\taddedKeys = append(addedKeys, key)\n\t}\n\tsort.Strings(addedKeys)\n\n\tfor _, key := range addedKeys {\n\t\tvalue := added[key]\n\t\tpatch := rfc6902PatchOperation{\n\t\t\tOp: \"add\",\n\t\t\tPath: \"/metadata/labels/\" + escapeJSONPointerValue(key),\n\t\t\tValue: value,\n\t\t}\n\n\t\tif target == nil {\n\t\t\ttarget = map[string]string{}\n\t\t\tpatch.Path = \"/metadata/labels\"\n\t\t\tpatch.Value = map[string]string{\n\t\t\t\tkey: value,\n\t\t\t}\n\t\t}\n\n\t\tif target[key] == \"\" {\n\t\t\tpatches = append(patches, patch)\n\t\t}\n\t}\n\n\treturn patches\n}", "func (o 
*ConnectorTypeAllOf) SetLabels(v []string) {\n\to.Labels = &v\n}", "func (pu *PostUpdate) AddLabelIDs(ids ...int) *PostUpdate {\n\tpu.mutation.AddLabelIDs(ids...)\n\treturn pu\n}", "func AddLabelsToInstance(t *testing.T, projectID string, zone string, instance string, labels map[string]string) {\n\terr := AddLabelsToInstanceE(t, projectID, zone, instance, labels)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func NewLabels() Labels {\n\tl := labels(sync.Map{})\n\treturn &l\n}", "func (o *DashboardAllOfLinks) SetLabels(v string) {\n\to.Labels = &v\n}", "func WithLabels(labels map[string]string) Option {\n\treturn func(o *options) {\n\t\to.labels = labels\n\t}\n}", "func updateLabels(w *illumioapi.Workload, lblhref map[string]illumioapi.Label) {\n\n\tvar tmplbls []*illumioapi.Label\n\tfor _, lbl := range w.Labels {\n\t\ttmplbl := lblhref[lbl.Href]\n\t\ttmplbls = append(tmplbls, &tmplbl)\n\t}\n\tw.Labels = tmplbls\n}", "func (b *Block) SetLabels(labels []string) {\n\tb.labelsObj().Replace(labels)\n}", "func (mc *MockCluster) AddLabelsStore(storeID uint64, regionCount int, labels map[string]string) {\n\tmc.AddRegionStore(storeID, regionCount)\n\tstore := mc.GetStore(storeID)\n\tfor k, v := range labels {\n\t\tstore.Labels = append(store.Labels, &metapb.StoreLabel{Key: k, Value: v})\n\t}\n\tmc.PutStore(store)\n}", "func NewLabels(labels LabelMap) *Labels {\n\tl := &Labels{\n\t\tlabels: labels,\n\t\tkeys: make([]string, 0, len(labels)),\n\t}\n\tfor k := range labels {\n\t\tl.keys = append(l.keys, k)\n\t}\n\tsort.Strings(l.keys)\n\treturn l\n}", "func (o *ZoneZone) SetLabels(v map[string]string) {\n\to.Labels = &v\n}", "func (i *Icon) AddLabel(label string) {\n\ti.Config.Label = label\n}", "func AddLabelsOnNode(n node.Node, labels map[string]string) error {\n\tfor labelKey, labelValue := range labels {\n\t\tif err := Inst().S.AddLabelOnNode(n, labelKey, labelValue); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func AddLabelsAndAnnotations(pod 
corev1.Pod, labels map[string]string, annotations map[string]string) error {\n\toriginalJSON, err := json.Marshal(pod)\n\tif err != nil {\n\t\treturn err\n\t}\n\tmodifiedPod := pod.DeepCopyObject().(*corev1.Pod)\n\n\t// First, update labels\n\tif modifiedPod.Labels == nil {\n\t\tmodifiedPod.Labels = labels\n\t} else {\n\t\tfor k, v := range labels {\n\t\t\tmodifiedPod.Labels[k] = v\n\t\t}\n\t}\n\t// Then, update annotations\n\tif modifiedPod.Annotations == nil {\n\t\tmodifiedPod.Annotations = annotations\n\t} else {\n\t\tfor k, v := range annotations {\n\t\t\tmodifiedPod.Annotations[k] = v\n\t\t}\n\t}\n\treturn patchFunction(modifiedPod, originalJSON)\n}", "func (r *InformationProtectionPolicyLabelsCollectionRequest) Add(ctx context.Context, reqObj *InformationProtectionLabel) (resObj *InformationProtectionLabel, err error) {\n\terr = r.JSONRequest(ctx, \"POST\", \"\", reqObj, &resObj)\n\treturn\n}", "func (nv *NetView) ConfigLabels(labs []string) bool {\n\tvs := nv.Scene()\n\tlgp, err := vs.ChildByNameTry(\"Labels\", 1)\n\tif err != nil {\n\t\tlgp = gi3d.AddNewGroup(vs, vs, \"Labels\")\n\t}\n\n\tlbConfig := kit.TypeAndNameList{}\n\tfor _, ls := range labs {\n\t\tlbConfig.Add(gi3d.KiT_Text2D, ls)\n\t}\n\tmods, updt := lgp.ConfigChildren(lbConfig)\n\tif mods {\n\t\tfor i, ls := range labs {\n\t\t\tlb := lgp.ChildByName(ls, i).(*gi3d.Text2D)\n\t\t\tlb.Defaults(vs)\n\t\t\tlb.SetText(vs, ls)\n\t\t\tlb.SetProp(\"text-align\", gist.AlignLeft)\n\t\t\tlb.SetProp(\"vertical-align\", gist.AlignTop)\n\t\t\tlb.SetProp(\"white-space\", gist.WhiteSpacePre)\n\t\t}\n\t}\n\tlgp.UpdateEnd(updt)\n\treturn mods\n}", "func (m *Group) SetAssignedLabels(value []AssignedLabelable)() {\n m.assignedLabels = value\n}", "func WithLabels(labels map[string]string) Opt {\n\treturn func(object client.Object) {\n\t\tobject.SetLabels(labels)\n\t}\n}", "func addLabelsToPullRequest(prInfo *PullRequestInfo, labels []string) error {\n\tif prInfo == nil {\n\t\treturn errors.New(\"pull request to label 
cannot be nil\")\n\t}\n\tpr := prInfo.PullRequest\n\tprovider := prInfo.GitProvider\n\n\tif len(labels) > 0 {\n\t\tnumber := *pr.Number\n\t\tvar err error\n\t\terr = provider.AddLabelsToIssue(pr.Owner, pr.Repo, number, labels)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlog.Logger().Infof(\"Added label %s to Pull Request %s\", util.ColorInfo(strings.Join(labels, \", \")), pr.URL)\n\t}\n\treturn nil\n}", "func (ls *LabelService) AddLabel(\n\tlabelID string,\n\tlabelParams *map[string]interface{},\n) (labeledResources []*types.LabeledResource, err error) {\n\tlog.Debug(\"AddLabel\")\n\n\tdata, status, err := ls.concertoService.Post(fmt.Sprintf(APIPathLabelResources, labelID), labelParams)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = utils.CheckStandardStatus(status, data); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = json.Unmarshal(data, &labeledResources); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn labeledResources, nil\n}", "func (lbl *LabelService) AddLabel(labelVector *map[string]interface{}, labelID string) (labeledResources []*types.LabeledResource, err error) {\n\tlog.Debug(\"AddLabel\")\n\n\tdata, status, err := lbl.concertoService.Post(fmt.Sprintf(\"/labels/%s/resources\", labelID), labelVector)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = utils.CheckStandardStatus(status, data); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = json.Unmarshal(data, &labeledResources); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn labeledResources, nil\n}", "func AddRemoveLabelsCommand(parent *cobra.Command) {\n\tvar opt RemoveLabel\n\n\tcmd := &cobra.Command{\n\t\tUse: \"remove-label\",\n\t\tAliases: []string{\"remove-labels\"},\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\topt.Labels = append(opt.Labels, args...)\n\t\t\treturn xform.RunXform(cmd.Context(), opt.Run)\n\t\t},\n\t}\n\n\tparent.AddCommand(cmd)\n}", "func (b *Builder) WithLabelsNew(labels map[string]string) *Builder {\n\tif 
len(labels) == 0 {\n\t\tb.errs = append(\n\t\t\tb.errs,\n\t\t\terrors.New(\"failed to build PVC object: missing labels\"),\n\t\t)\n\t\treturn b\n\t}\n\n\t// copy of original map\n\tnewlbls := map[string]string{}\n\tfor key, value := range labels {\n\t\tnewlbls[key] = value\n\t}\n\n\t// override\n\tb.pvc.object.Labels = newlbls\n\treturn b\n}", "func (a *Client) AddLabelsToResources(params *AddLabelsToResourcesParams, opts ...ClientOption) (*AddLabelsToResourcesOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewAddLabelsToResourcesParams()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"AddLabelsToResources\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/add-labels-to-resources\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\"},\n\t\tParams: params,\n\t\tReader: &AddLabelsToResourcesReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*AddLabelsToResourcesOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for AddLabelsToResources: API contract not enforced by server. 
Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func NewLabels(base Arcer, r vg.Length, ls ...Labeler) (*Labels, error) {\n\tvar b ArcOfer\n\tswitch base := base.(type) {\n\tcase ArcOfer:\n\t\tfor _, l := range ls {\n\t\t\tvar err error\n\t\t\tswitch l := l.(type) {\n\t\t\tcase locater:\n\t\t\t\t_, err = base.ArcOf(l.location(), nil)\n\t\t\tcase Feature:\n\t\t\t\t_, err = base.ArcOf(l, nil)\n\t\t\tdefault:\n\t\t\t\t_, err = base.ArcOf(nil, nil)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\tb = base\n\tdefault:\n\t\tif len(ls) > 1 {\n\t\t\treturn nil, fmt.Errorf(\"rings: cannot label a type %T with more than one feature\", base)\n\t\t}\n\t\tarc := base.Arc()\n\t\tb = Arcs{Base: arc, Arcs: map[Feature]Arc{Feature(nil): arc}}\n\t}\n\tvar x, y float64\n\tif xy, ok := base.(XYer); ok {\n\t\tx, y = xy.XY()\n\t}\n\treturn &Labels{\n\t\tLabels: ls,\n\t\tBase: b,\n\t\tRadius: r,\n\t\tX: x,\n\t\tY: y,\n\t}, nil\n}", "func (c *client) AddLabel(org, repo string, number int, label string) error {\n\treturn c.AddLabelWithContext(context.Background(), org, repo, number, label)\n}", "func (o *VirtualizationIweVirtualMachine) SetLabels(v []InfraMetaData) {\n\to.Labels = v\n}", "func (l Set) Add(labels ...Instance) Set {\n\tc := l.Clone()\n\tfor _, label := range labels {\n\t\tc[label] = struct{}{}\n\t}\n\n\treturn c\n}", "func withLabels(labels map[string]string, volumes []*v1.PersistentVolume) []*v1.PersistentVolume {\n\tvolumes[0].Labels = labels\n\treturn volumes\n}", "func withLabels(labels map[string]string, volumes []*v1.PersistentVolume) []*v1.PersistentVolume {\n\tvolumes[0].Labels = labels\n\treturn volumes\n}", "func (t *AuroraTask) AddLabel(key string, value string) *AuroraTask {\n\tt.task.Metadata = append(t.task.Metadata, &aurora.Metadata{Key: key, Value: value})\n\treturn t\n}", "func (a *addLabelsSeries) Labels() labels.Labels {\n\treturn setLabelsRetainExisting(a.upstream.Labels(), a.labels...)\n}", "func 
(b *Builder) WithLabels(labels map[string]string) *Builder {\n\tif len(labels) == 0 {\n\t\tb.errs = append(\n\t\t\tb.errs,\n\t\t\terrors.New(\"failed to build PVC object: missing labels\"),\n\t\t)\n\t\treturn b\n\t}\n\n\tif b.pvc.object.Labels == nil {\n\t\tb.pvc.object.Labels = map[string]string{}\n\t}\n\n\tfor key, value := range labels {\n\t\tb.pvc.object.Labels[key] = value\n\t}\n\treturn b\n}", "func (_options *CreateConfigOptions) SetLabels(labels []string) *CreateConfigOptions {\n\t_options.Labels = labels\n\treturn _options\n}", "func (c *Config) WithLabels(lbls map[string]string) *Config {\n\tc.labels = lbls\n\treturn c\n}", "func SetLabelNames(names []string) CustomizeLabelFunc {\n\treturn func(fxt *TestFixture, idx int) error {\n\t\tif len(fxt.Labels) != len(names) {\n\t\t\treturn errs.Errorf(\"number of names (%d) must match number of labels to create (%d)\", len(names), len(fxt.Labels))\n\t\t}\n\t\tfxt.Labels[idx].Name = names[idx]\n\t\treturn nil\n\t}\n}", "func (g *Generator) AddConfigLabel(label, value string) {\n\tg.image.Config.Labels[label] = value\n}", "func (puo *PostUpdateOne) RemoveLabels(l ...*Label) *PostUpdateOne {\n\tids := make([]int, len(l))\n\tfor i := range l {\n\t\tids[i] = l[i].ID\n\t}\n\treturn puo.RemoveLabelIDs(ids...)\n}", "func (ls LabelSet) Append(name, value string) LabelSet {\n\treturn append(ls, Label{Name: name, Value: value})\n}", "func (l *labeler) Labels(otherLabels map[string]string) map[string]string {\n\tlabels := map[string]string{\n\t\tdockerLabelPrefix + \".job.id\": strconv.FormatInt(l.build.ID, 10),\n\t\tdockerLabelPrefix + \".job.url\": l.build.JobURL(),\n\t\tdockerLabelPrefix + \".job.sha\": l.build.GitInfo.Sha,\n\t\tdockerLabelPrefix + \".job.before_sha\": l.build.GitInfo.BeforeSha,\n\t\tdockerLabelPrefix + \".job.ref\": l.build.GitInfo.Ref,\n\t\tdockerLabelPrefix + \".project.id\": strconv.FormatInt(l.build.JobInfo.ProjectID, 10),\n\t\tdockerLabelPrefix + \".pipeline.id\": 
l.build.GetAllVariables().Value(\"CI_PIPELINE_ID\"),\n\t\tdockerLabelPrefix + \".runner.id\": l.build.Runner.ShortDescription(),\n\t\tdockerLabelPrefix + \".runner.local_id\": strconv.Itoa(l.build.RunnerID),\n\t\tdockerLabelPrefix + \".managed\": \"true\",\n\t}\n\n\tfor k, v := range otherLabels {\n\t\tlabels[fmt.Sprintf(\"%s.%s\", dockerLabelPrefix, k)] = v\n\t}\n\n\treturn labels\n}", "func AddLabel(label types.Label) ([]byte, error) {\n\tlog.Trace.Printf(\"Adding the following label: %+v\", label)\n\tvar ret []byte\n\tvar err error\n\n\tif err = store.DB.Create(&label).Error; err == nil {\n\t\tlog.Trace.Printf(\"Successfully added the label to the database: %+v\", label)\n\t\tret, err = json.Marshal(label)\n\t} else {\n\t\tlog.Warning.Printf(err.Error())\n\t}\n\n\treturn ret, err\n}", "func AddLabelsToInstanceE(t *testing.T, projectID string, zone string, instance string, labels map[string]string) error {\n\tlogger.Logf(t, \"Adding labels to instance %s in zone %s\", instance, zone)\n\n\tctx := context.Background()\n\n\tservice, err := NewComputeServiceE(t)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Get the fingerprint of the existing labels\n\texistingInstance, err := service.Instances.Get(projectID, zone, instance).Context(ctx).Do()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Instances.Get(%s) got error: %v\", instance, err)\n\t}\n\treq := compute.InstancesSetLabelsRequest{Labels: labels, LabelFingerprint: existingInstance.LabelFingerprint}\n\n\t// Perform the SetLabels request\n\tif _, err := service.Instances.SetLabels(projectID, zone, instance, &req).Context(ctx).Do(); err != nil {\n\t\treturn fmt.Errorf(\"Instances.SetLabels(%s) got error: %v\", instance, err)\n\t}\n\n\treturn err\n}", "func (resolution *PolicyResolution) RecordLabels(cik *ComponentInstanceKey, labels *lang.LabelSet) {\n\tresolution.GetComponentInstanceEntry(cik).addLabels(labels)\n}", "func (m *ExecutionManager) addProjectLabels(ctx context.Context, projectName string, initialLabels 
map[string]string) (map[string]string, error) {\n\tproject, err := m.db.ProjectRepo().Get(ctx, projectName)\n\tif err != nil {\n\t\tlogger.Errorf(ctx, \"Failed to get project for [%+v] with error: %v\", project, err)\n\t\treturn nil, err\n\t}\n\t// passing nil domain as not needed to retrieve labels\n\tprojectLabels := transformers.FromProjectModel(project, nil).Labels.GetValues()\n\n\tif initialLabels == nil {\n\t\tinitialLabels = make(map[string]string)\n\t}\n\n\tfor k, v := range projectLabels {\n\t\tif _, ok := initialLabels[k]; !ok {\n\t\t\tinitialLabels[k] = v\n\t\t}\n\t}\n\treturn initialLabels, nil\n}", "func (pu *PostUpdate) RemoveLabels(l ...*Label) *PostUpdate {\n\tids := make([]int, len(l))\n\tfor i := range l {\n\t\tids[i] = l[i].ID\n\t}\n\treturn pu.RemoveLabelIDs(ids...)\n}", "func setLabelsRetainExisting(src labels.Labels, additionalLabels ...labels.Label) labels.Labels {\n\tlb := labels.NewBuilder(src)\n\n\tfor _, additionalL := range additionalLabels {\n\t\tif oldValue := src.Get(additionalL.Name); oldValue != \"\" {\n\t\t\tlb.Set(\n\t\t\t\tretainExistingPrefix+additionalL.Name,\n\t\t\t\toldValue,\n\t\t\t)\n\t\t}\n\t\tlb.Set(additionalL.Name, additionalL.Value)\n\t}\n\n\treturn lb.Labels()\n}", "func (s *Service) AddLabel(label *model.Label) (err error) {\n\tif _, err = s.dao.QueryLabel(label); err == nil {\n\t\terr = ecode.MelloiLabelExistErr\n\t\treturn\n\t}\n\tlabel.Active = 1\n\treturn s.dao.AddLabel(label)\n}", "func Add(name string) *Label {\n\tid := datautils.ToID(name)\n\tlabel := &Label{id, name}\n\terr := labelsIdx.Add(id, label, cache.NoExpiration)\n\tif err == nil {\n\t\tlabelMediasIdx.Set(id, []string{}, cache.NoExpiration)\n\t}\n\treturn label\n}", "func ValidateLabels(limits *Limits, userID string, ls []cortexpb.LabelAdapter, skipLabelNameValidation bool) ValidationError {\n\tif limits.EnforceMetricName {\n\t\tunsafeMetricName, err := extract.UnsafeMetricNameFromLabelAdapters(ls)\n\t\tif err != nil 
{\n\t\t\tDiscardedSamples.WithLabelValues(missingMetricName, userID).Inc()\n\t\t\treturn newNoMetricNameError()\n\t\t}\n\n\t\tif !model.IsValidMetricName(model.LabelValue(unsafeMetricName)) {\n\t\t\tDiscardedSamples.WithLabelValues(invalidMetricName, userID).Inc()\n\t\t\treturn newInvalidMetricNameError(unsafeMetricName)\n\t\t}\n\t}\n\n\tnumLabelNames := len(ls)\n\tif numLabelNames > limits.MaxLabelNamesPerSeries {\n\t\tDiscardedSamples.WithLabelValues(maxLabelNamesPerSeries, userID).Inc()\n\t\treturn newTooManyLabelsError(ls, limits.MaxLabelNamesPerSeries)\n\t}\n\n\tmaxLabelNameLength := limits.MaxLabelNameLength\n\tmaxLabelValueLength := limits.MaxLabelValueLength\n\tlastLabelName := \"\"\n\tmaxLabelsSizeBytes := limits.MaxLabelsSizeBytes\n\tlabelsSizeBytes := 0\n\n\tfor _, l := range ls {\n\t\tif !skipLabelNameValidation && !model.LabelName(l.Name).IsValid() {\n\t\t\tDiscardedSamples.WithLabelValues(invalidLabel, userID).Inc()\n\t\t\treturn newInvalidLabelError(ls, l.Name)\n\t\t} else if len(l.Name) > maxLabelNameLength {\n\t\t\tDiscardedSamples.WithLabelValues(labelNameTooLong, userID).Inc()\n\t\t\treturn newLabelNameTooLongError(ls, l.Name, maxLabelNameLength)\n\t\t} else if len(l.Value) > maxLabelValueLength {\n\t\t\tDiscardedSamples.WithLabelValues(labelValueTooLong, userID).Inc()\n\t\t\treturn newLabelValueTooLongError(ls, l.Name, l.Value, maxLabelValueLength)\n\t\t} else if cmp := strings.Compare(lastLabelName, l.Name); cmp >= 0 {\n\t\t\tif cmp == 0 {\n\t\t\t\tDiscardedSamples.WithLabelValues(duplicateLabelNames, userID).Inc()\n\t\t\t\treturn newDuplicatedLabelError(ls, l.Name)\n\t\t\t}\n\n\t\t\tDiscardedSamples.WithLabelValues(labelsNotSorted, userID).Inc()\n\t\t\treturn newLabelsNotSortedError(ls, l.Name)\n\t\t}\n\n\t\tlastLabelName = l.Name\n\t\tlabelsSizeBytes += l.Size()\n\t}\n\tif maxLabelsSizeBytes > 0 && labelsSizeBytes > maxLabelsSizeBytes {\n\t\tDiscardedSamples.WithLabelValues(labelsSizeBytesExceeded, userID).Inc()\n\t\treturn 
labelSizeBytesExceededError(ls, labelsSizeBytes, maxLabelsSizeBytes)\n\t}\n\treturn nil\n}", "func (j *AuroraJob) AddLabel(key string, value string) Job {\n\tif _, ok := j.metadata[key]; !ok {\n\t\tj.metadata[key] = &aurora.Metadata{Key: key}\n\t\tj.jobConfig.TaskConfig.Metadata = append(j.jobConfig.TaskConfig.Metadata, j.metadata[key])\n\t}\n\n\tj.metadata[key].Value = value\n\treturn j\n}", "func (o *TemplateSummaryResources) SetLabels(v []TemplateSummaryLabel) {\n\to.Labels = v\n}", "func (mr *MockRepositoryClientMockRecorder) AddLabelsWithContext(ctx, org, repo, number interface{}, labels ...interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\tvarargs := append([]interface{}{ctx, org, repo, number}, labels...)\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"AddLabelsWithContext\", reflect.TypeOf((*MockRepositoryClient)(nil).AddLabelsWithContext), varargs...)\n}", "func (m *MockMetrics) AddSampleWithLabels(arg0 []string, arg1 float32, arg2 []metrics.Label) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"AddSampleWithLabels\", arg0, arg1, arg2)\n}", "func (in *NodeLabels) DeepCopy() *NodeLabels {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(NodeLabels)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func RemoveLabels(o metav1.Object, labels ...string) {\n\tl := o.GetLabels()\n\tif l == nil {\n\t\treturn\n\t}\n\tfor _, k := range labels {\n\t\tdelete(l, k)\n\t}\n\to.SetLabels(l)\n}", "func ClearLabels() {\n\tclearLabels()\n}", "func (mr *MockClientMockRecorder) AddLabelsWithContext(ctx, org, repo, number interface{}, labels ...interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\tvarargs := append([]interface{}{ctx, org, repo, number}, labels...)\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"AddLabelsWithContext\", reflect.TypeOf((*MockClient)(nil).AddLabelsWithContext), varargs...)\n}", "func (p *Peer) SetLabels(labels []storepb.Label) {\n\tp.mtx.Lock()\n\tdefer p.mtx.Unlock()\n\n\ts := p.data[p.Name()]\n\ts.Metadata.Labels = 
labels\n\tp.data[p.Name()] = s\n}", "func (e *exemplarSampler) trackNewLabels(seriesLabels, labels map[string]string) {\n\tfor k := range labels {\n\t\tif _, ok := e.labelSet[k]; !ok {\n\t\t\te.labelSet[k] = struct{}{}\n\t\t}\n\t}\n\tfor k := range seriesLabels {\n\t\tif _, ok := e.labelSet[k]; !ok {\n\t\t\te.labelSet[k] = struct{}{}\n\t\t}\n\t}\n}", "func (o *VirtualizationIweVirtualMachine) HasLabels() bool {\n\tif o != nil && o.Labels != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (s *DockerVolumeConfiguration) SetLabels(v map[string]*string) *DockerVolumeConfiguration {\n\ts.Labels = v\n\treturn s\n}", "func (t *Test) Label(labels ...label.Instance) *Test {\n\tt.labels = append(t.labels, labels...)\n\treturn t\n}", "func (gtc *GroupTagCounter) WithLabels(labels ...string) *GroupCounter {\n\th := secureHash.GetHash(labels)\n\n\tgtc.registredLock.Lock()\n\tdefer gtc.registredLock.Unlock()\n\n\tgc, ok := gtc.registred[h]\n\tif ok {\n\t\treturn gc\n\t}\n\n\tgc = &GroupCounter{}\n\tfor _, tc := range gtc.tagcounters {\n\t\tc := tc.WithLabels(labels...)\n\t\tgc.AddCounter(c)\n\t}\n\n\tgtc.registred[h] = gc\n\treturn gc\n}", "func (o *KubernetesAddonDefinitionAllOf) SetLabels(v []string) {\n\to.Labels = v\n}", "func (r *InformationProtectionSensitivityLabelsCollectionRequest) Add(ctx context.Context, reqObj *SensitivityLabel) (resObj *SensitivityLabel, err error) {\n\terr = r.JSONRequest(ctx, \"POST\", \"\", reqObj, &resObj)\n\treturn\n}", "func WriteLabels(bkt *bolt.Bucket, labels map[string]string) error {\n\treturn writeMap(bkt, bucketKeyLabels, labels)\n}", "func (l *Label) AddToConfig(config *container.Config) {\n\tconfig.Labels[l.Key] = l.Value\n}", "func (mr *MockClientMockRecorder) AddLabel(org, repo, number, label interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"AddLabel\", reflect.TypeOf((*MockClient)(nil).AddLabel), org, repo, number, label)\n}", "func (d *DeployManager) 
CreateLabels() {\n\tlabels := map[string]string{\n\t\t\"siddhi.io/name\": CRDName,\n\t\t\"siddhi.io/instance\": d.Application.Name,\n\t\t\"siddhi.io/version\": OperatorVersion,\n\t\t\"siddhi.io/part-of\": OperatorName,\n\t}\n\td.Labels = labels\n}", "func mergeLabels(l1, l2 map[string]string) {\n\tfor k, v := range l2 {\n\t\tif _, ok := l1[k]; ok {\n\t\t\tcontinue\n\t\t}\n\t\tl1[k] = v\n\t}\n}", "func mergeLabels(l1, l2 map[string]string) {\n\tfor k, v := range l2 {\n\t\tif _, ok := l1[k]; ok {\n\t\t\tcontinue\n\t\t}\n\t\tl1[k] = v\n\t}\n}", "func mergeLabels(l1, l2 map[string]string) {\n\tfor k, v := range l2 {\n\t\tif _, ok := l1[k]; ok {\n\t\t\tcontinue\n\t\t}\n\t\tl1[k] = v\n\t}\n}" ]
[ "0.7185251", "0.7184332", "0.7106733", "0.7091991", "0.705142", "0.70098543", "0.698809", "0.68857425", "0.6851677", "0.68134856", "0.6807551", "0.6742338", "0.6718384", "0.6718384", "0.6577891", "0.65086764", "0.6257343", "0.62466973", "0.6238192", "0.62083596", "0.6200173", "0.6199506", "0.6166516", "0.61330104", "0.6122671", "0.60917634", "0.6037053", "0.6027036", "0.5994872", "0.5954761", "0.59236974", "0.59157246", "0.59019166", "0.59001756", "0.58961695", "0.5894936", "0.5887555", "0.58852935", "0.5883867", "0.58740264", "0.5850611", "0.584455", "0.5820524", "0.5804916", "0.57941467", "0.5740063", "0.5736976", "0.5711741", "0.57091206", "0.56811225", "0.5680673", "0.5672775", "0.5667542", "0.56646425", "0.5651008", "0.5648969", "0.56248915", "0.56248915", "0.56246185", "0.5603965", "0.559755", "0.5581478", "0.55760515", "0.5575691", "0.5567923", "0.556707", "0.5549393", "0.5544437", "0.553326", "0.55231094", "0.5497326", "0.54928017", "0.5492792", "0.5482011", "0.54737014", "0.54634494", "0.54551345", "0.5449261", "0.5446387", "0.54444623", "0.5442141", "0.5428482", "0.54266244", "0.5425673", "0.53965896", "0.53892004", "0.53858334", "0.53779083", "0.53749835", "0.5373312", "0.5373189", "0.5364147", "0.5363609", "0.5349617", "0.53421754", "0.53195024", "0.5313314", "0.5313244", "0.5313244", "0.5313244" ]
0.690418
7
ClusterTagKey generates the key for resources associated with a cluster.
func ClusterTagKey(name string) string { return fmt.Sprintf("%s%s", NameGCPProviderOwned, name) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func GetClusterKey(clusterId string) string {\n\treturn ClusterKeyPrefix + clusterId\n}", "func ToClusterKey(vc *v1alpha1.Virtualcluster) string {\n\tif len(vc.GetNamespace()) > 0 {\n\t\treturn vc.GetNamespace() + \"-\" + vc.GetName()\n\t}\n\treturn vc.GetName()\n}", "func ResourceKey(group, version, kind string) string {\n\tif group == \"\" {\n\t\tgroup = \"core\"\n\t}\n\treturn \"k8s_\" + ToSnake(group) + \"_\" + version + \"_\" + ToSnake(kind)\n}", "func (r *ClusterServiceResource) Key() types.NamespacedName {\n\treturn types.NamespacedName{Name: r.pandaCluster.Name + \"-cluster\", Namespace: r.pandaCluster.Namespace}\n}", "func ConnEkgKey(cid types.ConnId) []byte {\n return createKey(CONN,EKG,cid.Serialize())\n}", "func (r *ConfigMapResource) Key() types.NamespacedName {\n\treturn ConfigMapKey(r.pandaCluster)\n}", "func ConfigMapKey(pandaCluster *vectorizedv1alpha1.Cluster) types.NamespacedName {\n\treturn types.NamespacedName{Name: resourceNameTrim(pandaCluster.Name, baseSuffix), Namespace: pandaCluster.Namespace}\n}", "func tagKey(dir string, context *build.Context, tags []string) string {\n\tctags := map[string]bool{\n\t\tcontext.GOOS: true,\n\t\tcontext.GOARCH: true,\n\t}\n\tif context.CgoEnabled {\n\t\tctags[\"cgo\"] = true\n\t}\n\tfor _, tag := range context.BuildTags {\n\t\tctags[tag] = true\n\t}\n\t// TODO: ReleaseTags (need to load default)\n\tkey := dir\n\n\t// explicit on GOOS and GOARCH as global cache will use \"all\" cached packages for\n\t// an indirect imported package. 
See https://github.com/golang/go/issues/21181\n\t// for more detail.\n\ttags = append(tags, context.GOOS, context.GOARCH)\n\tsort.Strings(tags)\n\n\tfor _, tag := range tags {\n\t\tif ctags[tag] {\n\t\t\tkey += \",\" + tag\n\t\t\tctags[tag] = false\n\t\t}\n\t}\n\treturn key\n}", "func (o DataSetRowLevelPermissionTagRuleOutput) TagKey() pulumi.StringOutput {\n\treturn o.ApplyT(func(v DataSetRowLevelPermissionTagRule) string { return v.TagKey }).(pulumi.StringOutput)\n}", "func ETCDKey(id string, ktype KeyType) (string, error) {\n\tsuffix, ok := etcdKeySuffixes[ktype]\n\tif !ok {\n\t\treturn \"\", errors.Wrapf(ErrInvalidKey, \"%v is not a supported key type\", ktype)\n\t}\n\treturn id + suffix, nil\n}", "func ResourceKey(c Codec, resourceName, pk string) string {\n\treturn path.Join(c.Key(), resourceName, pk)\n}", "func CacheKey(metadata *metav1.ObjectMeta) string {\n\treturn fmt.Sprintf(\"%v_%v\", metadata.UID, metadata.ResourceVersion)\n}", "func getKey(cluster *clusteroperator.Cluster, t *testing.T) string {\n\tif key, err := controller.KeyFunc(cluster); err != nil {\n\t\tt.Errorf(\"Unexpected error getting key for Cluster %v: %v\", cluster.Name, err)\n\t\treturn \"\"\n\t} else {\n\t\treturn key\n\t}\n}", "func (o GetKubernetesClusterKubeConfigOutput) ClientKey() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetKubernetesClusterKubeConfig) string { return v.ClientKey }).(pulumi.StringOutput)\n}", "func Key(name string, namespace string) string {\n\treturn ksrkey.Key(PodKeyword, name, namespace)\n}", "func KeyFor(obj runtime.Object, nsn types.NamespacedName) string {\n\tgvk := obj.GetObjectKind().GroupVersionKind()\n\treturn strings.Join([]string{gvk.Group, gvk.Version, gvk.Kind, nsn.Namespace, nsn.Name}, KeyDelimiter)\n}", "func (o KubernetesClusterKubeConfigOutput) ClientKey() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v KubernetesClusterKubeConfig) *string { return v.ClientKey }).(pulumi.StringPtrOutput)\n}", "func ContextForCluster(kindClusterName 
string) string {\n\treturn kubeconfig.KINDClusterKey(kindClusterName)\n}", "func GetKeyNodeName(cluster, node string) string {\n\t// WARNING - STABLE API: Changing the structure of the key may break\n\t// backwards compatibility\n\treturn path.Join(cluster, node)\n}", "func (t Task) Key() string {\n\treturn fmt.Sprintf(\"%s:%s\", t.Name, t.ID)\n}", "func (o ClusterTrustedAccessRoleBindingOutput) KubernetesClusterId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *ClusterTrustedAccessRoleBinding) pulumi.StringOutput { return v.KubernetesClusterId }).(pulumi.StringOutput)\n}", "func (status AppInstanceSummary) Key() string {\n\treturn status.UUIDandVersion.UUID.String()\n}", "func (o OceanLaunchSpecElasticIpPoolTagSelectorOutput) TagKey() pulumi.StringOutput {\n\treturn o.ApplyT(func(v OceanLaunchSpecElasticIpPoolTagSelector) string { return v.TagKey }).(pulumi.StringOutput)\n}", "func generateClusterName() string {\n\treturn string(uuid.NewUUID())\n}", "func (meta *Meta) Key() string {\n\treturn Key(meta.GroupVersionKind.Kind, meta.Name, meta.Namespace)\n}", "func templateTierHashLabelKey(tierName string) string {\n\treturn toolchainv1alpha1.LabelKeyPrefix + tierName + \"-tier-hash\"\n}", "func (c newConfigMap) Key() string {\n\treturn configMapKey(c.ConfigMap.Name)\n}", "func (config *Config) Key() string {\n\treturn config.resourceKey\n}", "func NewResourceKey(o metav1.Object, t metav1.Type) ResourceKey {\n\treturn ResourceKey(fmt.Sprintf(\"%s/%s=%s,Kind=%s\", o.GetNamespace(), o.GetName(), t.GetAPIVersion(), t.GetKind()))\n}", "func KeyTemplateForECDHPrimitiveWithCEK(cek []byte, nistpKW bool, encAlg AEADAlg) *tinkpb.KeyTemplate {\n\t// the curve passed in the template below is ignored when executing the primitive, it's hardcoded to pass key\n\t// key format validation only.\n\treturn ecdh.KeyTemplateForECDHPrimitiveWithCEK(cek, nistpKW, encAlg)\n}", "func (m *Metric) Key() string {\n\treturn fmt.Sprintf(\"<%s%d%s>\", m.Name, m.Timestamp, m.Tags)\n}", "func 
EkgKey(domain, id []byte) []byte {\n return createKey(EKG,domain,id)\n}", "func computeKey(cfg external.SharedConfig) string {\n\tvar a []string\n\t// keys must be sorted\n\tif cfg.RoleDurationSeconds != nil {\n\t\ta = append(a, fmt.Sprintf(`\"DurationSeconds\": %d`, int(cfg.RoleDurationSeconds.Seconds())))\n\t}\n\ta = append(a, `\"RoleArn\": `+strconv.Quote(cfg.RoleARN))\n\tif cfg.RoleSessionName != \"\" {\n\t\ta = append(a, `\"RoleSessionName\": `+strconv.Quote(cfg.RoleSessionName))\n\t}\n\ta = append(a, `\"SerialNumber\": `+strconv.Quote(cfg.MFASerial))\n\ts := sha1.Sum([]byte(fmt.Sprintf(\"{%s}\", strings.Join(a, \", \"))))\n\treturn hex.EncodeToString(s[:])\n}", "func (c *AzureModelContext) CloudTagsForInstanceGroup(ig *kops.InstanceGroup) map[string]*string {\n\tconst (\n\t\tclusterNodeTemplateLabel = \"k8s.io_cluster_node-template_label_\"\n\t\tclusterNodeTemplateTaint = \"k8s.io_cluster_node-template_taint_\"\n\t)\n\n\tlabels := make(map[string]string)\n\t// Apply any user-specified global labels first so they can be overridden by IG-specific labels.\n\tfor k, v := range c.Cluster.Spec.CloudLabels {\n\t\tlabels[k] = v\n\t}\n\n\t// Apply any user-specified labels.\n\tfor k, v := range ig.Spec.CloudLabels {\n\t\tlabels[k] = v\n\t}\n\n\t// Apply labels for cluster node labels.\n\ti := 0\n\tfor k, v := range ig.Spec.NodeLabels {\n\t\t// Store the label key in the tag value\n\t\t// so that we don't need to espace \"/\" in the label key.\n\t\tlabels[fmt.Sprintf(\"%s%d\", clusterNodeTemplateLabel, i)] = fmt.Sprintf(\"%s=%s\", k, v)\n\t\ti++\n\t}\n\n\t// Apply labels for cluster node taints.\n\tfor _, v := range ig.Spec.Taints {\n\t\tsplits := strings.SplitN(v, \"=\", 2)\n\t\tif len(splits) > 1 {\n\t\t\tlabels[clusterNodeTemplateTaint+splits[0]] = splits[1]\n\t\t}\n\t}\n\n\t// The system tags take priority because the cluster likely breaks without them...\n\tlabels[azure.TagNameRolePrefix+ig.Spec.Role.ToLowerString()] = \"1\"\n\tif ig.Spec.Role == 
kops.InstanceGroupRoleControlPlane {\n\t\tlabels[azure.TagNameRolePrefix+\"master\"] = \"1\"\n\t}\n\n\t// Set the tag used by kops-controller to identify the instance group to which the VM ScaleSet belongs.\n\tlabels[nodeidentityazure.InstanceGroupNameTag] = ig.Name\n\n\t// Replace all \"/\" with \"_\" as \"/\" is not an allowed key character in Azure.\n\tm := make(map[string]*string)\n\tfor k, v := range labels {\n\t\tm[strings.ReplaceAll(k, \"/\", \"_\")] = fi.PtrTo(v)\n\t}\n\treturn m\n}", "func getClusterNameLabel() string {\n\tkey := fmt.Sprintf(\"%s/cluster-name\", getCAPIGroup())\n\treturn key\n}", "func transportCacheKey(sess *clusterSession) string {\n\tif sess.teleportCluster.isRemote {\n\t\treturn fmt.Sprintf(\"%x\", sess.teleportCluster.name)\n\t}\n\treturn fmt.Sprintf(\"%x/%x\", sess.teleportCluster.name, sess.kubeClusterName)\n}", "func (f *IndexFile) TagKey(name, key []byte) TagKeyElem {\n\ttblk := f.tblks[string(name)]\n\tif tblk == nil {\n\t\treturn nil\n\t}\n\treturn tblk.TagKeyElem(key)\n}", "func (o LookupDatasetKustoClusterResultOutput) KustoClusterId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupDatasetKustoClusterResult) string { return v.KustoClusterId }).(pulumi.StringOutput)\n}", "func (o *CustomHostMetadataKey) GetKey() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Key\n}", "func (r *Cluster) ClusterResourceId() pulumi.StringOutput {\n\treturn (pulumi.StringOutput)(r.s.State[\"clusterResourceId\"])\n}", "func (cg *ConsumerGroup) GetKey() types.NamespacedName {\n\treturn types.NamespacedName{\n\t\tNamespace: cg.GetNamespace(),\n\t\tName: cg.GetName(),\n\t}\n}", "func CacheKey(c Cacheable) (key, command string) {\n\tif len(c.cs.s) == 2 {\n\t\treturn c.cs.s[1], c.cs.s[0]\n\t}\n\n\tkp := 1\n\n\tif c.cf == scrRoTag {\n\t\tif c.cs.s[2] != \"1\" {\n\t\t\tpanic(multiKeyCacheErr)\n\t\t}\n\t\tkp = 3\n\t}\n\n\tlength := 0\n\tfor i, v := range c.cs.s {\n\t\tif i == kp {\n\t\t\tcontinue\n\t\t}\n\t\tlength 
+= len(v)\n\t}\n\tsb := strings.Builder{}\n\tsb.Grow(length)\n\tfor i, v := range c.cs.s {\n\t\tif i == kp {\n\t\t\tkey = v\n\t\t} else {\n\t\t\tsb.WriteString(v)\n\t\t}\n\t}\n\treturn key, sb.String()\n}", "func (o CaCertificateTagOutput) Key() pulumi.StringOutput {\n\treturn o.ApplyT(func(v CaCertificateTag) string { return v.Key }).(pulumi.StringOutput)\n}", "func KeyAdd(organizationId uint, clusterId uint) (string, error) {\n\tlog.Info(\"Generate and store SSH key \")\n\n\tsshKey, err := KeyGenerator()\n\tif err != nil {\n\t\tlog.Errorf(\"KeyGenerator failed reason: %s\", err.Error())\n\t\treturn \"\", err\n\t}\n\n\tdb := model.GetDB()\n\tcluster := model.ClusterModel{ID: clusterId}\n\tif err = db.First(&cluster).Error; err != nil {\n\t\tlog.Errorf(\"Cluster with id=% not found: %s\", cluster.ID, err.Error())\n\t\treturn \"\", err\n\t}\n\tsecretId, err := KeyStore(sshKey, organizationId, cluster.Name)\n\tif err != nil {\n\t\tlog.Errorf(\"KeyStore failed reason: %s\", err.Error())\n\t\treturn \"\", err\n\t}\n\treturn secretId, nil\n}", "func (c Client) Key() string {\n\treturn fmt.Sprintf(\"client.%s\", c.Uid)\n}", "func (t *ScheduledTask) Key() string {\n\treturn fmt.Sprintf(taskKeyFormat, keyPrefixScheduled, t.ID, t.score)\n}", "func CVCKey(cvc *apisv1.CStorVolumeConfig) string {\n\treturn fmt.Sprintf(\"%s/%s\", cvc.Namespace, cvc.Name)\n}", "func GenerateContainerInterfaceKey(containerID string) string {\n\treturn fmt.Sprintf(\"container/%s\", containerID)\n}", "func (a DataNodeKV) Key() string {\n\treturn a.K\n}", "func (sa ServiceAccount) Key() string {\n\treturn fmt.Sprintf(\"%s/%s\", sa.Namespace, sa.Name)\n}", "func (o ClusterNodeGroupOptionsOutput) KeyName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ClusterNodeGroupOptions) *string { return v.KeyName }).(pulumi.StringPtrOutput)\n}", "func (t *RetryTask) Key() string {\n\treturn fmt.Sprintf(taskKeyFormat, keyPrefixRetry, t.ID, t.score)\n}", "func ConstructKey(height uint64) hash.Hash160 
{\n\theightInBytes := byteutil.Uint64ToBytes(height)\n\tk := []byte(CandidatesPrefix)\n\tk = append(k, heightInBytes...)\n\treturn hash.Hash160b(k)\n}", "func (o DrsVmOverrideOutput) ComputeClusterId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *DrsVmOverride) pulumi.StringOutput { return v.ComputeClusterId }).(pulumi.StringOutput)\n}", "func (t *ArchivedTask) Key() string {\n\treturn fmt.Sprintf(taskKeyFormat, keyPrefixArchived, t.ID, t.score)\n}", "func (plugin *ExamplePlugin) etcdKey(label string) string {\n\treturn \"/vnf-agent/\" + plugin.ServiceLabel.GetAgentLabel() + \"/api/v1/example/db/simple/\" + label\n}", "func (o GetKubernetesClusterKubeAdminConfigOutput) ClientKey() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetKubernetesClusterKubeAdminConfig) string { return v.ClientKey }).(pulumi.StringOutput)\n}", "func keyForTemplate(namespace, tmplName string) string {\n\treturn namespace + \"/\" + tmplName\n}", "func Key(typ, name, namespace string) string {\n\treturn fmt.Sprintf(\"%s/%s/%s\", typ, namespace, name)\n}", "func (o AnomalyMonitorResourceTagOutput) Key() pulumi.StringOutput {\n\treturn o.ApplyT(func(v AnomalyMonitorResourceTag) string { return v.Key }).(pulumi.StringOutput)\n}", "func KeyLabel() (string, error) {\n\treturn keyLabel()\n}", "func (o KubernetesNodePoolOutput) ClusterId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *KubernetesNodePool) pulumi.StringOutput { return v.ClusterId }).(pulumi.StringOutput)\n}", "func GenerateCreateClusterInput(cr *svcapitypes.Cluster) *svcsdk.CreateClusterInput {\n\tres := &svcsdk.CreateClusterInput{}\n\n\tif cr.Spec.ForProvider.CapacityProviders != nil {\n\t\tf0 := []*string{}\n\t\tfor _, f0iter := range cr.Spec.ForProvider.CapacityProviders {\n\t\t\tvar f0elem string\n\t\t\tf0elem = *f0iter\n\t\t\tf0 = append(f0, &f0elem)\n\t\t}\n\t\tres.SetCapacityProviders(f0)\n\t}\n\tif cr.Spec.ForProvider.ClusterName != nil {\n\t\tres.SetClusterName(*cr.Spec.ForProvider.ClusterName)\n\t}\n\tif 
cr.Spec.ForProvider.Configuration != nil {\n\t\tf2 := &svcsdk.ClusterConfiguration{}\n\t\tif cr.Spec.ForProvider.Configuration.ExecuteCommandConfiguration != nil {\n\t\t\tf2f0 := &svcsdk.ExecuteCommandConfiguration{}\n\t\t\tif cr.Spec.ForProvider.Configuration.ExecuteCommandConfiguration.KMSKeyID != nil {\n\t\t\t\tf2f0.SetKmsKeyId(*cr.Spec.ForProvider.Configuration.ExecuteCommandConfiguration.KMSKeyID)\n\t\t\t}\n\t\t\tif cr.Spec.ForProvider.Configuration.ExecuteCommandConfiguration.LogConfiguration != nil {\n\t\t\t\tf2f0f1 := &svcsdk.ExecuteCommandLogConfiguration{}\n\t\t\t\tif cr.Spec.ForProvider.Configuration.ExecuteCommandConfiguration.LogConfiguration.CloudWatchEncryptionEnabled != nil {\n\t\t\t\t\tf2f0f1.SetCloudWatchEncryptionEnabled(*cr.Spec.ForProvider.Configuration.ExecuteCommandConfiguration.LogConfiguration.CloudWatchEncryptionEnabled)\n\t\t\t\t}\n\t\t\t\tif cr.Spec.ForProvider.Configuration.ExecuteCommandConfiguration.LogConfiguration.CloudWatchLogGroupName != nil {\n\t\t\t\t\tf2f0f1.SetCloudWatchLogGroupName(*cr.Spec.ForProvider.Configuration.ExecuteCommandConfiguration.LogConfiguration.CloudWatchLogGroupName)\n\t\t\t\t}\n\t\t\t\tif cr.Spec.ForProvider.Configuration.ExecuteCommandConfiguration.LogConfiguration.S3BucketName != nil {\n\t\t\t\t\tf2f0f1.SetS3BucketName(*cr.Spec.ForProvider.Configuration.ExecuteCommandConfiguration.LogConfiguration.S3BucketName)\n\t\t\t\t}\n\t\t\t\tif cr.Spec.ForProvider.Configuration.ExecuteCommandConfiguration.LogConfiguration.S3EncryptionEnabled != nil {\n\t\t\t\t\tf2f0f1.SetS3EncryptionEnabled(*cr.Spec.ForProvider.Configuration.ExecuteCommandConfiguration.LogConfiguration.S3EncryptionEnabled)\n\t\t\t\t}\n\t\t\t\tif cr.Spec.ForProvider.Configuration.ExecuteCommandConfiguration.LogConfiguration.S3KeyPrefix != nil {\n\t\t\t\t\tf2f0f1.SetS3KeyPrefix(*cr.Spec.ForProvider.Configuration.ExecuteCommandConfiguration.LogConfiguration.S3KeyPrefix)\n\t\t\t\t}\n\t\t\t\tf2f0.SetLogConfiguration(f2f0f1)\n\t\t\t}\n\t\t\tif 
cr.Spec.ForProvider.Configuration.ExecuteCommandConfiguration.Logging != nil {\n\t\t\t\tf2f0.SetLogging(*cr.Spec.ForProvider.Configuration.ExecuteCommandConfiguration.Logging)\n\t\t\t}\n\t\t\tf2.SetExecuteCommandConfiguration(f2f0)\n\t\t}\n\t\tres.SetConfiguration(f2)\n\t}\n\tif cr.Spec.ForProvider.DefaultCapacityProviderStrategy != nil {\n\t\tf3 := []*svcsdk.CapacityProviderStrategyItem{}\n\t\tfor _, f3iter := range cr.Spec.ForProvider.DefaultCapacityProviderStrategy {\n\t\t\tf3elem := &svcsdk.CapacityProviderStrategyItem{}\n\t\t\tif f3iter.Base != nil {\n\t\t\t\tf3elem.SetBase(*f3iter.Base)\n\t\t\t}\n\t\t\tif f3iter.CapacityProvider != nil {\n\t\t\t\tf3elem.SetCapacityProvider(*f3iter.CapacityProvider)\n\t\t\t}\n\t\t\tif f3iter.Weight != nil {\n\t\t\t\tf3elem.SetWeight(*f3iter.Weight)\n\t\t\t}\n\t\t\tf3 = append(f3, f3elem)\n\t\t}\n\t\tres.SetDefaultCapacityProviderStrategy(f3)\n\t}\n\tif cr.Spec.ForProvider.Settings != nil {\n\t\tf4 := []*svcsdk.ClusterSetting{}\n\t\tfor _, f4iter := range cr.Spec.ForProvider.Settings {\n\t\t\tf4elem := &svcsdk.ClusterSetting{}\n\t\t\tif f4iter.Name != nil {\n\t\t\t\tf4elem.SetName(*f4iter.Name)\n\t\t\t}\n\t\t\tif f4iter.Value != nil {\n\t\t\t\tf4elem.SetValue(*f4iter.Value)\n\t\t\t}\n\t\t\tf4 = append(f4, f4elem)\n\t\t}\n\t\tres.SetSettings(f4)\n\t}\n\tif cr.Spec.ForProvider.Tags != nil {\n\t\tf5 := []*svcsdk.Tag{}\n\t\tfor _, f5iter := range cr.Spec.ForProvider.Tags {\n\t\t\tf5elem := &svcsdk.Tag{}\n\t\t\tif f5iter.Key != nil {\n\t\t\t\tf5elem.SetKey(*f5iter.Key)\n\t\t\t}\n\t\t\tif f5iter.Value != nil {\n\t\t\t\tf5elem.SetValue(*f5iter.Value)\n\t\t\t}\n\t\t\tf5 = append(f5, f5elem)\n\t\t}\n\t\tres.SetTags(f5)\n\t}\n\n\treturn res\n}", "func generateObjectKey(threadIndex int, payloadSize uint64) string {\n\tvar key string\n\tkeyHash := sha1.Sum([]byte(fmt.Sprintf(\"%03d-%012d\", threadIndex, payloadSize)))\n\tfolder := strconv.Itoa(int(payloadSize))\n\tkey = folder + \"/\" + (fmt.Sprintf(\"%x\", keyHash))\n\treturn key\n}", 
"func (k Key) KeyID() string {\n\treturn k.kid\n}", "func objectKey(meta api.ObjectMeta) string {\n\treturn fmt.Sprintf(\"%s|%s\", meta.Tenant, meta.Name)\n}", "func stringKeyFunc(obj interface{}) (string, error) {\n\tkey := obj.(*nodeidentity.Info).InstanceID\n\treturn key, nil\n}", "func (o AppV2Output) ClusterId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *AppV2) pulumi.StringOutput { return v.ClusterId }).(pulumi.StringOutput)\n}", "func NewKey(instanceNamePrefix digest.InstanceName, platform *remoteexecution.Platform) (Key, error) {\n\t// Ensure that the platform properties are in normal form.\n\tif platform == nil {\n\t\tplatform = &remoteexecution.Platform{}\n\t}\n\n\t// REv2 requires that platform properties are lexicographically\n\t// sorted by name and value.\n\tproperties := platform.Properties\n\tfor i := 1; i < len(properties); i++ {\n\t\tif properties[i-1].Name > properties[i].Name ||\n\t\t\t(properties[i-1].Name == properties[i].Name &&\n\t\t\t\tproperties[i-1].Value >= properties[i].Value) {\n\t\t\tmarshaler := protojson.MarshalOptions{}\n\t\t\treturn Key{}, status.Errorf(codes.InvalidArgument, \"Platform properties are not lexicographically sorted, as property %s should have been placed before property %s\", marshaler.Format(properties[i]), marshaler.Format(properties[i-1]))\n\t\t}\n\t}\n\n\t// TODO: Switch to protojson.Marshal(). We don't want to use it\n\t// right now, as that will cause Prometheus metrics labels to\n\t// become non-deterministic. 
protojson.Marshal() injects random\n\t// whitespace into its output.\n\tmarshaler := jsonpb.Marshaler{}\n\tplatformString, err := marshaler.MarshalToString(platform)\n\tif err != nil {\n\t\treturn Key{}, util.StatusWrapWithCode(err, codes.InvalidArgument, \"Failed to marshal platform message\")\n\t}\n\treturn Key{\n\t\tinstanceNamePrefix: instanceNamePrefix,\n\t\tplatform: platformString,\n\t}, nil\n}", "func (o Iperf3SpecClientConfigurationPodSchedulingAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressionsOutput) Key() pulumi.StringOutput {\n\treturn o.ApplyT(func(v Iperf3SpecClientConfigurationPodSchedulingAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions) string {\n\t\treturn v.Key\n\t}).(pulumi.StringOutput)\n}", "func createKey(name string, version string, nodeID string) string {\n\treturn fmt.Sprintf(\"%s:%s:%s\", nodeID, name, version)\n}", "func (o HostedZoneTagOutput) Key() pulumi.StringOutput {\n\treturn o.ApplyT(func(v HostedZoneTag) string { return v.Key }).(pulumi.StringOutput)\n}", "func (o InferenceClusterOutput) KubernetesClusterId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *InferenceCluster) pulumi.StringOutput { return v.KubernetesClusterId }).(pulumi.StringOutput)\n}", "func (metric DomainMetric) Key() string {\n\treturn metric.UUIDandVersion.UUID.String()\n}", "func (o OceanLaunchSpecElasticIpPoolTagSelectorPtrOutput) TagKey() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *OceanLaunchSpecElasticIpPoolTagSelector) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.TagKey\n\t}).(pulumi.StringPtrOutput)\n}", "func routeKey(route *routev1.Route) ServiceAliasConfigKey {\n\treturn routeKeyFromParts(route.Namespace, route.Name)\n}", "func (p *pvc) key() pvcKey {\n\treturn newPVCKey(p.Cluster, p.Namespace, p.Name)\n}", "func Key(namespaceOrName string, nameOpt ...string) client.ObjectKey 
{\n\tnamespace, name := nameAndNamespace(namespaceOrName, nameOpt...)\n\treturn client.ObjectKey{Namespace: namespace, Name: name}\n}", "func (cmr *ConsumerMetadataRequest) Key() int16 {\n\treturn 10\n}", "func (o ClusterOutput) ClusterId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Cluster) pulumi.StringOutput { return v.ClusterId }).(pulumi.StringOutput)\n}", "func (o ManagedScalingPolicyOutput) ClusterId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *ManagedScalingPolicy) pulumi.StringOutput { return v.ClusterId }).(pulumi.StringOutput)\n}", "func (o *ObjectLabelSelector) Key() ObjectLabelSelectorKey {\n\tk := fmt.Sprintf(\"%s\\\\%s\\\\%s\\\\%s\", o.Group, o.Kind, o.Namespace, o.Selector)\n\treturn ObjectLabelSelectorKey(k)\n}", "func CacheKeyUID(metadata *metav1.ObjectMeta) string {\n\treturn fmt.Sprintf(\"%v\", metadata.UID)\n}", "func (o Iperf3SpecClientConfigurationPodSchedulingAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressionsOutput) Key() pulumi.StringOutput {\n\treturn o.ApplyT(func(v Iperf3SpecClientConfigurationPodSchedulingAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions) string {\n\t\treturn v.Key\n\t}).(pulumi.StringOutput)\n}", "func RouteKey(dstNetwork, outgoingInterface string) string {\n\treturn models.Key(&Route{\n\t\tDstNetwork: dstNetwork,\n\t\tOutgoingInterface: outgoingInterface,\n\t})\n}", "func (certIdentityRequest *CertificateIdentityRequest) GetClusterID() string {\n\treturn certIdentityRequest.ClusterID\n}", "func (o Iperf3SpecClientConfigurationPodSchedulingAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchFieldsOutput) Key() pulumi.StringOutput {\n\treturn o.ApplyT(func(v Iperf3SpecClientConfigurationPodSchedulingAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchFields) string {\n\t\treturn v.Key\n\t}).(pulumi.StringOutput)\n}", "func (saIdentityRequest 
*ServiceAccountIdentityRequest) GetClusterID() string {\n\treturn saIdentityRequest.ClusterID\n}", "func generateContractIdMapKey(contractId string) string {\n return contractIdPrefix + contractId\n}", "func (o ThingGroupTagOutput) Key() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ThingGroupTag) string { return v.Key }).(pulumi.StringOutput)\n}", "func NewKey(ctx *pulumi.Context,\n\tname string, args *KeyArgs, opts ...pulumi.ResourceOption) (*Key, error) {\n\tif args == nil {\n\t\targs = &KeyArgs{}\n\t}\n\n\treplaceOnChanges := pulumi.ReplaceOnChanges([]string{\n\t\t\"location\",\n\t\t\"project\",\n\t})\n\topts = append(opts, replaceOnChanges)\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource Key\n\terr := ctx.RegisterResource(\"google-native:apikeys/v2:Key\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (o BillingGroupTagOutput) Key() pulumi.StringOutput {\n\treturn o.ApplyT(func(v BillingGroupTag) string { return v.Key }).(pulumi.StringOutput)\n}", "func (aih AppAndImageToHash) Key() string {\n\tif aih.PurgeCounter == 0 {\n\t\treturn fmt.Sprintf(\"%s.%s\", aih.AppUUID.String(), aih.ImageID.String())\n\t} else {\n\t\treturn fmt.Sprintf(\"%s.%s.%d\", aih.AppUUID.String(), aih.ImageID.String(), aih.PurgeCounter)\n\t}\n}", "func (o RoleAliasTagOutput) Key() pulumi.StringOutput {\n\treturn o.ApplyT(func(v RoleAliasTag) string { return v.Key }).(pulumi.StringOutput)\n}", "func generateKey(cID, taskID string) (string, error) {\n\tif cutil.IsEmptyStr(cID) {\n\t\treturn \"\", errors.Wrapf(errorType.ErrEmptyValue, \"cID\")\n\t}\n\n\tif cutil.IsEmptyStr(taskID) {\n\t\treturn \"\", errors.Wrapf(errorType.ErrEmptyValue, \"taskID\")\n\t}\n\n\treturn fmt.Sprintf(\"%s%s%s\", cID, \"@\", taskID), nil\n}", "func (instance *DSInstance) NewKey(kind string, name string, parent *datastore.Key) *datastore.Key {\n\tkey := datastore.NameKey(kind, name, parent)\n\tkey.Namespace = 
instance.namespace\n\treturn key\n}", "func (t *PendingTask) Key() string {\n\t// Note: Pending tasks are stored in redis LIST, therefore no score.\n\t// Use zero for the score to use the same key format.\n\treturn fmt.Sprintf(taskKeyFormat, keyPrefixPending, t.ID, 0)\n}", "func (o DataSourceRedshiftParametersOutput) ClusterId() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v DataSourceRedshiftParameters) *string { return v.ClusterId }).(pulumi.StringPtrOutput)\n}", "func (r *templateRouter) routeKey(route *routeapi.Route) string {\n\treturn fmt.Sprintf(\"%s-%s\", route.Namespace, route.Name)\n}", "func GetClusterId() string {\n\treturn axClusterId\n}" ]
[ "0.6541917", "0.6501025", "0.61764675", "0.5924296", "0.5737486", "0.5630355", "0.55596966", "0.5547573", "0.54778725", "0.54484475", "0.53929687", "0.5359112", "0.53568804", "0.5336668", "0.5244287", "0.52346826", "0.5230897", "0.5210514", "0.52006227", "0.51887405", "0.5183306", "0.51534367", "0.51363564", "0.5134275", "0.5124106", "0.51228744", "0.5115372", "0.50874937", "0.50759727", "0.5075446", "0.5051748", "0.50508964", "0.50277627", "0.50221163", "0.50151294", "0.5014886", "0.5014637", "0.5001506", "0.49970675", "0.49728182", "0.49698067", "0.49439362", "0.49375695", "0.49269202", "0.4924427", "0.49120113", "0.49003506", "0.48991865", "0.48874405", "0.48800585", "0.4875417", "0.48740634", "0.48731613", "0.4864895", "0.48585463", "0.4858461", "0.48473582", "0.48441648", "0.48432058", "0.48246565", "0.4824545", "0.4821306", "0.4820184", "0.4818894", "0.481042", "0.4806705", "0.4805335", "0.47993174", "0.4798968", "0.47977298", "0.4793298", "0.47910005", "0.47905588", "0.47893688", "0.478758", "0.47816554", "0.47749725", "0.47743902", "0.47738248", "0.47586298", "0.47563767", "0.47540548", "0.47504988", "0.47494718", "0.47493052", "0.47427452", "0.4740542", "0.4739703", "0.4736657", "0.47344762", "0.47328416", "0.4731275", "0.47294164", "0.47286984", "0.4728043", "0.4727133", "0.47174063", "0.47156185", "0.47132772", "0.47131708" ]
0.7726849
0
Build builds tags including the cluster tag and returns them in map form.
func Build(params BuildParams) Labels { tags := make(Labels) for k, v := range params.Additional { tags[strings.ToLower(k)] = strings.ToLower(v) } tags[ClusterTagKey(params.ClusterName)] = string(params.Lifecycle) if params.Role != nil { tags[NameGCPClusterAPIRole] = strings.ToLower(*params.Role) } return tags }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func buildTags() string {\n\treturn *tags\n}", "func (src *prometheusMetricsSource) buildTags(m *dto.Metric) map[string]string {\n\tresult := map[string]string{}\n\tfor k, v := range src.tags {\n\t\tif len(v) > 0 {\n\t\t\tresult[k] = v\n\t\t}\n\t}\n\tfor _, lp := range m.Label {\n\t\tif len(lp.GetValue()) > 0 {\n\t\t\tresult[lp.GetName()] = lp.GetValue()\n\t\t}\n\t}\n\treturn result\n}", "func (r *Cluster) Tags() pulumi.MapOutput {\n\treturn (pulumi.MapOutput)(r.s.State[\"tags\"])\n}", "func (r *Cluster) Tags() pulumi.MapOutput {\n\treturn (pulumi.MapOutput)(r.s.State[\"tags\"])\n}", "func (t *SentryTaggedStruct) BuildTags(v interface{}) (tags map[string]string, err error) {\n\titems := make(map[string]string)\n\tfor prop, name := range t.Tags {\n\t\tif _, value, e := t.GetProperty(v, prop); e == nil {\n\t\t\titems[name] = value\n\t\t} else {\n\t\t\terr = e\n\t\t\treturn\n\t\t}\n\t}\n\n\ttags = items\n\treturn\n}", "func (t *SentryTaggedScalar) BuildTags(v interface{}) (items map[string]string, err error) {\n\titems = make(map[string]string)\n\tif value, e := t.Get(v); e == nil {\n\t\titems[t.Name] = value\n\t} else {\n\t\terr = e\n\t}\n\treturn\n}", "func (o ClusterOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *Cluster) pulumi.StringMapOutput { return v.Tags }).(pulumi.StringMapOutput)\n}", "func (o InferenceClusterOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *InferenceCluster) pulumi.StringMapOutput { return v.Tags }).(pulumi.StringMapOutput)\n}", "func makeTags(ctx context.Context, vsc *vsClient, cfgTags map[string][]string) error {\n\n\tfor t := range cfgTags {\n\t\tpreDesc := \"Hardware configuration for \"\n\t\tpreName := \"config.hardware.\"\n\n\t\tif t == \"memoryHotAddEnabled\" || t == \"cpuHotRemoveEnabled\" || t == \"cpuHotAddEnabled\" {\n\t\t\tpreDesc = \"Configuration for \"\n\t\t\tpreName = \"config.\"\n\t\t}\n\n\t\tcID, err := vsc.tagManager.CreateCategory(ctx, &tags.Category{\n\t\t\tAssociableTypes: 
[]string{\"VirtualMachine\"},\n\t\t\tCardinality: \"SINGLE\",\n\t\t\tDescription: preDesc,\n\t\t\tName: preName + t,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfor _, p := range cfgTags[t] {\n\t\t\tfmt.Println(\"Creating \" + p + \" tag for \" + t)\n\t\t\t_, err := vsc.tagManager.CreateTag(ctx, &tags.Tag{\n\t\t\t\tCategoryID: cID,\n\t\t\t\tDescription: \"Preset for \" + t + \" configuration\",\n\t\t\t\tName: p,\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (metadata *GCPMetadata) TagMap() map[string]string {\n\ttagMap := map[string]string{}\n\tif metadata.ContainerID != nil {\n\t\ttagMap[metadata.ContainerID.TagName] = metadata.ContainerID.Value\n\t}\n\tif metadata.Region != nil {\n\t\ttagMap[metadata.Region.TagName] = metadata.Region.Value\n\t}\n\tif metadata.ProjectID != nil {\n\t\ttagMap[metadata.ProjectID.TagName] = metadata.ProjectID.Value\n\t}\n\treturn tagMap\n}", "func (b *ClusterBuilder) Build() (object *Cluster, err error) {\n\tobject = new(Cluster)\n\tobject.id = b.id\n\tobject.href = b.href\n\tobject.bitmap_ = b.bitmap_\n\tif b.api != nil {\n\t\tobject.api, err = b.api.Build()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tif b.aws != nil {\n\t\tobject.aws, err = b.aws.Build()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tif b.awsInfrastructureAccessRoleGrants != nil {\n\t\tobject.awsInfrastructureAccessRoleGrants, err = b.awsInfrastructureAccessRoleGrants.Build()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tif b.ccs != nil {\n\t\tobject.ccs, err = b.ccs.Build()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tif b.dns != nil {\n\t\tobject.dns, err = b.dns.Build()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tif b.gcp != nil {\n\t\tobject.gcp, err = b.gcp.Build()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tif b.addons != nil {\n\t\tobject.addons, err = b.addons.Build()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tobject.billingModel = 
b.billingModel\n\tif b.cloudProvider != nil {\n\t\tobject.cloudProvider, err = b.cloudProvider.Build()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tif b.console != nil {\n\t\tobject.console, err = b.console.Build()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tobject.creationTimestamp = b.creationTimestamp\n\tobject.disableUserWorkloadMonitoring = b.disableUserWorkloadMonitoring\n\tobject.displayName = b.displayName\n\tobject.etcdEncryption = b.etcdEncryption\n\tobject.expirationTimestamp = b.expirationTimestamp\n\tobject.externalID = b.externalID\n\tif b.externalConfiguration != nil {\n\t\tobject.externalConfiguration, err = b.externalConfiguration.Build()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tif b.flavour != nil {\n\t\tobject.flavour, err = b.flavour.Build()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tif b.groups != nil {\n\t\tobject.groups, err = b.groups.Build()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tobject.healthState = b.healthState\n\tif b.identityProviders != nil {\n\t\tobject.identityProviders, err = b.identityProviders.Build()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tif b.ingresses != nil {\n\t\tobject.ingresses, err = b.ingresses.Build()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tobject.loadBalancerQuota = b.loadBalancerQuota\n\tif b.machinePools != nil {\n\t\tobject.machinePools, err = b.machinePools.Build()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tobject.managed = b.managed\n\tobject.multiAZ = b.multiAZ\n\tobject.name = b.name\n\tif b.network != nil {\n\t\tobject.network, err = b.network.Build()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tif b.nodeDrainGracePeriod != nil {\n\t\tobject.nodeDrainGracePeriod, err = b.nodeDrainGracePeriod.Build()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tif b.nodes != nil {\n\t\tobject.nodes, err = b.nodes.Build()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tobject.openshiftVersion = b.openshiftVersion\n\tif b.product != nil 
{\n\t\tobject.product, err = b.product.Build()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tif b.properties != nil {\n\t\tobject.properties = make(map[string]string)\n\t\tfor k, v := range b.properties {\n\t\t\tobject.properties[k] = v\n\t\t}\n\t}\n\tif b.provisionShard != nil {\n\t\tobject.provisionShard, err = b.provisionShard.Build()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tif b.region != nil {\n\t\tobject.region, err = b.region.Build()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tobject.state = b.state\n\tif b.status != nil {\n\t\tobject.status, err = b.status.Build()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tif b.storageQuota != nil {\n\t\tobject.storageQuota, err = b.storageQuota.Build()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tif b.subscription != nil {\n\t\tobject.subscription, err = b.subscription.Build()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tif b.version != nil {\n\t\tobject.version, err = b.version.Build()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}", "func (o ClusterInstanceOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *ClusterInstance) pulumi.StringMapOutput { return v.Tags }).(pulumi.StringMapOutput)\n}", "func (o TriggerBuildOutput) Tags() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v TriggerBuild) []string { return v.Tags }).(pulumi.StringArrayOutput)\n}", "func (o EciScalingConfigurationOutput) Tags() pulumi.MapOutput {\n\treturn o.ApplyT(func(v *EciScalingConfiguration) pulumi.MapOutput { return v.Tags }).(pulumi.MapOutput)\n}", "func (o EcsLaunchTemplateOutput) Tags() pulumi.MapOutput {\n\treturn o.ApplyT(func(v *EcsLaunchTemplate) pulumi.MapOutput { return v.Tags }).(pulumi.MapOutput)\n}", "func (o BucketOutput) Tags() pulumi.MapOutput {\n\treturn o.ApplyT(func(v *Bucket) pulumi.MapOutput { return v.Tags }).(pulumi.MapOutput)\n}", "func (r *Distribution) Tags() pulumi.MapOutput {\n\treturn (pulumi.MapOutput)(r.s.State[\"tags\"])\n}", "func (b 
*ServiceClusterBuilder) Build() (object *ServiceCluster, err error) {\n\tobject = new(ServiceCluster)\n\tobject.id = b.id\n\tobject.href = b.href\n\tobject.bitmap_ = b.bitmap_\n\tif b.dns != nil {\n\t\tobject.dns, err = b.dns.Build()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tobject.cloudProvider = b.cloudProvider\n\tif b.clusterManagementReference != nil {\n\t\tobject.clusterManagementReference, err = b.clusterManagementReference.Build()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tobject.name = b.name\n\tobject.region = b.region\n\tobject.status = b.status\n\treturn\n}", "func PackTags(in map[string]string) string {\n\ttags := []string{}\n\n\tfor k, v := range in {\n\t\ttags = append(tags, fmt.Sprintf(\"%s:%s\", k, v))\n\t}\n\n\tsort.Strings(tags)\n\n\treturn strings.Join(tags, \",\")\n}", "func (o GetServerGroupsGroupOutput) Tags() pulumi.MapOutput {\n\treturn o.ApplyT(func(v GetServerGroupsGroup) map[string]interface{} { return v.Tags }).(pulumi.MapOutput)\n}", "func (r *ResourceGroup) Tags() pulumi.MapOutput {\n\treturn (pulumi.MapOutput)(r.s.State[\"tags\"])\n}", "func (r *LogGroup) Tags() pulumi.MapOutput {\n\treturn (pulumi.MapOutput)(r.s.State[\"tags\"])\n}", "func buildLabels(params CRDCreationParameters) (map[string]string, error) {\n\tlabels := map[string]string{}\n\tlabels[tekton.LabelOwner] = params.GitInfo.Organisation\n\tlabels[tekton.LabelRepo] = params.GitInfo.Name\n\tlabels[tekton.LabelBranch] = params.BranchIdentifier\n\tif params.Context != \"\" {\n\t\tlabels[tekton.LabelContext] = params.Context\n\t}\n\tlabels[tekton.LabelBuild] = params.BuildNumber\n\n\t// add any custom labels\n\tcustomLabels, err := util.ExtractKeyValuePairs(params.Labels, \"=\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn util.MergeMaps(labels, customLabels), nil\n}", "func (b *ByNameCategorizerBuilder) Build() Categorizer {\n\tbyNameMap := make(map[string]fin.Cat)\n\tfor k, v := range b.trainingData {\n\t\tif v.cat != fin.Expense 
{\n\t\t\tbyNameMap[k] = v.cat\n\t\t}\n\t}\n\treturn byNameCategorizer(byNameMap)\n}", "func (o ClusterOutput) TagsAll() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *Cluster) pulumi.StringMapOutput { return v.TagsAll }).(pulumi.StringMapOutput)\n}", "func buildAwsTags(configTags []types.Tag, defaultName string) ([]*ec2.Tag, string) {\n\ttags := []*ec2.Tag{}\n\tvar nameSpecified bool\n\tname := defaultName\n\n\tfor _, tag := range configTags {\n\t\ttags = append(tags, &ec2.Tag{Key: aws.String(tag.Key), Value: aws.String(tag.Value)})\n\t\tif tag.Key == \"Name\" {\n\t\t\tnameSpecified = true\n\t\t\tname = tag.Value\n\t\t}\n\t}\n\n\tif !nameSpecified {\n\t\ttags = append(tags, &ec2.Tag{\n\t\t\tKey: aws.String(\"Name\"),\n\t\t\tValue: aws.String(name),\n\t\t})\n\t}\n\n\ttags = append(tags, &ec2.Tag{\n\t\tKey: aws.String(\"CreatedBy\"),\n\t\tValue: aws.String(\"ops\"),\n\t})\n\n\treturn tags, name\n}", "func (t *Tag) Build(w io.Writer) error {\n\tif err := t.start(w); err != nil {\n\t\treturn err\n\t}\n\tif !t.nochildren {\n\t\tfor _, c := range t.children {\n\t\t\tc.level = t.level + 1\n\t\t\tif err := c.Build(w); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\tif err := t.end(w); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (point *Point) GetTags(noarray bool, separator string) map[string]string {\n\ttags := map[string]string{}\n\ttags[\"vcenter\"] = point.VCenter\n\ttags[\"type\"] = point.ObjectType\n\ttags[\"name\"] = point.ObjectName\n\tif noarray {\n\t\tif len(point.Datastore) > 0 {\n\t\t\ttags[\"datastore\"] = point.Datastore[0]\n\t\t}\n\t} else {\n\t\tif len(point.Datastore) > 0 {\n\t\t\ttags[\"datastore\"] = strings.Join(point.Datastore, separator)\n\t\t}\n\t}\n\tif noarray {\n\t\tif len(point.Network) > 0 {\n\t\t\ttags[\"network\"] = point.Network[0]\n\t\t}\n\t} else {\n\t\tif len(point.Network) > 0 {\n\t\t\ttags[\"network\"] = strings.Join(point.Network, separator)\n\t\t}\n\t}\n\tif len(point.ESXi) > 0 {\n\t\ttags[\"host\"] = 
point.ESXi\n\t}\n\tif len(point.Cluster) > 0 {\n\t\ttags[\"cluster\"] = point.Cluster\n\t}\n\tif len(point.Instance) > 0 {\n\t\ttags[\"instance\"] = point.Instance\n\t}\n\tif len(point.ResourcePool) > 0 {\n\t\ttags[\"resourcepool\"] = point.ResourcePool\n\t}\n\tif len(point.Folder) > 0 {\n\t\ttags[\"folder\"] = point.Folder\n\t}\n\tif noarray {\n\t\tif len(point.ViTags) > 0 {\n\t\t\ttags[\"vitags\"] = point.ViTags[0]\n\t\t}\n\t} else {\n\t\tif len(point.ViTags) > 0 {\n\t\t\ttags[\"vitags\"] = strings.Join(point.ViTags, separator)\n\t\t}\n\t}\n\t/*\n\t\tif point.NumCPU != 0 {\n\t\t\ttags[\"numcpu\"] = strconv.FormatInt(int64(point.NumCPU), 10)\n\t\t}\n\t\tif point.MemorySizeMB != 0 {\n\t\t\ttags[\"memorysizemb\"] = strconv.FormatInt(int64(point.MemorySizeMB), 10)\n\t\t}\n\t*/\n\treturn tags\n}", "func (r *Bucket) Tags() pulumi.MapOutput {\n\treturn (pulumi.MapOutput)(r.s.State[\"tags\"])\n}", "func (o RegistryGeoreplicationOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v RegistryGeoreplication) map[string]string { return v.Tags }).(pulumi.StringMapOutput)\n}", "func (r *Document) Tags() pulumi.MapOutput {\n\treturn (pulumi.MapOutput)(r.s.State[\"tags\"])\n}", "func BuildTagMapping(structTemplatePointer interface{}, mappedKeyTag string, resultExclusionTag string, inheritKeyFromField bool, convertKeyToLowerCase bool, tags []string) map[string](map[string]string) {\n\treflectStructType := DiscoverTypeByKind(structTemplatePointer, reflect.Struct)\n\tvar result = make(map[string]map[string]string)\n\tvar anonymousMappings = make(map[string]map[string]string)\n\n\tfor i := 0; i < reflectStructType.NumField(); i++ {\n\t\tvar field reflect.StructField\n\t\tfield = reflectStructType.Field(i)\n\t\tkey := getTagValues(field, mappedKeyTag)\n\n\t\tif field.Anonymous && key == \"\" {\n\t\t\tvar anonymousType = DereferenceType(field.Type)\n\t\t\tif anonymousType.Kind() == reflect.Struct {\n\t\t\t\tanonymousMapping := 
BuildTagMapping(reflect.New(anonymousType).Interface(), mappedKeyTag, resultExclusionTag, inheritKeyFromField, convertKeyToLowerCase, tags)\n\t\t\t\tfor k, v := range anonymousMapping {\n\t\t\t\t\tanonymousMappings[k] = v\n\t\t\t\t\tanonymousMappings[k][anonymousKey] = \"true\"\n\t\t\t\t\tanonymousMappings[k][fieldIndexKey] = AsString(i)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tcontinue\n\t\t}\n\n\t\tisTransient := strings.EqualFold(field.Tag.Get(resultExclusionTag), \"true\")\n\t\tif isTransient {\n\t\t\tcontinue\n\t\t}\n\n\t\tif key == \"\" {\n\t\t\tif !inheritKeyFromField {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tkey = field.Name\n\t\t}\n\n\t\tif convertKeyToLowerCase {\n\t\t\tkey = strings.ToLower(key)\n\t\t}\n\n\t\tresult[key] = make(map[string]string)\n\t\tfor _, tag := range tags {\n\t\t\ttagValue := field.Tag.Get(tag)\n\t\t\tif len(tagValue) > 0 {\n\t\t\t\tresult[key][tag] = tagValue\n\t\t\t}\n\t\t}\n\t\tresult[key][fieldNameKey] = field.Name\n\t}\n\n\tfor k, v := range anonymousMappings {\n\t\tif _, has := result[k]; !has {\n\t\t\tresult[k] = v\n\t\t}\n\t}\n\treturn result\n}", "func (o GetClustersClusterOutput) Tags() GetClustersClusterTagArrayOutput {\n\treturn o.ApplyT(func(v GetClustersCluster) []GetClustersClusterTag { return v.Tags }).(GetClustersClusterTagArrayOutput)\n}", "func (c *NodeImageBuildContext) Build() (err error) {\n\t// ensure kubernetes build is up to date first\n\tlog.Infof(\"Starting to build Kubernetes\")\n\tif err = c.Bits.Build(); err != nil {\n\t\tlog.Errorf(\"Failed to build Kubernetes: %v\", err)\n\t\treturn errors.Wrap(err, \"failed to build kubernetes\")\n\t}\n\tlog.Infof(\"Finished building Kubernetes\")\n\n\t// create tempdir to build the image in\n\tbuildDir, err := TempDir(\"\", \"kind-node-image\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer os.RemoveAll(buildDir)\n\n\tlog.Infof(\"Building node image in: %s\", buildDir)\n\n\t// populate the kubernetes artifacts first\n\tif err := c.populateBits(buildDir); err != nil {\n\t\treturn 
err\n\t}\n\n\t// then the perform the actual docker image build\n\treturn c.buildImage(buildDir)\n}", "func (o ClusterNodeGroupOptionsOutput) CloudFormationTags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v ClusterNodeGroupOptions) map[string]string { return v.CloudFormationTags }).(pulumi.StringMapOutput)\n}", "func (o NetworkOutput) Tags() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v *Network) pulumi.StringArrayOutput { return v.Tags }).(pulumi.StringArrayOutput)\n}", "func (sm *SpecMore) TagsMap(inclTop, inclOps bool) map[string]int {\n\ttagsMap := map[string]int{}\n\tif inclTop {\n\t\tfor _, tag := range sm.Spec.Tags {\n\t\t\ttagName := strings.TrimSpace(tag.Name)\n\t\t\tif len(tagName) > 0 {\n\t\t\t\tif _, ok := tagsMap[tagName]; !ok {\n\t\t\t\t\ttagsMap[tagName] = 0\n\t\t\t\t}\n\t\t\t\ttagsMap[tagName]++\n\t\t\t}\n\t\t}\n\t}\n\tif inclOps {\n\t\tVisitOperations(sm.Spec, func(skipPath, skipMethod string, op *oas3.Operation) {\n\t\t\tfor _, tagName := range op.Tags {\n\t\t\t\ttagName = strings.TrimSpace(tagName)\n\t\t\t\tif len(tagName) > 0 {\n\t\t\t\t\tif _, ok := tagsMap[tagName]; !ok {\n\t\t\t\t\t\ttagsMap[tagName] = 0\n\t\t\t\t\t}\n\t\t\t\t\ttagsMap[tagName]++\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n\treturn tagsMap\n}", "func BuildParamsToTagSpecification(ec2ResourceType string, params infrav1.BuildParams) *ec2.TagSpecification {\n\ttags := infrav1.Build(params)\n\n\ttagSpec := &ec2.TagSpecification{ResourceType: aws.String(ec2ResourceType)}\n\n\t// For testing, we need sorted keys\n\tsortedKeys := make([]string, 0, len(tags))\n\tfor k := range tags {\n\t\tsortedKeys = append(sortedKeys, k)\n\t}\n\n\tsort.Strings(sortedKeys)\n\n\tfor _, key := range sortedKeys {\n\t\ttagSpec.Tags = append(tagSpec.Tags, &ec2.Tag{\n\t\t\tKey: aws.String(key),\n\t\t\tValue: aws.String(tags[key]),\n\t\t})\n\t}\n\n\treturn tagSpec\n}", "func (b *ClusterNodesBuilder) Build() (object *ClusterNodes, err error) {\n\tobject = new(ClusterNodes)\n\tobject.compute = 
b.compute\n\tobject.infra = b.infra\n\tobject.master = b.master\n\tobject.total = b.total\n\treturn\n}", "func (o CrawlerOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *Crawler) pulumi.StringMapOutput { return v.Tags }).(pulumi.StringMapOutput)\n}", "func (o LookupNetworkPacketCoreControlPlaneResultOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v LookupNetworkPacketCoreControlPlaneResult) map[string]string { return v.Tags }).(pulumi.StringMapOutput)\n}", "func (m *ccMetric) Tags() map[string]string {\n\treturn m.tags\n}", "func buildLabels(cmdLabels, imageLabels map[string]string) map[string]string {\n\tlabels := make(map[string]string)\n\tfor k, v := range imageLabels {\n\t\tif err := clabels.Validate(k, v); err == nil {\n\t\t\tlabels[k] = v\n\t\t} else {\n\t\t\t// In case the image label is invalid, we output a warning and skip adding it to the\n\t\t\t// container.\n\t\t\tlog.L.WithError(err).Warnf(\"unable to add image label with key %s to the container\", k)\n\t\t}\n\t}\n\t// labels from the command line will override image and the initial image config labels\n\tfor k, v := range cmdLabels {\n\t\tlabels[k] = v\n\t}\n\treturn labels\n}", "func BuildLabelAnnotationProperties(tagMaps []map[string]string) []*proto.EntityDTO_EntityProperty {\n\tvar properties []*proto.EntityDTO_EntityProperty\n\ttagsPropertyNamespace := VCTagsPropertyNamespace\n\n\tfor _, tagMap := range tagMaps {\n\t\tfor key, val := range tagMap {\n\t\t\ttagNamePropertyName := key\n\t\t\ttagNamePropertyValue := val\n\t\t\ttagProperty := &proto.EntityDTO_EntityProperty{\n\t\t\t\tNamespace: &tagsPropertyNamespace,\n\t\t\t\tName: &tagNamePropertyName,\n\t\t\t\tValue: &tagNamePropertyValue,\n\t\t\t}\n\t\t\tproperties = append(properties, tagProperty)\n\t\t}\n\t}\n\n\treturn properties\n}", "func (o TargetGroupOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v TargetGroup) map[string]string { return v.Tags }).(pulumi.StringMapOutput)\n}", "func 
buildReleaseMap(instConfig installCfg, cs []metahelm.Chart) metahelm.ReleaseMap {\n\trm := make(map[string]string)\n\tfor _, c := range cs {\n\t\tif _, ok := rm[c.Title]; !ok {\n\t\t\trm[c.Title] = metahelm.ReleaseName(instConfig.releaseNamePrefix + c.Title)\n\t\t}\n\t}\n\treturn rm\n}", "func (Map) Build(vt ValueType, from, to string) *Map {\n\treturn &Map{\n\t\tType: vt,\n\t\tFrom: strings.Split(from, \".\"),\n\t\tTo: strings.Split(to, \".\"),\n\t}\n}", "func GetUntaggedNodes(appSettings appsettings.AppSettings) (nodes2tag []string, err error) {\n\t// creates the in-cluster config\n\tconfig, err := rest.InClusterConfig()\n\tif err != nil {\n\t\treturn nodes2tag, err\n\t}\n\t// creates the clientset\n\tclientset, err := kubernetes.NewForConfig(config)\n\tif err != nil {\n\t\tlog.Debugf(\"Getting the in cluster configuration for Kubernetes\")\n\t\treturn nodes2tag, err\n\t}\n\tfor {\n\t\tnodes, err := clientset.CoreV1().Nodes().List(metav1.ListOptions{})\n\t\tif err != nil {\n\t\t\tlog.Debugf(\"Connecting to Kubernetes...\")\n\t\t\treturn nodes2tag, err\n\t\t}\n\t\tlog.Debugf(\"There are %d nodes in the cluster\\n\", len(nodes.Items))\n\n\t\tfor _, node := range nodes.Items {\n\t\t\tlog.Debugf(\"NodeName: %s\", node.Name)\n\t\t\tlog.Debugf(\"Annotations: %v\", node.Annotations)\n\n\t\t\t// not very pretty, this will itirate over annotations of all nodes for all annotations\n\t\t\tfor i := range appSettings.InfrastructureTags {\n\t\t\t\tif node.Annotations[K8sAnnotationDomain+\"/\"+appSettings.InfrastructureTags[i].Key] == \"\" {\n\t\t\t\t\tlog.Debugf(\"Annotation: %v, does not exist on node %s\", appSettings.InfrastructureTags[i].Key, node.Name)\n\t\t\t\t\tlog.Debugf(\"Adding Node %s, to list of nodes to tag\", node.Name)\n\t\t\t\t\tnodes2tag = append(nodes2tag, node.Name)\n\t\t\t\t\tbreak\n\t\t\t\t} else if node.Annotations[K8sAnnotationDomain+\"/\"+appSettings.InfrastructureTags[i].Key] != appSettings.InfrastructureTags[i].Value 
{\n\t\t\t\t\tlog.Debugf(\"Annotation: %v, value is %s however we expected %s\",\n\t\t\t\t\t\tappSettings.InfrastructureTags[i].Key,\n\t\t\t\t\t\tnode.Annotations[appSettings.InfrastructureTags[i].Key],\n\t\t\t\t\t\tappSettings.InfrastructureTags[i].Value,\n\t\t\t\t\t)\n\t\t\t\t\tlog.Debugf(\"Adding Node %s, to list of nodes to tag\", node.Name)\n\t\t\t\t\tnodes2tag = append(nodes2tag, node.Name)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn nodes2tag, nil\n\t}\n}", "func (o LookupNetworkDataNetworkResultOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v LookupNetworkDataNetworkResult) map[string]string { return v.Tags }).(pulumi.StringMapOutput)\n}", "func (o DomainOutput) Tags() pulumi.MapOutput {\n\treturn o.ApplyT(func(v *Domain) pulumi.MapOutput { return v.Tags }).(pulumi.MapOutput)\n}", "func createRunningBuildMap(master *buildbotMaster) buildMap {\n\tresult := buildMap{}\n\tfor _, slave := range master.Slaves {\n\t\tfor _, build := range slave.Runningbuilds {\n\t\t\tresult[builderRef{build.Buildername, build.Number}] = &build\n\t\t}\n\t}\n\treturn result\n}", "func (r *Rule) Tags() pulumi.MapOutput {\n\treturn (pulumi.MapOutput)(r.s.State[\"tags\"])\n}", "func (r *CachesIscsiVolume) Tags() pulumi.MapOutput {\n\treturn (pulumi.MapOutput)(r.s.State[\"tags\"])\n}", "func (o TransitRouterMulticastDomainOutput) Tags() pulumi.MapOutput {\n\treturn o.ApplyT(func(v *TransitRouterMulticastDomain) pulumi.MapOutput { return v.Tags }).(pulumi.MapOutput)\n}", "func (o NetworkInsightsAnalysisOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *NetworkInsightsAnalysis) pulumi.StringMapOutput { return v.Tags }).(pulumi.StringMapOutput)\n}", "func (rb *AutoFollowedClusterBuilder) Build() AutoFollowedCluster {\n\treturn *rb.v\n}", "func (o OceanLaunchSpecOutput) Tags() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v *OceanLaunchSpec) pulumi.StringArrayOutput { return v.Tags }).(pulumi.StringArrayOutput)\n}", "func (c 
*ALIModelContext) CloudTagsForInstanceGroup(ig *kops.InstanceGroup) (map[string]string, error) {\n\tlabels := make(map[string]string)\n\n\t// Apply any user-specified global labels first so they can be overridden by IG-specific labels\n\tfor k, v := range c.Cluster.Spec.CloudLabels {\n\t\tlabels[k] = v\n\t}\n\n\t// Apply any user-specified labels\n\tfor k, v := range ig.Spec.CloudLabels {\n\t\tlabels[k] = v\n\t}\n\n\t// Apply labels for cluster autoscaler node labels\n\tfor k, v := range ig.Spec.NodeLabels {\n\t\tlabels[k] = v\n\t}\n\n\t// Apply labels for cluster autoscaler node taints\n\tfor _, v := range ig.Spec.Taints {\n\t\tsplits := strings.SplitN(v, \"=\", 2)\n\t\tif len(splits) > 1 {\n\t\t\tlabels[splits[0]] = splits[1]\n\t\t}\n\t}\n\n\t// The system tags take priority because the cluster likely breaks without them...\n\n\tif ig.Spec.Role == kops.InstanceGroupRoleMaster {\n\t\tlabels[CloudTagInstanceGroupRolePrefix+strings.ToLower(string(kops.InstanceGroupRoleMaster))] = \"1\"\n\t}\n\n\tif ig.Spec.Role == kops.InstanceGroupRoleNode {\n\t\tlabels[CloudTagInstanceGroupRolePrefix+strings.ToLower(string(kops.InstanceGroupRoleNode))] = \"1\"\n\t}\n\n\tif ig.Spec.Role == kops.InstanceGroupRoleBastion {\n\t\tlabels[CloudTagInstanceGroupRolePrefix+strings.ToLower(string(kops.InstanceGroupRoleBastion))] = \"1\"\n\t}\n\n\treturn labels, nil\n}", "func (o KubernetesClusterDefaultNodePoolOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v KubernetesClusterDefaultNodePool) map[string]string { return v.Tags }).(pulumi.StringMapOutput)\n}", "func (o CloudConfigurationRuleOutput) Tags() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v *CloudConfigurationRule) pulumi.StringArrayOutput { return v.Tags }).(pulumi.StringArrayOutput)\n}", "func (o LookupGroupResultOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v LookupGroupResult) map[string]string { return v.Tags }).(pulumi.StringMapOutput)\n}", "func (o KafkaMirrorMakerOutput) Tags() 
KafkaMirrorMakerTagArrayOutput {\n\treturn o.ApplyT(func(v *KafkaMirrorMaker) KafkaMirrorMakerTagArrayOutput { return v.Tags }).(KafkaMirrorMakerTagArrayOutput)\n}", "func (o LookupVirtualNetworkResultOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v LookupVirtualNetworkResult) map[string]string { return v.Tags }).(pulumi.StringMapOutput)\n}", "func BuildLabelsMap(name, stat string) map[string]string {\n\tlabels := map[string]string{\n\t\tnameLabel: name,\n\t}\n\tif stat != \"\" {\n\t\tlabels[statLabel] = stat\n\t}\n\treturn labels\n}", "func (b *Builder) Build() ([]*unstructured.Unstructured, error) {\n\tm, err := b.build()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresources, err := m.AsYaml()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"kustomize build failed: %w\", err)\n\t}\n\n\tobjects, err := ssa.ReadObjects(bytes.NewReader(resources))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"kustomize build failed: %w\", err)\n\t}\n\n\tif m := b.kustomization.Spec.CommonMetadata; m != nil {\n\t\tssa.SetCommonMetadata(objects, m.Labels, m.Annotations)\n\t}\n\n\treturn objects, nil\n}", "func (o ReservedInstanceOutput) Tags() pulumi.MapOutput {\n\treturn o.ApplyT(func(v *ReservedInstance) pulumi.MapOutput { return v.Tags }).(pulumi.MapOutput)\n}", "func (o ClusterNodeGroupOptionsPtrOutput) CloudFormationTags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *ClusterNodeGroupOptions) map[string]string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.CloudFormationTags\n\t}).(pulumi.StringMapOutput)\n}", "func mapToStrings(tagMap map[string]string) []string {\n\ttags := make([]string, 0, len(tagMap))\n\n\tfor k, v := range tagMap {\n\t\ttags = append(tags, buildTag(k, v))\n\t}\n\n\treturn tags\n}", "func (o NodeBalancerOutput) Tags() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v *NodeBalancer) pulumi.StringArrayOutput { return v.Tags }).(pulumi.StringArrayOutput)\n}", "func Build(m Moby, w io.Writer, opts BuildOpts) error 
{\n\tif MobyDir == \"\" {\n\t\tMobyDir = defaultMobyConfigDir()\n\t}\n\n\t// create tmp dir in case needed\n\tif err := os.MkdirAll(filepath.Join(MobyDir, \"tmp\"), 0755); err != nil {\n\t\treturn err\n\t}\n\n\tiw := tar.NewWriter(w)\n\n\t// add additions\n\taddition := additions[opts.BuilderType]\n\n\t// allocate each container a uid, gid that can be referenced by name\n\tidMap := map[string]uint32{}\n\tid := uint32(100)\n\tfor _, image := range m.Onboot {\n\t\tidMap[image.Name] = id\n\t\tid++\n\t}\n\tfor _, image := range m.Onshutdown {\n\t\tidMap[image.Name] = id\n\t\tid++\n\t}\n\tfor _, image := range m.Services {\n\t\tidMap[image.Name] = id\n\t\tid++\n\t}\n\n\t// deduplicate containers with the same image\n\tdupMap := map[string]string{}\n\n\tif m.Kernel.ref != nil {\n\t\t// get kernel and initrd tarball and ucode cpio archive from container\n\t\tlog.Infof(\"Extract kernel image: %s\", m.Kernel.ref)\n\t\tkf := newKernelFilter(iw, m.Kernel.Cmdline, m.Kernel.Binary, m.Kernel.Tar, m.Kernel.UCode, opts.DecompressKernel)\n\t\terr := ImageTar(m.Kernel.ref, \"\", kf, \"\", opts)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to extract kernel image and tarball: %v\", err)\n\t\t}\n\t\terr = kf.Close()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Close error: %v\", err)\n\t\t}\n\t}\n\n\t// convert init images to tarballs\n\tif len(m.Init) != 0 {\n\t\tlog.Infof(\"Add init containers:\")\n\t}\n\tapkTar := newAPKTarWriter(iw)\n\tfor _, ii := range m.initRefs {\n\t\tlog.Infof(\"Process init image: %s\", ii)\n\t\terr := ImageTar(ii, \"\", apkTar, resolvconfSymlink, opts)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to build init tarball from %s: %v\", ii, err)\n\t\t}\n\t}\n\tif err := apkTar.WriteAPKDB(); err != nil {\n\t\treturn err\n\t}\n\n\tif len(m.Onboot) != 0 {\n\t\tlog.Infof(\"Add onboot containers:\")\n\t}\n\tfor i, image := range m.Onboot {\n\t\tso := fmt.Sprintf(\"%03d\", i)\n\t\tif err := outputImage(image, \"onboot\", so+\"-\", m, idMap, 
dupMap, iw, opts); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(m.Onshutdown) != 0 {\n\t\tlog.Infof(\"Add onshutdown containers:\")\n\t}\n\tfor i, image := range m.Onshutdown {\n\t\tso := fmt.Sprintf(\"%03d\", i)\n\t\tif err := outputImage(image, \"onshutdown\", so+\"-\", m, idMap, dupMap, iw, opts); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(m.Services) != 0 {\n\t\tlog.Infof(\"Add service containers:\")\n\t}\n\tfor _, image := range m.Services {\n\t\tif err := outputImage(image, \"services\", \"\", m, idMap, dupMap, iw, opts); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// add files\n\terr := filesystem(m, iw, idMap)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to add filesystem parts: %v\", err)\n\t}\n\n\t// add anything additional for this output type\n\tif addition != nil {\n\t\terr = addition(iw)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to add additional files: %v\", err)\n\t\t}\n\t}\n\n\terr = iw.Close()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"initrd close error: %v\", err)\n\t}\n\n\treturn nil\n}", "func getBuildLabels(name string) map[string]string {\n\treturn map[string]string{\n\t\t\"build\": name,\n\t\t\"component_cr\": name,\n\t}\n}", "func (r *Portfolio) Tags() pulumi.MapOutput {\n\treturn (pulumi.MapOutput)(r.s.State[\"tags\"])\n}", "func (o ClusterInstanceOutput) TagsAll() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *ClusterInstance) pulumi.StringMapOutput { return v.TagsAll }).(pulumi.StringMapOutput)\n}", "func (o PlacementGroupOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *PlacementGroup) pulumi.StringMapOutput { return v.Tags }).(pulumi.StringMapOutput)\n}", "func (c *AzureModelContext) CloudTagsForInstanceGroup(ig *kops.InstanceGroup) map[string]*string {\n\tconst (\n\t\tclusterNodeTemplateLabel = \"k8s.io_cluster_node-template_label_\"\n\t\tclusterNodeTemplateTaint = \"k8s.io_cluster_node-template_taint_\"\n\t)\n\n\tlabels := make(map[string]string)\n\t// Apply any 
user-specified global labels first so they can be overridden by IG-specific labels.\n\tfor k, v := range c.Cluster.Spec.CloudLabels {\n\t\tlabels[k] = v\n\t}\n\n\t// Apply any user-specified labels.\n\tfor k, v := range ig.Spec.CloudLabels {\n\t\tlabels[k] = v\n\t}\n\n\t// Apply labels for cluster node labels.\n\ti := 0\n\tfor k, v := range ig.Spec.NodeLabels {\n\t\t// Store the label key in the tag value\n\t\t// so that we don't need to espace \"/\" in the label key.\n\t\tlabels[fmt.Sprintf(\"%s%d\", clusterNodeTemplateLabel, i)] = fmt.Sprintf(\"%s=%s\", k, v)\n\t\ti++\n\t}\n\n\t// Apply labels for cluster node taints.\n\tfor _, v := range ig.Spec.Taints {\n\t\tsplits := strings.SplitN(v, \"=\", 2)\n\t\tif len(splits) > 1 {\n\t\t\tlabels[clusterNodeTemplateTaint+splits[0]] = splits[1]\n\t\t}\n\t}\n\n\t// The system tags take priority because the cluster likely breaks without them...\n\tlabels[azure.TagNameRolePrefix+ig.Spec.Role.ToLowerString()] = \"1\"\n\tif ig.Spec.Role == kops.InstanceGroupRoleControlPlane {\n\t\tlabels[azure.TagNameRolePrefix+\"master\"] = \"1\"\n\t}\n\n\t// Set the tag used by kops-controller to identify the instance group to which the VM ScaleSet belongs.\n\tlabels[nodeidentityazure.InstanceGroupNameTag] = ig.Name\n\n\t// Replace all \"/\" with \"_\" as \"/\" is not an allowed key character in Azure.\n\tm := make(map[string]*string)\n\tfor k, v := range labels {\n\t\tm[strings.ReplaceAll(k, \"/\", \"_\")] = fi.PtrTo(v)\n\t}\n\treturn m\n}", "func (o TriggerBuildPtrOutput) Tags() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v *TriggerBuild) []string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Tags\n\t}).(pulumi.StringArrayOutput)\n}", "func (o CustomLayerOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *CustomLayer) pulumi.StringMapOutput { return v.Tags }).(pulumi.StringMapOutput)\n}", "func (o ImageOutput) Tags() pulumi.MapOutput {\n\treturn o.ApplyT(func(v *Image) pulumi.MapOutput { return v.Tags 
}).(pulumi.MapOutput)\n}", "func Tags(v interface{}, key string) (map[string]string, error) {\n\treturn New(v).Tags(key)\n}", "func (r *DomainName) Tags() pulumi.MapOutput {\n\treturn (pulumi.MapOutput)(r.s.State[\"tags\"])\n}", "func (c *ContainerBuilder) Build() (workloads.Container, error) {\n\tif c.Container.Flist == \"\" {\n\t\treturn workloads.Container{}, fmt.Errorf(\"flist cannot be an empty string\")\n\t}\n\n\tif c.Container.SecretEnvironment == nil {\n\t\tc.Container.SecretEnvironment = make(map[string]string)\n\t}\n\n\tfor k, value := range c.Container.Environment {\n\t\tsecret, err := encryptSecret(value, c.Container.NodeId)\n\t\tif err != nil {\n\t\t\treturn workloads.Container{}, errors.Wrapf(err, \"failed to encrypt env with key '%s'\", k)\n\t\t}\n\t\tc.Container.SecretEnvironment[k] = secret\n\t}\n\tc.Container.Environment = make(map[string]string)\n\treturn c.Container, nil\n}", "func (o TaskDefinitionOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *TaskDefinition) pulumi.StringMapOutput { return v.Tags }).(pulumi.StringMapOutput)\n}", "func Build(records []Record) (*Node, error) {\n\n\tif len(records) < 1 {\n\t\treturn nil, nil\n\t}\n\n\t// mapping := make(map[int][]int)\n\n\troot := &Node{}\n\n\tfor k, v := range records {\n\n\t\tfmt.Printf(\"v is %#v\\n\", v)\n\t\t//\t\tfmt.Printf(\"ID is: %+v and for ID %+v, Parent is %+v\\n\", v.ID, v.ID, v.Parent)\n\t\tfmt.Printf(\"mapping[%v][%v] = %v\\n\", v.Parent, k, v.ID)\n\t}\n\t/* for i := range mapping {\n\t\tfmt.Println(i)\n\t} */\n\n\treturn root, nil\n}", "func (o ReplicaExternalKeyOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *ReplicaExternalKey) pulumi.StringMapOutput { return v.Tags }).(pulumi.StringMapOutput)\n}", "func (s *Site) calculateTags() {\n\n\ttags := make(map[string][]Page)\n\tfor _, post := range s.posts {\n\t\tfor _, tag := range post.GetTags() {\n\t\t\tif posts, ok := tags[tag]; ok == true {\n\t\t\t\ttags[tag] = append(posts, post)\n\t\t\t} 
else {\n\t\t\t\ttags[tag] = []Page{post}\n\t\t\t}\n\t\t}\n\t}\n\n\ts.Conf.Set(\"tags\", tags)\n}", "func (o SubnetOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *Subnet) pulumi.StringMapOutput { return v.Tags }).(pulumi.StringMapOutput)\n}", "func (c *cookRun) globalTags(env environ.Env) map[string]string {\n\tret := make(map[string]string, len(c.GlobalLogDogTags)+1)\n\tif c.BuildURL != \"\" {\n\t\tret[logDogViewerURLTag] = c.BuildURL\n\t}\n\n\t// SWARMING_SERVER is the full URL: https://example.com\n\t// We want just the hostname.\n\tif v, ok := env.Get(\"SWARMING_SERVER\"); ok {\n\t\tif u, err := url.Parse(v); err == nil && u.Host != \"\" {\n\t\t\tret[\"swarming.host\"] = u.Host\n\t\t}\n\t}\n\tif v, ok := env.Get(\"SWARMING_TASK_ID\"); ok {\n\t\tret[\"swarming.run_id\"] = v\n\t}\n\tif v, ok := env.Get(\"SWARMING_BOT_ID\"); ok {\n\t\tret[\"bot_id\"] = v\n\t}\n\n\t// Prefer user-supplied tags to our generated ones.\n\tfor k, v := range c.GlobalLogDogTags {\n\t\tret[k] = v\n\t}\n\n\treturn ret\n}", "func Tags(list ...namedTagEventList) map[string]imageapi.TagEventList {\n\tm := make(map[string]imageapi.TagEventList, len(list))\n\tfor _, tag := range list {\n\t\tm[tag.name] = tag.events\n\t}\n\treturn m\n}", "func (m *RawMapper) TagSets() []string { return TagSetCursors(m.cursors).Keys() }", "func Build(k kubectl.CLI, pod string, d docker.CLI, options *Options, image string, context string, out *output.Interface) error {\n\treturn pkgerror(out.Do(\"Building image\", func(op output.Operation) error {\n\t\top.Progress(\"determining build node\")\n\t\tvar node string\n\t\tfor {\n\t\t\tvar err error\n\t\t\tnode, err = k.String(\"get\", \"pod\", pod, \"--output\", `go-template={{.spec.nodeName}}`)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t} else if node != \"\" {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t}\n\n\t\top.Progress(\"determining build pod\")\n\t\tnodePods, err := server.NodePods(k, out)\n\t\tif err != nil 
{\n\t\t\treturn err\n\t\t}\n\t\tif nodePods[node] == \"\" {\n\t\t\treturn fmt.Errorf(\"cannot build on node %s\", node)\n\t\t}\n\n\t\top.Progress(\"connecting to docker daemon\")\n\t\tdockerPort, stop, err := portforward.StartOne(k, \"kube-system\", nodePods[node], \"2375\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer stop()\n\n\t\tbuildArgs := []string{\"--host\", \"localhost:\" + dockerPort, \"build\"}\n\t\tif options.File != \"\" {\n\t\t\tbuildArgs = append(buildArgs, \"--file\", options.File)\n\t\t}\n\t\tfor _, arg := range options.Args {\n\t\t\tbuildArgs = append(buildArgs, \"--build-arg\", arg)\n\t\t}\n\t\tif options.Target != \"\" {\n\t\t\tbuildArgs = append(buildArgs, \"--target\", options.Target)\n\t\t}\n\t\tbuildArgs = append(buildArgs, \"--tag\", image, context)\n\n\t\top.Progress(\"running\")\n\t\treturn d.EachLine(buildArgs, func(line string) {\n\t\t\tif out.Level < output.LevelVerbose && (strings.HasPrefix(line, \"Sending build context \") || strings.HasPrefix(line, \"Step \")) {\n\t\t\t\top.Progress(\"s\" + line[1:])\n\t\t\t} else {\n\t\t\t\tout.Verbose(\"[docker] %s\", line)\n\t\t\t}\n\t\t})\n\t}))\n}", "func (r *PrivateVirtualInterface) Tags() pulumi.MapOutput {\n\treturn (pulumi.MapOutput)(r.s.State[\"tags\"])\n}", "func (o LookupSharedImageResultOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v LookupSharedImageResult) map[string]string { return v.Tags }).(pulumi.StringMapOutput)\n}", "func (o BucketIntelligentTieringConfigurationFilterOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v BucketIntelligentTieringConfigurationFilter) map[string]string { return v.Tags }).(pulumi.StringMapOutput)\n}", "func (o GraphOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *Graph) pulumi.StringMapOutput { return v.Tags }).(pulumi.StringMapOutput)\n}", "func (b *Builder) Build(name string, groupByTags bool) (*ct.ImageManifest, error) {\n\timage, err := b.Context.LookupImage(name)\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\n\thistory, err := b.Context.History(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tids := make([]string, 0, len(history))\n\tlayers := make([]*ct.ImageLayer, 0, len(history))\n\tfor i := len(history) - 1; i >= 0; i-- {\n\t\tlayer := history[i]\n\t\tids = append(ids, layer.ID)\n\t\tif !groupByTags || len(layer.Tags) > 0 {\n\t\t\tl, err := b.CreateLayer(ids)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tids = make([]string, 0, len(history))\n\t\t\tlayers = append(layers, l)\n\t\t}\n\t}\n\n\tentrypoint := &ct.ImageEntrypoint{\n\t\tWorkingDir: image.Config.WorkingDir,\n\t\tEnv: make(map[string]string, len(image.Config.Env)),\n\t\tArgs: append(image.Config.Entrypoint.Slice(), image.Config.Cmd.Slice()...),\n\t}\n\tfor _, env := range image.Config.Env {\n\t\tkeyVal := strings.SplitN(env, \"=\", 2)\n\t\tif len(keyVal) != 2 {\n\t\t\tcontinue\n\t\t}\n\t\tentrypoint.Env[keyVal[0]] = keyVal[1]\n\t}\n\n\treturn &ct.ImageManifest{\n\t\tType: ct.ImageManifestTypeV1,\n\t\tEntrypoints: map[string]*ct.ImageEntrypoint{\"_default\": entrypoint},\n\t\tRootfs: []*ct.ImageRootfs{{\n\t\t\tPlatform: ct.DefaultImagePlatform,\n\t\t\tLayers: layers,\n\t\t}},\n\t}, nil\n}", "func (o LookupServerResultOutput) Tags() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v LookupServerResult) []string { return v.Tags }).(pulumi.StringArrayOutput)\n}", "func (o ContainerServiceOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *ContainerService) pulumi.StringMapOutput { return v.Tags }).(pulumi.StringMapOutput)\n}", "func (o LookupMulticastDomainResultOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v LookupMulticastDomainResult) map[string]string { return v.Tags }).(pulumi.StringMapOutput)\n}", "func (o LookupServicePlanResultOutput) Tags() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v LookupServicePlanResult) map[string]string { return v.Tags }).(pulumi.StringMapOutput)\n}", "func (o 
KubernetesNodePoolOutput) Tags() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v *KubernetesNodePool) pulumi.StringArrayOutput { return v.Tags }).(pulumi.StringArrayOutput)\n}" ]
[ "0.61947906", "0.6137007", "0.6052599", "0.6052599", "0.59727746", "0.5800963", "0.57946444", "0.56435233", "0.5506044", "0.5390409", "0.5332386", "0.5301837", "0.5233257", "0.5230261", "0.5214728", "0.5183021", "0.516498", "0.51597834", "0.5121788", "0.51138663", "0.50568736", "0.504671", "0.5031043", "0.5029656", "0.502671", "0.49914277", "0.49884972", "0.49883288", "0.4981707", "0.49649408", "0.4940157", "0.49398333", "0.49219927", "0.4921723", "0.4920964", "0.4887474", "0.48843035", "0.48818165", "0.48599488", "0.48560333", "0.4832263", "0.4828498", "0.4824947", "0.48195168", "0.48138815", "0.48111954", "0.4805692", "0.4798451", "0.4782128", "0.47763258", "0.47731483", "0.4772344", "0.47634903", "0.475684", "0.47452408", "0.47428548", "0.47383967", "0.4736622", "0.47261697", "0.4717399", "0.4698999", "0.46931764", "0.46931112", "0.46892422", "0.46882275", "0.46871507", "0.46825418", "0.46808386", "0.46799392", "0.46747798", "0.4669573", "0.46685612", "0.46634102", "0.46628258", "0.46627736", "0.4646064", "0.463838", "0.4631701", "0.46308312", "0.4629321", "0.46258587", "0.46236342", "0.4621515", "0.46179232", "0.4615294", "0.46102774", "0.46038076", "0.46018308", "0.45996374", "0.45970094", "0.4588664", "0.4578703", "0.4578069", "0.4574973", "0.45667732", "0.456435", "0.4544743", "0.45437783", "0.4537817", "0.4531656" ]
0.6989099
0